ngram
listlengths
0
67.8k
[ "job, result, processed_image): self.processed_image = processed_image @pytest.fixture def callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback))", "if is_planar: np_image = np.transpose(np_image, (1, 2, 0)) expected = np.array([126, 131, 129,", "= iterator.GetKey() if result == _pybraw.E_FAIL: break assert result == _pybraw.S_OK metadata[key] =", "== format np_image = callback.processed_image.to_py() del callback.processed_image np_image = np_image / max_val if", "def callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture def frame(codec, clip, callback):", "job, result, frame): self.frame = frame def ProcessComplete(self, job, result, processed_image): self.processed_image =", "isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400 def test_GetMetadataIterator(frame): iterator =", "test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release()", "assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format == format np_image =", "assert height == 540 # from PIL import Image # pil_image = Image.fromarray(callback.processed_image.to_py()[...,", "format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type =", "{} while True: result, key = iterator.GetKey() if result == _pybraw.E_FAIL: break assert", "_pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result, frame): self.frame = 
frame def", "processed_image @pytest.fixture def callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture def frame(codec,", "callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True,", "def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit()", "1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]), ]) def test_SetResourceFormat(frame, codec, callback,", "= verify(callback.processed_image.GetResourceFormat()) assert resource_format == format np_image = callback.processed_image.to_py() del callback.processed_image np_image =", "pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes,", "== 400 def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata = {} while True: result,", "callback = CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture def frame(codec, clip, callback): read_job =", "import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result, frame): self.frame = frame", "verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format =", "= verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type = 
verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format", "del callback.processed_image np_image = np_image / max_val if is_planar: np_image = np.transpose(np_image, (1,", "the original DCI full frame 4K. width = verify(callback.processed_image.GetWidth()) assert width == 1024", "== 540 # from PIL import Image # pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) #", "Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso", "ProcessComplete(self, job, result, processed_image): self.processed_image = processed_image @pytest.fixture def callback(codec): callback = CapturingCallback()", "= verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2,", "# pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py()", "resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format == format np_image = callback.processed_image.to_py()", "400 def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata = {} while True: result, key", "result == _pybraw.E_FAIL: break assert result == _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert", "result, frame): self.frame = frame def 
ProcessComplete(self, job, result, processed_image): self.processed_image = processed_image", "full frame 4K. width = verify(callback.processed_image.GetWidth()) assert width == 1024 height = verify(callback.processed_image.GetHeight())", "[0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]), ]) def test_SetResourceFormat(frame, codec,", ":3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso =", "True: result, key = iterator.GetKey() if result == _pybraw.E_FAIL: break assert result ==", "def ProcessComplete(self, job, result, processed_image): self.processed_image = processed_image @pytest.fixture def callback(codec): callback =", "= verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400 def", "np.array([25, 1])) def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600 def test_SetMetadata(frame):", "assert resource_format == format np_image = callback.processed_image.to_py() del callback.processed_image np_image = np_image /", "codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() # Check that the", "callback @pytest.fixture def frame(codec, clip, callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return", "== 1024 height = verify(callback.processed_image.GetHeight()) assert height == 540 # from PIL import", "the resolution is one quarter of the original DCI full frame 4K. 
width", "np_image = callback.processed_image.to_py() del callback.processed_image np_image = np_image / max_val if is_planar: np_image", "verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() # Check that the resolution is", "= verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame): white_balance", "[0, 1, 2]), ]) def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format))", "iterator.GetKey() if result == _pybraw.E_FAIL: break assert result == _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py()", "def ReadComplete(self, job, result, frame): self.frame = frame def ProcessComplete(self, job, result, processed_image):", "False, [2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16,", "== _pybraw.E_FAIL: break assert result == _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin']", "max_val if is_planar: np_image = np.transpose(np_image, (1, 2, 0)) expected = np.array([126, 131,", "import assert_allclose from pybraw import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result,", "pybraw import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result, frame): self.frame =", "= verify(frame.GetMetadataIterator()) metadata = {} while True: result, key = iterator.GetKey() if result", "== 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert 
white_balance.to_py() ==", "@pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0,", "= verify(callback.processed_image.GetWidth()) assert width == 1024 height = verify(callback.processed_image.GetHeight()) assert height == 540", "= {} while True: result, key = iterator.GetKey() if result == _pybraw.E_FAIL: break", "129, 255])[channels] / 255 assert_allclose(np_image[100, 200], expected, atol=1 / 255) def test_SetResolutionScale(frame, codec,", "/ 255) def test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs()", "PIL import Image # pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes", "format np_image = callback.processed_image.to_py() del callback.processed_image np_image = np_image / max_val if is_planar:", "process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU", "frame 4K. 
width = verify(callback.processed_image.GetWidth()) assert width == 1024 height = verify(callback.processed_image.GetHeight()) assert", "import numpy as np import pytest from numpy.testing import assert_allclose from pybraw import", "verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800))) white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert", "1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0,", "def frame(codec, clip, callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels',", "width == 1024 height = verify(callback.processed_image.GetHeight()) assert height == 540 # from PIL", "3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]),", "numpy.testing import assert_allclose from pybraw import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job,", "max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType())", "/ 255 assert_allclose(np_image[100, 200], expected, atol=1 / 255) def test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter))", "result, key = iterator.GetKey() if result == _pybraw.E_FAIL: break assert result == _pybraw.S_OK", "class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result, frame): self.frame = frame 
def ProcessComplete(self, job,", "verify(callback.processed_image.GetResourceFormat()) assert resource_format == format np_image = callback.processed_image.to_py() del callback.processed_image np_image = np_image", "5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600", "numpy as np import pytest from numpy.testing import assert_allclose from pybraw import _pybraw,", "= Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes)", "def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso", "iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400 def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata", "1, 2]), ]) def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job", "resource_format == format np_image = callback.processed_image.to_py() del callback.processed_image np_image = np_image / max_val", "clip, callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8,", "[ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1,", 
"255 assert_allclose(np_image[100, 200], expected, atol=1 / 255) def test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job", "# Check that the resolution is one quarter of the original DCI full", "1024 height = verify(callback.processed_image.GetHeight()) assert height == 540 # from PIL import Image", "test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso ==", "def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata = {} while True: result, key =", "self.processed_image = processed_image @pytest.fixture def callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture", "iterator = verify(frame.GetMetadataIterator()) metadata = {} while True: result, key = iterator.GetKey() if", "# pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert", "key = iterator.GetKey() if result == _pybraw.E_FAIL: break assert result == _pybraw.S_OK metadata[key]", "result, processed_image): self.processed_image = processed_image @pytest.fixture def callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback)) return", "2]), ]) def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job =", "(_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]), ]) def test_SetResourceFormat(frame, codec, callback, format, max_val,", "self.frame = frame def ProcessComplete(self, job, result, processed_image): 
self.processed_image = processed_image @pytest.fixture def", "# from PIL import Image # pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def", "break assert result == _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600", "assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert", "verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType()) assert resource_type ==", "assert iso == 400 def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata = {} while", "callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8,", "if result == _pybraw.E_FAIL: break assert result == _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next())", "0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1,", "/ max_val if is_planar: np_image = np.transpose(np_image, (1, 2, 0)) expected = np.array([126,", "2**8, False, [2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar,", "frame(codec, clip, callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [", "np_image 
= np.transpose(np_image, (1, 2, 0)) expected = np.array([126, 131, 129, 255])[channels] /", "is_planar: np_image = np.transpose(np_image, (1, 2, 0)) expected = np.array([126, 131, 129, 255])[channels]", "4K. width = verify(callback.processed_image.GetWidth()) assert width == 1024 height = verify(callback.processed_image.GetHeight()) assert height", "codec, callback, format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs()", "verify(callback.processed_image.GetHeight()) assert height == 540 # from PIL import Image # pil_image =", "resource_type = verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format ==", "= verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400 def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata =", "return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1,", "assert result == _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'],", "np_image = np_image / max_val if is_planar: np_image = np.transpose(np_image, (1, 2, 0))", "540 # from PIL import Image # pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show()", "= frame def ProcessComplete(self, job, result, processed_image): self.processed_image = processed_image @pytest.fixture def callback(codec):", "expected, atol=1 / 255) def test_SetResolutionScale(frame, codec, callback): 
verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit()", "callback, format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type", "codec.FlushJobs() # Check that the resolution is one quarter of the original DCI", "131, 129, 255])[channels] / 255 assert_allclose(np_image[100, 200], expected, atol=1 / 255) def test_SetResolutionScale(frame,", "test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800))) white_balance", "read_job.Release() verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]),", "np import pytest from numpy.testing import assert_allclose from pybraw import _pybraw, verify class", "(_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]), ])", "processed_image): self.processed_image = processed_image @pytest.fixture def callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback)) return callback", "frame): self.frame = frame def ProcessComplete(self, job, result, processed_image): self.processed_image = processed_image @pytest.fixture", "as np import pytest from numpy.testing import assert_allclose from pybraw import _pybraw, verify", "of the original DCI full frame 4K. 
width = verify(callback.processed_image.GetWidth()) assert width ==", "_pybraw.E_FAIL: break assert result == _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] ==", "height = verify(callback.processed_image.GetHeight()) assert height == 540 # from PIL import Image #", "verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400 def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata = {}", "== 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800))) white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 2800", "verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1,", "2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]), ]) def test_SetResourceFormat(frame, codec, callback, format,", "True, [0, 1, 2]), ]) def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels):", "channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType()) assert resource_type", "assert_allclose(np_image[100, 200], expected, atol=1 / 255) def test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job =", "test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() 
process_job.Release() codec.FlushJobs() # Check that", "process_job.Submit() process_job.Release() codec.FlushJobs() # Check that the resolution is one quarter of the", "= verify(callback.processed_image.GetHeight()) assert height == 540 # from PIL import Image # pil_image", "that the resolution is one quarter of the original DCI full frame 4K.", "pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert", "255) def test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() #", "resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format == format np_image = callback.processed_image.to_py() del callback.processed_image np_image", "= np.array([126, 131, 129, 255])[channels] / 255 assert_allclose(np_image[100, 200], expected, atol=1 / 255)", "CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result, frame): self.frame = frame def ProcessComplete(self, job, result,", "= np.transpose(np_image, (1, 2, 0)) expected = np.array([126, 131, 129, 255])[channels] / 255", "assert_allclose from pybraw import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result, frame):", "assert width == 1024 height = verify(callback.processed_image.GetHeight()) assert height == 540 # from", "metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame):", 
"verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format == format np_image", "test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata = {} while True: result, key = iterator.GetKey()", "callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() # Check that the resolution", "while True: result, key = iterator.GetKey() if result == _pybraw.E_FAIL: break assert result", "200], expected, atol=1 / 255) def test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame())", "return callback @pytest.fixture def frame(codec, clip, callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs())", "np.array([126, 131, 129, 255])[channels] / 255 assert_allclose(np_image[100, 200], expected, atol=1 / 255) def", "= verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800))) white_balance = verify(frame.GetMetadata('white_balance_kelvin'))", "= np_image / max_val if is_planar: np_image = np.transpose(np_image, (1, 2, 0)) expected", "frame def ProcessComplete(self, job, result, processed_image): self.processed_image = processed_image @pytest.fixture def callback(codec): callback", "import pytest from numpy.testing import assert_allclose from pybraw import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback):", "callback.processed_image.to_py() del callback.processed_image np_image = np_image / max_val if 
is_planar: np_image = np.transpose(np_image,", "verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar,", "1])) def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin',", "(_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]),", "process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() # Check that the resolution is one", "np.transpose(np_image, (1, 2, 0)) expected = np.array([126, 131, 129, 255])[channels] / 255 assert_allclose(np_image[100,", "iso == 400 def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator()) metadata = {} while True:", "verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() # Check that the resolution is one quarter of", "verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin'))", "assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600 def", "from pybraw import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result, frame): self.frame", "_pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def", "= 
verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format == format", "white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800))) white_balance =", "verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame @pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0,", "= callback.processed_image.to_py() del callback.processed_image np_image = np_image / max_val if is_planar: np_image =", "255])[channels] / 255 assert_allclose(np_image[100, 200], expected, atol=1 / 255) def test_SetResolutionScale(frame, codec, callback):", "_pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400 def test_GetMetadataIterator(frame): iterator = verify(frame.GetMetadataIterator())", "verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400 def test_GetMetadataIterator(frame):", "True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]), ]) def test_SetResourceFormat(frame,", "process_job.Release() codec.FlushJobs() # Check that the resolution is one quarter of the original", "codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format", "np_image / max_val if 
is_planar: np_image = np.transpose(np_image, (1, 2, 0)) expected =", "from PIL import Image # pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame):", "metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py()", "white_balance.to_py() == 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800))) white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() ==", "process_job.Release() codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert", "== _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1]))", "from numpy.testing import assert_allclose from pybraw import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self,", "_pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format == format np_image = callback.processed_image.to_py() del callback.processed_image", "DCI full frame 4K. 
width = verify(callback.processed_image.GetWidth()) assert width == 1024 height =", "def test_GetMetadata(frame): white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py() == 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800)))", "is_planar, channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType()) assert", "verify(codec.SetCallback(callback)) return callback @pytest.fixture def frame(codec, clip, callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release()", "== _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat()) assert resource_format == format np_image = callback.processed_image.to_py() del", "def test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() # Check", "verify(frame.GetMetadataIterator()) metadata = {} while True: result, key = iterator.GetKey() if result ==", "Check that the resolution is one quarter of the original DCI full frame", "attributes = verify(frame.CloneFrameProcessingAttributes()) assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400", "ReadComplete(self, job, result, frame): self.frame = frame def ProcessComplete(self, job, result, processed_image): self.processed_image", "read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame 
@pytest.mark.parametrize('format,max_val,is_planar,channels', [ (_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False,", "[2, 1, 0, 3]), (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True,", "expected = np.array([126, 131, 129, 255])[channels] / 255 assert_allclose(np_image[100, 200], expected, atol=1 /", "height == 540 # from PIL import Image # pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3])", "2, 0)) expected = np.array([126, 131, 129, 255])[channels] / 255 assert_allclose(np_image[100, 200], expected,", "import Image # pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes =", "result == _pybraw.S_OK metadata[key] = verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25,", "@pytest.fixture def frame(codec, clip, callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit()) read_job.Release() verify(codec.FlushJobs()) return callback.frame", "CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture def frame(codec, clip, callback): read_job = verify(clip.CreateJobReadFrame(12)) verify(read_job.Submit())", "callback.processed_image np_image = np_image / max_val if is_planar: np_image = np.transpose(np_image, (1, 2,", "verify(callback.processed_image.GetWidth()) assert width == 1024 height = verify(callback.processed_image.GetHeight()) assert height == 540 #", "pytest from numpy.testing import assert_allclose from pybraw import _pybraw, verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def", "verify class CapturingCallback(_pybraw.BlackmagicRawCallback): def ReadComplete(self, job, result, frame): self.frame = frame def ProcessComplete(self,", "quarter of the original DCI full frame 4K. 
width = verify(callback.processed_image.GetWidth()) assert width", "verify(iterator.GetData()).to_py() verify(iterator.Next()) assert metadata['white_balance_kelvin'] == 5600 assert_allclose(metadata['sensor_rate'], np.array([25, 1])) def test_GetMetadata(frame): white_balance =", "= CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture def frame(codec, clip, callback): read_job = verify(clip.CreateJobReadFrame(12))", "= verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release() codec.FlushJobs() # Check that the resolution is one quarter", "atol=1 / 255) def test_SetResolutionScale(frame, codec, callback): verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter)) process_job = verify(frame.CreateJobDecodeAndProcessFrame()) process_job.Submit() process_job.Release()", "2**16, True, [0, 1, 2]), ]) def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar,", "callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture def frame(codec, clip, callback): read_job", "process_job.Submit() process_job.Release() codec.FlushJobs() resource_type = verify(callback.processed_image.GetResourceType()) assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU resource_format = verify(callback.processed_image.GetResourceFormat())", "resolution is one quarter of the original DCI full frame 4K. width =", "Image # pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3]) # pil_image.show() def test_CloneFrameProcessingAttributes(frame): attributes = verify(frame.CloneFrameProcessingAttributes())", "original DCI full frame 4K. 
width = verify(callback.processed_image.GetWidth()) assert width == 1024 height", "width = verify(callback.processed_image.GetWidth()) assert width == 1024 height = verify(callback.processed_image.GetHeight()) assert height ==", "assert white_balance.to_py() == 5600 def test_SetMetadata(frame): verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800))) white_balance = verify(frame.GetMetadata('white_balance_kelvin')) assert white_balance.to_py()", "= processed_image @pytest.fixture def callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture def", "@pytest.fixture def callback(codec): callback = CapturingCallback() verify(codec.SetCallback(callback)) return callback @pytest.fixture def frame(codec, clip,", "1, True, [0, 1, 2]), (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]), ]) def", "one quarter of the original DCI full frame 4K. width = verify(callback.processed_image.GetWidth()) assert", "assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes) iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py() assert iso == 400 def test_GetMetadataIterator(frame): iterator", "is one quarter of the original DCI full frame 4K. width = verify(callback.processed_image.GetWidth())", "metadata = {} while True: result, key = iterator.GetKey() if result == _pybraw.E_FAIL:", "]) def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels): verify(frame.SetResourceFormat(format)) process_job = verify(frame.CreateJobDecodeAndProcessFrame())", "0)) expected = np.array([126, 131, 129, 255])[channels] / 255 assert_allclose(np_image[100, 200], expected, atol=1", "(1, 2, 0)) expected = np.array([126, 131, 129, 255])[channels] / 255 assert_allclose(np_image[100, 200]," ]
[ "nową liczbę elementów: \")) self.Update() def policz_sume(self): sum = 0 for i in", "= int(input(\"Podaj nową liczbę elementów: \")) self.Update() def policz_sume(self): sum = 0 for", "+ 1 print(\"Suma elementow: \", sum) x = ciag_arytmetyczny(1, 1, 10) x.wyswietl_dane() x.pobierz_parametry()", "elementów: \")) self.Update() def policz_sume(self): sum = 0 for i in range(self.ile_elementow): sum", "roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica = roznica self.ile_elementow = ile_elementow self.Update() def", "= 0 for i in range(self.ile_elementow): sum += self.ciag[i] print(\"Suma ciagu: \", sum)", "ciag_arytmetyczny: global ciag def Update(self): self.ciag = [self.pierwsza_wart] for i in range(self.ile_elementow -", "self.roznica) self.pierwsza_wart += self.roznica def __init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica", "self.pierwsza_wart = pierwsza_wart self.roznica = roznica self.ile_elementow = ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag,", "self.ciag[i] print(\"Suma ciagu: \", sum) def policz_elementy(self): sum = 0 for i in", "roznica self.ile_elementow = ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\", \", end=\"\\n\") def pobierz_elementy(self):", "class ciag_arytmetyczny: global ciag def Update(self): self.ciag = [self.pierwsza_wart] for i in range(self.ile_elementow", "print(\"Suma elementow: \", sum) x = ciag_arytmetyczny(1, 1, 10) x.wyswietl_dane() x.pobierz_parametry() x.wyswietl_dane() x.policz_sume()", "nową różnicę: \")) self.ile_elementow = int(input(\"Podaj nową liczbę elementów: \")) self.Update() def policz_sume(self):", "= int(input(\"Podaj nowy pierwszy wyraz: \")) self.roznica = int(input(\"Podaj nową różnicę: \")) self.ile_elementow", "pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica = roznica self.ile_elementow = ile_elementow self.Update()", "def policz_sume(self): 
sum = 0 for i in range(self.ile_elementow): sum += self.ciag[i] print(\"Suma", "for i in range(self.ile_elementow): sum = sum + 1 print(\"Suma elementow: \", sum)", "__init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica = roznica self.ile_elementow = ile_elementow", "sum + 1 print(\"Suma elementow: \", sum) x = ciag_arytmetyczny(1, 1, 10) x.wyswietl_dane()", "różnicę: \")) self.ile_elementow = int(input(\"Podaj nową liczbę elementów: \")) self.Update() def policz_sume(self): sum", "wyswietl_dane(self): print(*self.ciag, sep=\", \", end=\"\\n\") def pobierz_elementy(self): for i in range(self.ile_elementow): self.ciag[i] =", "\")) self.Update() def policz_sume(self): sum = 0 for i in range(self.ile_elementow): sum +=", "Update(self): self.ciag = [self.pierwsza_wart] for i in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart + self.roznica)", "i in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart += self.roznica def __init__(self,", "in range(self.ile_elementow): sum += self.ciag[i] print(\"Suma ciagu: \", sum) def policz_elementy(self): sum =", "self.ile_elementow = int(input(\"Podaj nową liczbę elementów: \")) self.Update() def policz_sume(self): sum = 0", "+= self.roznica def __init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica = roznica", "liczbę elementów: \")) self.Update() def policz_sume(self): sum = 0 for i in range(self.ile_elementow):", "i in range(self.ile_elementow): sum += self.ciag[i] print(\"Suma ciagu: \", sum) def policz_elementy(self): sum", "= 0 for i in range(self.ile_elementow): sum = sum + 1 print(\"Suma elementow:", "self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart += self.roznica def __init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart =", "sum) x = ciag_arytmetyczny(1, 1, 10) x.wyswietl_dane() x.pobierz_parametry() x.wyswietl_dane() 
x.policz_sume() x.policz_elementy() x.pobierz_elementy() x.wyswietl_dane()", "- 1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart += self.roznica def __init__(self, pierwsza_wart, roznica, ile_elementow):", "elementow: \", sum) x = ciag_arytmetyczny(1, 1, 10) x.wyswietl_dane() x.pobierz_parametry() x.wyswietl_dane() x.policz_sume() x.policz_elementy()", "range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy pierwszy wyraz: \"))", "1 print(\"Suma elementow: \", sum) x = ciag_arytmetyczny(1, 1, 10) x.wyswietl_dane() x.pobierz_parametry() x.wyswietl_dane()", "pierwsza_wart self.roznica = roznica self.ile_elementow = ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\", \",", "pobierz_elementy(self): for i in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj", "self.pierwsza_wart += self.roznica def __init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica =", "nowy pierwszy wyraz: \")) self.roznica = int(input(\"Podaj nową różnicę: \")) self.ile_elementow = int(input(\"Podaj", "in range(self.ile_elementow): sum = sum + 1 print(\"Suma elementow: \", sum) x =", "int(input(\"Podaj nowy pierwszy wyraz: \")) self.roznica = int(input(\"Podaj nową różnicę: \")) self.ile_elementow =", "\")) self.ile_elementow = int(input(\"Podaj nową liczbę elementów: \")) self.Update() def policz_sume(self): sum =", "\", sum) def policz_elementy(self): sum = 0 for i in range(self.ile_elementow): sum =", "= int(input(\"Podaj nową różnicę: \")) self.ile_elementow = int(input(\"Podaj nową liczbę elementów: \")) self.Update()", "<filename>zadanka/zadanka.py class ciag_arytmetyczny: global ciag def Update(self): self.ciag = [self.pierwsza_wart] for i in", "pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy 
pierwszy wyraz: \")) self.roznica = int(input(\"Podaj nową różnicę:", "policz_sume(self): sum = 0 for i in range(self.ile_elementow): sum += self.ciag[i] print(\"Suma ciagu:", "pierwszy wyraz: \")) self.roznica = int(input(\"Podaj nową różnicę: \")) self.ile_elementow = int(input(\"Podaj nową", "= int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy pierwszy wyraz: \")) self.roznica =", "for i in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart += self.roznica def", "range(self.ile_elementow): sum = sum + 1 print(\"Suma elementow: \", sum) x = ciag_arytmetyczny(1,", "in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy pierwszy wyraz:", "self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy pierwszy wyraz: \")) self.roznica", "def __init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica = roznica self.ile_elementow =", "+ self.roznica) self.pierwsza_wart += self.roznica def __init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart", "0 for i in range(self.ile_elementow): sum = sum + 1 print(\"Suma elementow: \",", "1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart += self.roznica def __init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart", "\")) self.roznica = int(input(\"Podaj nową różnicę: \")) self.ile_elementow = int(input(\"Podaj nową liczbę elementów:", "def wyswietl_dane(self): print(*self.ciag, sep=\", \", end=\"\\n\") def pobierz_elementy(self): for i in range(self.ile_elementow): self.ciag[i]", "ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\", \", end=\"\\n\") def pobierz_elementy(self): for i in", "self.pierwsza_wart = int(input(\"Podaj nowy pierwszy 
wyraz: \")) self.roznica = int(input(\"Podaj nową różnicę: \"))", "i in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy pierwszy", "def Update(self): self.ciag = [self.pierwsza_wart] for i in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart +", "self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\", \", end=\"\\n\") def pobierz_elementy(self): for i in range(self.ile_elementow):", "\", sum) x = ciag_arytmetyczny(1, 1, 10) x.wyswietl_dane() x.pobierz_parametry() x.wyswietl_dane() x.policz_sume() x.policz_elementy() x.pobierz_elementy()", "int(input(\"Podaj nową liczbę elementów: \")) self.Update() def policz_sume(self): sum = 0 for i", "ciagu: \", sum) def policz_elementy(self): sum = 0 for i in range(self.ile_elementow): sum", "range(self.ile_elementow): sum += self.ciag[i] print(\"Suma ciagu: \", sum) def policz_elementy(self): sum = 0", "ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica = roznica self.ile_elementow = ile_elementow self.Update() def wyswietl_dane(self):", "range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart += self.roznica def __init__(self, pierwsza_wart, roznica,", "in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart += self.roznica def __init__(self, pierwsza_wart,", "wyraz: \")) self.roznica = int(input(\"Podaj nową różnicę: \")) self.ile_elementow = int(input(\"Podaj nową liczbę", "ciag def Update(self): self.ciag = [self.pierwsza_wart] for i in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart", "self.roznica def __init__(self, pierwsza_wart, roznica, ile_elementow): self.pierwsza_wart = pierwsza_wart self.roznica = roznica self.ile_elementow", "= [self.pierwsza_wart] for i in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart +=", "= sum 
+ 1 print(\"Suma elementow: \", sum) x = ciag_arytmetyczny(1, 1, 10)", "= pierwsza_wart self.roznica = roznica self.ile_elementow = ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\",", "for i in range(self.ile_elementow): sum += self.ciag[i] print(\"Suma ciagu: \", sum) def policz_elementy(self):", "sep=\", \", end=\"\\n\") def pobierz_elementy(self): for i in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def", "end=\"\\n\") def pobierz_elementy(self): for i in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart", "self.roznica = roznica self.ile_elementow = ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\", \", end=\"\\n\")", "sum = sum + 1 print(\"Suma elementow: \", sum) x = ciag_arytmetyczny(1, 1,", "sum += self.ciag[i] print(\"Suma ciagu: \", sum) def policz_elementy(self): sum = 0 for", "sum) def policz_elementy(self): sum = 0 for i in range(self.ile_elementow): sum = sum", "+= self.ciag[i] print(\"Suma ciagu: \", sum) def policz_elementy(self): sum = 0 for i", "self.Update() def policz_sume(self): sum = 0 for i in range(self.ile_elementow): sum += self.ciag[i]", "[self.pierwsza_wart] for i in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart += self.roznica", "= ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\", \", end=\"\\n\") def pobierz_elementy(self): for i", "self.ile_elementow = ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\", \", end=\"\\n\") def pobierz_elementy(self): for", "int(input(\"Podaj nową różnicę: \")) self.ile_elementow = int(input(\"Podaj nową liczbę elementów: \")) self.Update() def", "policz_elementy(self): sum = 0 for i in range(self.ile_elementow): sum = sum + 1", "= roznica self.ile_elementow = ile_elementow self.Update() def wyswietl_dane(self): print(*self.ciag, sep=\", \", 
end=\"\\n\") def", "print(\"Suma ciagu: \", sum) def policz_elementy(self): sum = 0 for i in range(self.ile_elementow):", "for i in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy", "sum = 0 for i in range(self.ile_elementow): sum += self.ciag[i] print(\"Suma ciagu: \",", "sum = 0 for i in range(self.ile_elementow): sum = sum + 1 print(\"Suma", "self.roznica = int(input(\"Podaj nową różnicę: \")) self.ile_elementow = int(input(\"Podaj nową liczbę elementów: \"))", "0 for i in range(self.ile_elementow): sum += self.ciag[i] print(\"Suma ciagu: \", sum) def", "def policz_elementy(self): sum = 0 for i in range(self.ile_elementow): sum = sum +", "\", end=\"\\n\") def pobierz_elementy(self): for i in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self):", "self.ciag = [self.pierwsza_wart] for i in range(self.ile_elementow - 1): self.ciag.append(self.pierwsza_wart + self.roznica) self.pierwsza_wart", "int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy pierwszy wyraz: \")) self.roznica = int(input(\"Podaj", "def pobierz_elementy(self): for i in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\")) def pobierz_parametry(self): self.pierwsza_wart =", "global ciag def Update(self): self.ciag = [self.pierwsza_wart] for i in range(self.ile_elementow - 1):", "print(*self.ciag, sep=\", \", end=\"\\n\") def pobierz_elementy(self): for i in range(self.ile_elementow): self.ciag[i] = int(input(\"PodajElement\"))", "i in range(self.ile_elementow): sum = sum + 1 print(\"Suma elementow: \", sum) x", "def pobierz_parametry(self): self.pierwsza_wart = int(input(\"Podaj nowy pierwszy wyraz: \")) self.roznica = int(input(\"Podaj nową" ]
[ "'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }] }, 'Video':", "ydl2: result = ydl2.extract_info(link,download=False) if 'entries' in result: video = result['entries'][0] else: video", "+ \"[*] 1.Download an Audio playlist\") print(colors3 + \"[*] 2.Download a Video playlist\")", ": : :|-.._ ' ` _..-|: : : `:| |`:-:-.-:-:'| |:' : `.", "+ \"Re Run The Script With The Same URL And The Same Options", "data): try: with dl.YoutubeDL(data) as ydl: ydl.download([link]) except dl.utils.DownloadError as err: print(colors +", "break except dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred !!!\") print(colors4 + \"Re Run The", "# Url Download def download(link, data): try: with dl.YoutubeDL(data) as ydl: ydl.download([link]) except", "= [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3 =", "link: \" + colors9) if not valid.url(link): print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" +", "'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137', }] }, 'list': { 'listsubtitles':", "Using [pip3 install youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor = 
[\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors =", "network.\") return False except requests.exceptions.HTTPError as err: print(err) return False # Configuration File", "download(link, config['Audio']) else: print(colors8 + \"Unknown Choice :(\") check_str = str(input(colors7 + \"[*]", "|.' .' `. `-:_| | |_:-' .' - Welcome To PrNdOwN! `-._ ````", "Code Without Giving The Credits Nerd from __future__ import unicode_literals try: import os,sys,requests", "config['Video']['noplaylist'] = False download(link, config['Video']) elif check_inp == 4: download(link, config['Video']) elif check_inp", "install youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2", "= cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9 =", "Install It Using [pip3 install youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor = 
[\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"]", "try: requests.get(link) return True except requests.exceptions.ConnectionError: print(colors4 + \"[!] disconnected from network.\") return", "+ \"DownloadError Occurred !!!\") print(colors4 + \"Re Run The Script With The Same", "+ \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Unvalid Url!!!\" + colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\"", "\" Please Try Again\" + colors6) exit(1) if check(link): print(colors6 + \"Title Video:", "except dl.utils.DownloadError as err: print(colors + err) # Extract URL Information def get_info(link):", "== 4: download(link, config['Video']) elif check_inp == 3: download(link, config['Audio']) else: print(colors8 +", ".'\\ /`. .'.-.`-'.-.`. ..._: .-. .-. :_... .' '-.(o ) (o ).-' `.", "print('Module [youtube-dl] Status: Not Found!') print('['+'*'*20+']') print('Please Install It Using [pip3 install youtube-dl]')", "/`. .'.-.`-'.-.`. ..._: .-. .-. :_... .' '-.(o ) (o ).-' `. :", ") (o ).-' `. 
: _ _ _`~(_)~`_ _ _ : : /:", "Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download an Audio playlist\") print(colors3 +", "elif check_inp == 4: download(link, config['Video']) elif check_inp == 3: download(link, config['Audio']) else:", "\"[*] 3.Download a Single Audio\") print(colors8 + \"[*] 4.Download a single video file\")", "the link: \" + colors9) if not valid.url(link): print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\"", "4.Download a single video file\") check_inp = int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \"))", "Download def download(link, data): try: with dl.YoutubeDL(data) as ydl: ydl.download([link]) except dl.utils.DownloadError as", "print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Please Try Again\" + colors6) exit(1)", "+ \" Unvalid Url!!!\" + colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \"", "'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137', }] },", "+ \"[!] 
Please check your network connection.\") return False except requests.exceptions.Timeout: print(colors2 +", "print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2 = cor[randint(0,15)]", "\"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Please Try Again\" + colors6) exit(1) if check(link):", "clear(): clear = os.system('clear') return clear # banner def banner(): print(colors + \"\"\"", "net(url): try: requests.get(url) except requests.exceptions.ConnectionError: print(colors + \"[!] Please check your network connection.\")", "Time\") break except dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred !!!\") print(colors4 + \"Re Run", "The Same Options To Continue Downloading!\") exit(1) except RuntimeError: exit(1) if __name__ ==", "1: config['Audio']['noplaylist'] = False download(link, config['Audio']) elif check_inp == 2: config['Video']['noplaylist'] = False", "..._: .-. .-. :_... .' '-.(o ) (o ).-' `. : _ _", "= cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6 =", "_.-' ``-------'' \"\"\") # Check if user is connected to internet def net(url):", "requests.exceptions.RequestException as e: # catastrophic error. bail. 
print(e) sys.exit(1) return True # Check", "= cor[randint(0,15)] # Clear Screen def clear(): clear = os.system('clear') return clear #", "The Same URL And The Same Options To Continue Downloading!\") exit(1) except RuntimeError:", "3.Download a Single Audio\") print(colors8 + \"[*] 4.Download a single video file\") check_inp", "youtube_dl as dl import validators as valid from time import sleep as sl", ":|-.._ ' ` _..-|: : : `:| |`:-:-.-:-:'| |:' : `. `.| |", "For PornHub # Don't Copy The Code Without Giving The Credits Nerd from", "Hd Video Downloader For PornHub # Don't Copy The Code Without Giving The", "`. : _ _ _`~(_)~`_ _ _ : : /: ' .-=_ _=-.", "network connection.\") return False except requests.exceptions.Timeout: print(colors2 + \"[!!!] Site is taking too", "[1,2,3,4]: if check_inp == 1: config['Audio']['noplaylist'] = False download(link, config['Audio']) elif check_inp ==", "# Created By r2dr0dn # Hd Video Downloader For PornHub # Don't Copy", "disconnected from network.\") return False except requests.exceptions.HTTPError as err: print(err) return False #", "return True except requests.exceptions.ConnectionError: print(colors4 + \"[!] disconnected from network.\") return False except", "many Redirects.\") return False except requests.exceptions.RequestException as e: # catastrophic error. bail. print(e)", "Found!') print('['+'*'*20+']') print('Please Install It Using [pip3 install youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\"", "error. bail. 
print(e) sys.exit(1) return True # Check the validity of the given", "= cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8 =", "True }, 'listformat': { 'lisformats': True } } # Url Download def download(link,", "= video['url'] return video_title # Main Function def main(): try: clear() banner() while", "colors6 = cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9 = cor[randint(0,15)] #", "in result: video = result['entries'][0] else: video = result video_title = video['title'] #", "}, 'listformat': { 'lisformats': True } } # Url Download def download(link, data):", "| | | | |.' .' `. `-:_| | |_:-' .' - Welcome", "the validity of the given url def check(link): try: requests.get(link) return True except", "check_inp == 4: download(link, config['Video']) elif check_inp == 3: download(link, config['Audio']) else: print(colors8", "Script With The Same URL And The Same Options To Continue Downloading!\") exit(1)", "Clear Screen def clear(): clear = os.system('clear') return clear # banner def banner():", "} } # Url Download def download(link, data): try: with dl.YoutubeDL(data) as ydl:", "+ colors6) exit(1) if check(link): print(colors6 + \"Title Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5", "while True: try: if net('https://pornhub.com/'): link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 +", "_ : : /: ' .-=_ _=-. ` ;\\ : : :|-.._ '", "Choice :(\") check_str = str(input(colors7 + \"[*] Do You Want To Continue? (Y/n):", "banner def banner(): print(colors + \"\"\" .'\\ /`. .'.-.`-'.-.`. ..._: .-. .-. 
:_...", "colors9) if not valid.url(link): print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \"", "err: print(colors + err) # Extract URL Information def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl':", "print(colors6 + \"Title Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download an Audio", "requests.exceptions.ConnectionError: print(colors4 + \"[!] disconnected from network.\") return False except requests.exceptions.HTTPError as err:", "2: config['Video']['noplaylist'] = False download(link, config['Video']) elif check_inp == 4: download(link, config['Video']) elif", "def clear(): clear = os.system('clear') return clear # banner def banner(): print(colors +", "= False download(link, config['Audio']) elif check_inp == 2: config['Video']['noplaylist'] = False download(link, config['Video'])", "check_inp == 2: config['Video']['noplaylist'] = False download(link, config['Video']) elif check_inp == 4: download(link,", "= int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if check_inp in [1,2,3,4]: if check_inp", "+ colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Please Try Again\" +", "# banner def banner(): print(colors + \"\"\" .'\\ /`. .'.-.`-'.-.`. ..._: .-. 
.-.", "Screen def clear(): clear = os.system('clear') return clear # banner def banner(): print(colors", "int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if check_inp in [1,2,3,4]: if check_inp ==", "'137', }] }, 'list': { 'listsubtitles': True }, 'listformat': { 'lisformats': True }", "3: download(link, config['Audio']) else: print(colors8 + \"Unknown Choice :(\") check_str = str(input(colors7 +", "}] }, 'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat':", "time import sleep as sl from random import random,randint except ImportError: print('['+'*'*20+']') print('Module", "banner(): print(colors + \"\"\" .'\\ /`. .'.-.`-'.-.`. ..._: .-. .-. :_... .' '-.(o", "print(colors5 + \"[*] 1.Download an Audio playlist\") print(colors3 + \"[*] 2.Download a Video", "{ 'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137',", "e: # catastrophic error. bail. print(e) sys.exit(1) return True # Check the validity", "# Check if user is connected to internet def net(url): try: requests.get(url) except", "of the given url def check(link): try: requests.get(link) return True except requests.exceptions.ConnectionError: print(colors4", "|`:-:-.-:-:'| |:' : `. `.| | | | | | |.' .' 
`.", "By r2dr0dn # Hd Video Downloader For PornHub # Don't Copy The Code", "'lisformats': True } } # Url Download def download(link, data): try: with dl.YoutubeDL(data)", ";\\ : : :|-.._ ' ` _..-|: : : `:| |`:-:-.-:-:'| |:' :", "colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Unvalid Url!!!\" + colors6) print(colors8 +", "ydl.download([link]) except dl.utils.DownloadError as err: print(colors + err) # Extract URL Information def", "video file\") check_inp = int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if check_inp in", "` ;\\ : : :|-.._ ' ` _..-|: : : `:| |`:-:-.-:-:'| |:'", "False download(link, config['Video']) elif check_inp == 4: download(link, config['Video']) elif check_inp == 3:", "clear # banner def banner(): print(colors + \"\"\" .'\\ /`. .'.-.`-'.-.`. ..._: .-.", "}] }, 'list': { 'listsubtitles': True }, 'listformat': { 'lisformats': True } }", "_ _`~(_)~`_ _ _ : : /: ' .-=_ _=-. ` ;\\ :", "- Welcome To PrNdOwN! `-._ ```` _.-' ``-------'' \"\"\") # Check if user", "With The Same URL And The Same Options To Continue Downloading!\") exit(1) except", "url def check(link): try: requests.get(link) return True except requests.exceptions.ConnectionError: print(colors4 + \"[!] 
disconnected", "main(): try: clear() banner() while True: try: if net('https://pornhub.com/'): link = input(colors2 +", "if not valid.url(link): print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Unvalid", "get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result = ydl2.extract_info(link,download=False) if 'entries' in", "False except requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too many Redirects.\") return False except requests.exceptions.RequestException", "elif check_inp == 3: download(link, config['Audio']) else: print(colors8 + \"Unknown Choice :(\") check_str", "result video_title = video['title'] # video_url = video['url'] return video_title # Main Function", "To PrNdOwN! `-._ ```` _.-' ``-------'' \"\"\") # Check if user is connected", "except requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too many Redirects.\") return False except requests.exceptions.RequestException as", "exit(1) if check(link): print(colors6 + \"Title Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*]", "The Script With The Same URL And The Same Options To Continue Downloading!\")", "Status: Not Found!') print('['+'*'*20+']') print('Please Install It Using [pip3 install youtube-dl]') print('['+'*'*20+']') #", "And The Same Options To Continue Downloading!\") exit(1) except RuntimeError: exit(1) if __name__", "Created By r2dr0dn # Hd Video Downloader For PornHub # Don't Copy The", "print('['+'*'*20+']') print('Module [youtube-dl] Status: Not Found!') print('['+'*'*20+']') print('Please Install It Using [pip3 install", "`. `.| | | | | | |.' .' `. `-:_| | |_:-'", ": : /: ' .-=_ _=-. 
` ;\\ : : :|-.._ ' `", "+ \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Please Try Again\" + colors6) exit(1) if", "youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2 =", "import youtube_dl as dl import validators as valid from time import sleep as", "config['Video']) elif check_inp == 4: download(link, config['Video']) elif check_inp == 3: download(link, config['Audio'])", "Please Try Again\" + colors6) exit(1) if check(link): print(colors6 + \"Title Video: \"", ".-. .-. :_... .' '-.(o ) (o ).-' `. : _ _ _`~(_)~`_", "result = ydl2.extract_info(link,download=False) if 'entries' in result: video = result['entries'][0] else: video =", "Enter the link: \" + colors9) if not valid.url(link): print(\"\\n\" + colors8 +", "else: print(colors8 + \"Unknown Choice :(\") check_str = str(input(colors7 + \"[*] Do You", "print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Unvalid Url!!!\" + colors6)", "` _..-|: : : `:| |`:-:-.-:-:'| |:' : `. `.| | | |", "False # Configuration File config = { 'Audio': { 'format': 'bestaudio/best', 'noplaylist': True,", "a Single Audio\") print(colors8 + \"[*] 4.Download a single video file\") check_inp =", "Unvalid Url!!!\" + colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Please Try", "+ \"[*] Do You Want To Continue? (Y/n): \")) if check_str in ['Y','y']:", "Please check your network connection.\") return False except requests.exceptions.Timeout: print(colors2 + \"[!!!] 
Site", "from network.\") return False except requests.exceptions.HTTPError as err: print(err) return False # Configuration", "+ \"Cya Next Time\") break except dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred !!!\") print(colors4", "requests.exceptions.ConnectionError: print(colors + \"[!] Please check your network connection.\") return False except requests.exceptions.Timeout:", "colors3 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7", "}, 'list': { 'listsubtitles': True }, 'listformat': { 'lisformats': True } } #", "ydl2.extract_info(link,download=False) if 'entries' in result: video = result['entries'][0] else: video = result video_title", "#'preferredquality': '137', }] }, 'list': { 'listsubtitles': True }, 'listformat': { 'lisformats': True", "dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result = ydl2.extract_info(link,download=False) if 'entries' in result: video =", "+ err) # Extract URL Information def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with", "`:| |`:-:-.-:-:'| |:' : `. `.| | | | | | |.' 
.'", "== 1: config['Audio']['noplaylist'] = False download(link, config['Audio']) elif check_inp == 2: config['Video']['noplaylist'] =", "if check_str in ['Y','y']: banner() continue else: print(colors6 + \"Cya Next Time\") break", "= False download(link, config['Video']) elif check_inp == 4: download(link, config['Video']) elif check_inp ==", "in ['Y','y']: banner() continue else: print(colors6 + \"Cya Next Time\") break except dl.utils.DownloadError:", "user is connected to internet def net(url): try: requests.get(url) except requests.exceptions.ConnectionError: print(colors +", "``-------'' \"\"\") # Check if user is connected to internet def net(url): try:", "{ 'lisformats': True } } # Url Download def download(link, data): try: with", "\"[*] Too many Redirects.\") return False except requests.exceptions.RequestException as e: # catastrophic error.", "playlist\") print(colors7 + \"[*] 3.Download a Single Audio\") print(colors8 + \"[*] 4.Download a", "as sl from random import random,randint except ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status: Not", "print(colors + \"[!] Please check your network connection.\") return False except requests.exceptions.Timeout: print(colors2", "False except requests.exceptions.Timeout: print(colors2 + \"[!!!] Site is taking too long to load,", "single video file\") check_inp = int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if check_inp", "return False except requests.exceptions.HTTPError as err: print(err) return False # Configuration File config", "`-:_| | |_:-' .' - Welcome To PrNdOwN! `-._ ```` _.-' ``-------'' \"\"\")", "cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9 = cor[randint(0,15)] # Clear Screen", "cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9 = cor[randint(0,15)]", "str(input(colors7 + \"[*] Do You Want To Continue? 
(Y/n): \")) if check_str in", "sys.exit(1) return True # Check the validity of the given url def check(link):", "Run The Script With The Same URL And The Same Options To Continue", "requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too many Redirects.\") return False except requests.exceptions.RequestException as e:", "return True # Check the validity of the given url def check(link): try:", "the given url def check(link): try: requests.get(link) return True except requests.exceptions.ConnectionError: print(colors4 +", "return False # Configuration File config = { 'Audio': { 'format': 'bestaudio/best', 'noplaylist':", "not valid.url(link): print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Unvalid Url!!!\"", "def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result = ydl2.extract_info(link,download=False) if 'entries'", "Too many Redirects.\") return False except requests.exceptions.RequestException as e: # catastrophic error. 
bail.", "config['Audio']) elif check_inp == 2: config['Video']['noplaylist'] = False download(link, config['Video']) elif check_inp ==", "link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \" Enter the link: \"", "'mp3', 'preferredquality': '192', }] }, 'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{", "Extract URL Information def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result =", "ydl: ydl.download([link]) except dl.utils.DownloadError as err: print(colors + err) # Extract URL Information", "your choice------------\"+colors5+\"]: \")) if check_inp in [1,2,3,4]: if check_inp == 1: config['Audio']['noplaylist'] =", "if check(link): print(colors6 + \"Title Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download", "Reset=\"\\033[0m\" cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4 = cor[randint(0,15)]", ": /: ' .-=_ _=-. ` ;\\ : : :|-.._ ' ` _..-|:", ": `:| |`:-:-.-:-:'| |:' : `. 
`.| | | | | | |.'", "It Using [pip3 install youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors", "{ 'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192',", "clear() banner() while True: try: if net('https://pornhub.com/'): link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" +", "taking too long to load, TimeOut.\") return False except requests.exceptions.TooManyRedirects: print(colors3 + \"[*]", "Welcome To PrNdOwN! `-._ ```` _.-' ``-------'' \"\"\") # Check if user is", "Options To Continue Downloading!\") exit(1) except RuntimeError: exit(1) if __name__ == '__main__': main()", "choice------------\"+colors5+\"]: \")) if check_inp in [1,2,3,4]: if check_inp == 1: config['Audio']['noplaylist'] = False", "check_inp == 1: config['Audio']['noplaylist'] = False download(link, config['Audio']) elif check_inp == 2: config['Video']['noplaylist']", "cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4 = cor[randint(0,15)]", ".' '-.(o ) (o ).-' `. : _ _ _`~(_)~`_ _ _ :", "+ \"[!!!] Site is taking too long to load, TimeOut.\") return False except", "import random,randint except ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status: Not Found!') print('['+'*'*20+']') print('Please Install", "print(colors2 + \"[!!!] 
Site is taking too long to load, TimeOut.\") return False", "with dl.YoutubeDL(data) as ydl: ydl.download([link]) except dl.utils.DownloadError as err: print(colors + err) #", "config['Audio']) else: print(colors8 + \"Unknown Choice :(\") check_str = str(input(colors7 + \"[*] Do", "| | |.' .' `. `-:_| | |_:-' .' - Welcome To PrNdOwN!", "except requests.exceptions.HTTPError as err: print(err) return False # Configuration File config = {", "except ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status: Not Found!') print('['+'*'*20+']') print('Please Install It Using", "[youtube-dl] Status: Not Found!') print('['+'*'*20+']') print('Please Install It Using [pip3 install youtube-dl]') print('['+'*'*20+']')", "= cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9 = cor[randint(0,15)] # Clear Screen def clear():", ".-=_ _=-. ` ;\\ : : :|-.._ ' ` _..-|: : : `:|", "def check(link): try: requests.get(link) return True except requests.exceptions.ConnectionError: print(colors4 + \"[!] disconnected from", "# Configuration File config = { 'Audio': { 'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors':", "'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137', }] }, 'list':", "check(link): print(colors6 + \"Title Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download an", "requests.get(link) return True except requests.exceptions.ConnectionError: print(colors4 + \"[!] disconnected from network.\") return False", "connection.\") return False except requests.exceptions.Timeout: print(colors2 + \"[!!!] Site is taking too long", "print(colors3 + \"[*] Too many Redirects.\") return False except requests.exceptions.RequestException as e: #", "| |_:-' .' - Welcome To PrNdOwN! 
`-._ ```` _.-' ``-------'' \"\"\") #", "to load, TimeOut.\") return False except requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too many Redirects.\")", "(Y/n): \")) if check_str in ['Y','y']: banner() continue else: print(colors6 + \"Cya Next", "dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred !!!\") print(colors4 + \"Re Run The Script With", "is connected to internet def net(url): try: requests.get(url) except requests.exceptions.ConnectionError: print(colors + \"[!]", "print(colors3 + \"DownloadError Occurred !!!\") print(colors4 + \"Re Run The Script With The", "} # Url Download def download(link, data): try: with dl.YoutubeDL(data) as ydl: ydl.download([link])", "cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3", "!!!\") print(colors4 + \"Re Run The Script With The Same URL And The", "sleep as sl from random import random,randint except ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status:", "config['Video']) elif check_inp == 3: download(link, config['Audio']) else: print(colors8 + \"Unknown Choice :(\")", "True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137', }] }, 'list': {", "+ \" Enter the link: \" + colors9) if not valid.url(link): print(\"\\n\" +", "+ \"[*] 3.Download a Single Audio\") print(colors8 + \"[*] 4.Download a single video", "file\") check_inp = int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if check_inp in [1,2,3,4]:", 
"`.| | | | | | |.' .' `. `-:_| | |_:-' .'", "try: with dl.YoutubeDL(data) as ydl: ydl.download([link]) except dl.utils.DownloadError as err: print(colors + err)", "'listsubtitles': True }, 'listformat': { 'lisformats': True } } # Url Download def", "elif check_inp == 2: config['Video']['noplaylist'] = False download(link, config['Video']) elif check_inp == 4:", "download(link, config['Video']) elif check_inp == 3: download(link, config['Audio']) else: print(colors8 + \"Unknown Choice", "download(link, data): try: with dl.YoutubeDL(data) as ydl: ydl.download([link]) except dl.utils.DownloadError as err: print(colors", "cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6 = cor[randint(0,15)]", "os.system('clear') return clear # banner def banner(): print(colors + \"\"\" .'\\ /`. .'.-.`-'.-.`.", "+ \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \" Enter the link: \" + colors9) if", "' ` _..-|: : : `:| |`:-:-.-:-:'| |:' : `. `.| | |", "+ colors7 + \" Please Try Again\" + colors6) exit(1) if check(link): print(colors6", "The Code Without Giving The Credits Nerd from __future__ import unicode_literals try: import", "'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137', }]", "in [1,2,3,4]: if check_inp == 1: config['Audio']['noplaylist'] = False download(link, config['Audio']) elif check_inp", "cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9 = cor[randint(0,15)] # Clear Screen def clear(): clear", "print(e) sys.exit(1) return True # Check the validity of the given url def", "\")) if check_inp in [1,2,3,4]: if check_inp == 1: config['Audio']['noplaylist'] = False download(link,", "\" +colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download an Audio playlist\") print(colors3 + \"[*]", ": : `:| |`:-:-.-:-:'| |:' : `. 
`.| | | | | |", "# Extract URL Information def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result", "# video_url = video['url'] return video_title # Main Function def main(): try: clear()", "['Y','y']: banner() continue else: print(colors6 + \"Cya Next Time\") break except dl.utils.DownloadError: print(colors3", "banner() continue else: print(colors6 + \"Cya Next Time\") break except dl.utils.DownloadError: print(colors3 +", "Main Function def main(): try: clear() banner() while True: try: if net('https://pornhub.com/'): link", "'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }]", "except requests.exceptions.ConnectionError: print(colors + \"[!] Please check your network connection.\") return False except", "download(link, config['Audio']) elif check_inp == 2: config['Video']['noplaylist'] = False download(link, config['Video']) elif check_inp", "' .-=_ _=-. ` ;\\ : : :|-.._ ' ` _..-|: : :", "\" Unvalid Url!!!\" + colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Please", "\"[!] disconnected from network.\") return False except requests.exceptions.HTTPError as err: print(err) return False", "\"[*] 4.Download a single video file\") check_inp = int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]:", "cor[randint(0,15)] # Clear Screen def clear(): clear = os.system('clear') return clear # banner", "}, 'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4',", "def net(url): try: requests.get(url) except requests.exceptions.ConnectionError: print(colors + \"[!] Please check your network", "| | | | | |.' .' `. `-:_| | |_:-' .' -", "as err: print(err) return False # Configuration File config = { 'Audio': {", "/: ' .-=_ _=-. 
` ;\\ : : :|-.._ ' ` _..-|: :", "'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }] }, 'Video': { 'format':", "as err: print(colors + err) # Extract URL Information def get_info(link): ydl2 =", "True: try: if net('https://pornhub.com/'): link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \"", "net('https://pornhub.com/'): link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \" Enter the link:", "= str(input(colors7 + \"[*] Do You Want To Continue? (Y/n): \")) if check_str", "'Audio': { 'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality':", "'preferredcodec': 'mp3', 'preferredquality': '192', }] }, 'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors':", ": :|-.._ ' ` _..-|: : : `:| |`:-:-.-:-:'| |:' : `. `.|", "load, TimeOut.\") return False except requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too many Redirects.\") return", "colors4 = cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8", "as e: # catastrophic error. bail. print(e) sys.exit(1) return True # Check the", "Try Again\" + colors6) exit(1) if check(link): print(colors6 + \"Title Video: \" +colors+", "TimeOut.\") return False except requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too many Redirects.\") return False", "sl from random import random,randint except ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status: Not Found!')", "to internet def net(url): try: requests.get(url) except requests.exceptions.ConnectionError: print(colors + \"[!] 
Please check", "check_inp = int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if check_inp in [1,2,3,4]: if", "# Colors: Reset=\"\\033[0m\" cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4", "`. `-:_| | |_:-' .' - Welcome To PrNdOwN! `-._ ```` _.-' ``-------''", "Audio playlist\") print(colors3 + \"[*] 2.Download a Video playlist\") print(colors7 + \"[*] 3.Download", "\")) if check_str in ['Y','y']: banner() continue else: print(colors6 + \"Cya Next Time\")", "URL Information def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result = ydl2.extract_info(link,download=False)", "You Want To Continue? (Y/n): \")) if check_str in ['Y','y']: banner() continue else:", "\"[*] Do You Want To Continue? 
(Y/n): \")) if check_str in ['Y','y']: banner()", "# Main Function def main(): try: clear() banner() while True: try: if net('https://pornhub.com/'):", "print('Please Install It Using [pip3 install youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor =", "{ 'listsubtitles': True }, 'listformat': { 'lisformats': True } } # Url Download", "try: clear() banner() while True: try: if net('https://pornhub.com/'): link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\"", "if check_inp in [1,2,3,4]: if check_inp == 1: config['Audio']['noplaylist'] = False download(link, config['Audio'])", "an Audio playlist\") print(colors3 + \"[*] 2.Download a Video playlist\") print(colors7 + \"[*]", ":(\") check_str = str(input(colors7 + \"[*] Do You Want To Continue? (Y/n): \"))", "Next Time\") break except dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred !!!\") print(colors4 + \"Re", "colors7 + \" Unvalid Url!!!\" + colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 +", "`-._ ```` _.-' ``-------'' \"\"\") # Check if user is connected to internet", "\"Unknown Choice :(\") check_str = str(input(colors7 + \"[*] Do You Want To Continue?", "_ _ : : /: ' .-=_ _=-. ` ;\\ : : :|-.._", "(o ).-' `. 
: _ _ _`~(_)~`_ _ _ : : /: '", "err: print(err) return False # Configuration File config = { 'Audio': { 'format':", "try: if net('https://pornhub.com/'): link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \" Enter", "playlist\") print(colors3 + \"[*] 2.Download a Video playlist\") print(colors7 + \"[*] 3.Download a", "File config = { 'Audio': { 'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key':", "as dl import validators as valid from time import sleep as sl from", "+ \"Unknown Choice :(\") check_str = str(input(colors7 + \"[*] Do You Want To", "[pip3 install youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)]", "banner() while True: try: if net('https://pornhub.com/'): link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2", "Again\" + colors6) exit(1) if check(link): print(colors6 + \"Title Video: \" +colors+ \"{}\".format(get_info(link)))", "check_inp == 3: download(link, config['Audio']) else: print(colors8 + \"Unknown Choice :(\") check_str =", "= { 'Audio': { 'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec':", "colors = cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4", "False except requests.exceptions.HTTPError as err: print(err) return False # Configuration File config =", "+ colors9) if not valid.url(link): print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 +", "import sleep as sl 
from random import random,randint except ImportError: print('['+'*'*20+']') print('Module [youtube-dl]", ").-' `. : _ _ _`~(_)~`_ _ _ : : /: ' .-=_", "video['title'] # video_url = video['url'] return video_title # Main Function def main(): try:", "\"\"\" .'\\ /`. .'.-.`-'.-.`. ..._: .-. .-. :_... .' '-.(o ) (o ).-'", "|:' : `. `.| | | | | | |.' .' `. `-:_|", "The Credits Nerd from __future__ import unicode_literals try: import os,sys,requests import youtube_dl as", ".'.-.`-'.-.`. ..._: .-. .-. :_... .' '-.(o ) (o ).-' `. : _", "== 3: download(link, config['Audio']) else: print(colors8 + \"Unknown Choice :(\") check_str = str(input(colors7", "colors5 = cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9", "import os,sys,requests import youtube_dl as dl import validators as valid from time import", "= dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result = ydl2.extract_info(link,download=False) if 'entries' in result: video", "print(colors7 + \"[*] 3.Download a Single Audio\") print(colors8 + \"[*] 4.Download a single", "python3 # Created By r2dr0dn # Hd Video Downloader For PornHub # Don't", "your network connection.\") return False except requests.exceptions.Timeout: print(colors2 + \"[!!!] Site is taking", "'%(id)s%(ext)s'}) with ydl2: result = ydl2.extract_info(link,download=False) if 'entries' in result: video = result['entries'][0]", "True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }] }, 'Video': {", "except dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred !!!\") print(colors4 + \"Re Run The Script", "colors4 = cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6", "| |.' .' `. `-:_| | |_:-' .' - Welcome To PrNdOwN! 
`-._", "'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality':", "bail. print(e) sys.exit(1) return True # Check the validity of the given url", "Nerd from __future__ import unicode_literals try: import os,sys,requests import youtube_dl as dl import", "\"[!] Please check your network connection.\") return False except requests.exceptions.Timeout: print(colors2 + \"[!!!]", "Want To Continue? (Y/n): \")) if check_str in ['Y','y']: banner() continue else: print(colors6", "= result['entries'][0] else: video = result video_title = video['title'] # video_url = video['url']", "dl.utils.DownloadError as err: print(colors + err) # Extract URL Information def get_info(link): ydl2", "Giving The Credits Nerd from __future__ import unicode_literals try: import os,sys,requests import youtube_dl", "Occurred !!!\") print(colors4 + \"Re Run The Script With The Same URL And", "_..-|: : : `:| |`:-:-.-:-:'| |:' : `. 
`.| | | | |", "Same Options To Continue Downloading!\") exit(1) except RuntimeError: exit(1) if __name__ == '__main__':", "random,randint except ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status: Not Found!') print('['+'*'*20+']') print('Please Install It", "\"[*] 2.Download a Video playlist\") print(colors7 + \"[*] 3.Download a Single Audio\") print(colors8", "import validators as valid from time import sleep as sl from random import", "ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status: Not Found!') print('['+'*'*20+']') print('Please Install It Using [pip3", "print(err) return False # Configuration File config = { 'Audio': { 'format': 'bestaudio/best',", "validity of the given url def check(link): try: requests.get(link) return True except requests.exceptions.ConnectionError:", "if user is connected to internet def net(url): try: requests.get(url) except requests.exceptions.ConnectionError: print(colors", "colors2 + \" Enter the link: \" + colors9) if not valid.url(link): print(\"\\n\"", "Continue? (Y/n): \")) if check_str in ['Y','y']: banner() continue else: print(colors6 + \"Cya", "|_:-' .' - Welcome To PrNdOwN! `-._ ```` _.-' ``-------'' \"\"\") # Check", "import unicode_literals try: import os,sys,requests import youtube_dl as dl import validators as valid", "\"Title Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download an Audio playlist\") print(colors3", "'-.(o ) (o ).-' `. : _ _ _`~(_)~`_ _ _ : :", "Do You Want To Continue? (Y/n): \")) if check_str in ['Y','y']: banner() continue", "r2dr0dn # Hd Video Downloader For PornHub # Don't Copy The Code Without", ".-. :_... .' '-.(o ) (o ).-' `. 
: _ _ _`~(_)~`_ _", "True } } # Url Download def download(link, data): try: with dl.YoutubeDL(data) as", "'192', }] }, 'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegVideoConvertor',", "config = { 'Audio': { 'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio',", "cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5 = cor[randint(0,15)]", "try: import os,sys,requests import youtube_dl as dl import validators as valid from time", "'listformat': { 'lisformats': True } } # Url Download def download(link, data): try:", "print(colors + \"\"\" .'\\ /`. .'.-.`-'.-.`. ..._: .-. .-. :_... .' '-.(o )", "+ \"[*] 2.Download a Video playlist\") print(colors7 + \"[*] 3.Download a Single Audio\")", "\"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \" Enter the link: \" + colors9) if not", "a single video file\") check_inp = int(input(colors + \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if", "colors8 = cor[randint(0,15)] colors9 = cor[randint(0,15)] # Clear Screen def clear(): clear =", "'preferredquality': '192', }] }, 'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key':", "video_url = video['url'] return video_title # Main Function def main(): try: clear() banner()", "return False except requests.exceptions.Timeout: print(colors2 + \"[!!!] 
Site is taking too long to", "clear = os.system('clear') return clear # banner def banner(): print(colors + \"\"\" .'\\", "long to load, TimeOut.\") return False except requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too many", "'mp4', #'preferredquality': '137', }] }, 'list': { 'listsubtitles': True }, 'listformat': { 'lisformats':", "+ \"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if check_inp in [1,2,3,4]: if check_inp == 1:", "Credits Nerd from __future__ import unicode_literals try: import os,sys,requests import youtube_dl as dl", "= os.system('clear') return clear # banner def banner(): print(colors + \"\"\" .'\\ /`.", "\"\"\") # Check if user is connected to internet def net(url): try: requests.get(url)", "video = result['entries'][0] else: video = result video_title = video['title'] # video_url =", "colors6) exit(1) if check(link): print(colors6 + \"Title Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5 +", "continue else: print(colors6 + \"Cya Next Time\") break except dl.utils.DownloadError: print(colors3 + \"DownloadError", "config['Audio']['noplaylist'] = False download(link, config['Audio']) elif check_inp == 2: config['Video']['noplaylist'] = False download(link,", "def download(link, data): try: with dl.YoutubeDL(data) as ydl: ydl.download([link]) except dl.utils.DownloadError as err:", "print(colors6 + \"Cya Next Time\") break except dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred !!!\")", "#!/usr/bin/env python3 # Created By r2dr0dn # Hd Video Downloader For PornHub #", "unicode_literals try: import os,sys,requests import youtube_dl as dl import validators as valid from", "Same URL And The Same Options To Continue Downloading!\") exit(1) except RuntimeError: exit(1)", "\"Re Run The Script With The Same URL And The Same Options To", "= cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4 =", 
"'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137', }] }, 'list': { 'listsubtitles': True }, 'listformat':", "colors7 + \" Please Try Again\" + colors6) exit(1) if check(link): print(colors6 +", "= result video_title = video['title'] # video_url = video['url'] return video_title # Main", "video = result video_title = video['title'] # video_url = video['url'] return video_title #", "dl import validators as valid from time import sleep as sl from random", "[\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3 = cor[randint(0,15)]", "from random import random,randint except ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status: Not Found!') print('['+'*'*20+']')", "[{ 'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137', }] }, 'list': { 'listsubtitles': True", "requests.exceptions.Timeout: print(colors2 + \"[!!!] Site is taking too long to load, TimeOut.\") return", "Url!!!\" + colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Please Try Again\"", "print(colors4 + \"[!] 
disconnected from network.\") return False except requests.exceptions.HTTPError as err: print(err)", "'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4', #'preferredquality': '137', }] }, 'list': { 'listsubtitles': True },", "\"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download an Audio playlist\") print(colors3 + \"[*] 2.Download a", "def main(): try: clear() banner() while True: try: if net('https://pornhub.com/'): link = input(colors2", "Url Download def download(link, data): try: with dl.YoutubeDL(data) as ydl: ydl.download([link]) except dl.utils.DownloadError", "if check_inp == 1: config['Audio']['noplaylist'] = False download(link, config['Audio']) elif check_inp == 2:", "{ 'Audio': { 'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',", "__future__ import unicode_literals try: import os,sys,requests import youtube_dl as dl import validators as", "False except requests.exceptions.RequestException as e: # catastrophic error. bail. print(e) sys.exit(1) return True", "valid.url(link): print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Unvalid Url!!!\" +", "cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8 = cor[randint(0,15)]", "with ydl2: result = ydl2.extract_info(link,download=False) if 'entries' in result: video = result['entries'][0] else:", "\"[!!!] Site is taking too long to load, TimeOut.\") return False except requests.exceptions.TooManyRedirects:", "_`~(_)~`_ _ _ : : /: ' .-=_ _=-. 
` ;\\ : :", "\"Cya Next Time\") break except dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred !!!\") print(colors4 +", "return video_title # Main Function def main(): try: clear() banner() while True: try:", "cor[randint(0,15)] colors9 = cor[randint(0,15)] # Clear Screen def clear(): clear = os.system('clear') return", "Check the validity of the given url def check(link): try: requests.get(link) return True", "ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result = ydl2.extract_info(link,download=False) if 'entries' in result:", "\" + colors9) if not valid.url(link): print(\"\\n\" + colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7", "Function def main(): try: clear() banner() while True: try: if net('https://pornhub.com/'): link =", ": `. `.| | | | | | |.' .' `. `-:_| |", "False download(link, config['Audio']) elif check_inp == 2: config['Video']['noplaylist'] = False download(link, config['Video']) elif", "Single Audio\") print(colors8 + \"[*] 4.Download a single video file\") check_inp = int(input(colors", "def banner(): print(colors + \"\"\" .'\\ /`. .'.-.`-'.-.`. ..._: .-. .-. :_... .'", "+ colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Unvalid Url!!!\" + colors6) print(colors8", "# Hd Video Downloader For PornHub # Don't Copy The Code Without Giving", "Don't Copy The Code Without Giving The Credits Nerd from __future__ import unicode_literals", "2.Download a Video playlist\") print(colors7 + \"[*] 3.Download a Single Audio\") print(colors8 +", "+ \"[*] Too many Redirects.\") return False except requests.exceptions.RequestException as e: # catastrophic", "colors7 = cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9 = cor[randint(0,15)] # Clear Screen def", "if 'entries' in result: video = result['entries'][0] else: video = result video_title =", "+ colors2 + \" Enter the link: \" + colors9) if not valid.url(link):", ".' - Welcome To PrNdOwN! 
`-._ ```` _.-' ``-------'' \"\"\") # Check if", "err) # Extract URL Information def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2:", "result: video = result['entries'][0] else: video = result video_title = video['title'] # video_url", "_ _ _`~(_)~`_ _ _ : : /: ' .-=_ _=-. ` ;\\", "is taking too long to load, TimeOut.\") return False except requests.exceptions.TooManyRedirects: print(colors3 +", "# catastrophic error. bail. print(e) sys.exit(1) return True # Check the validity of", "= video['title'] # video_url = video['url'] return video_title # Main Function def main():", "== 2: config['Video']['noplaylist'] = False download(link, config['Video']) elif check_inp == 4: download(link, config['Video'])", "# Don't Copy The Code Without Giving The Credits Nerd from __future__ import", "To Continue? (Y/n): \")) if check_str in ['Y','y']: banner() continue else: print(colors6 +", "= input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \" Enter the link: \" +", "Without Giving The Credits Nerd from __future__ import unicode_literals try: import os,sys,requests import", "print(colors8 + \"[*] 4.Download a single video file\") check_inp = int(input(colors + \"[\"+colors4+\"------------Enter", "internet def net(url): try: requests.get(url) except requests.exceptions.ConnectionError: print(colors + \"[!] Please check your", "os,sys,requests import youtube_dl as dl import validators as valid from time import sleep", "_=-. 
` ;\\ : : :|-.._ ' ` _..-|: : : `:| |`:-:-.-:-:'|", "PornHub # Don't Copy The Code Without Giving The Credits Nerd from __future__", "\"[\"+colors4+\"------------Enter your choice------------\"+colors5+\"]: \")) if check_inp in [1,2,3,4]: if check_inp == 1: config['Audio']['noplaylist']", "video_title = video['title'] # video_url = video['url'] return video_title # Main Function def", "else: print(colors6 + \"Cya Next Time\") break except dl.utils.DownloadError: print(colors3 + \"DownloadError Occurred", "return False except requests.exceptions.RequestException as e: # catastrophic error. bail. print(e) sys.exit(1) return", "+ \"\"\" .'\\ /`. .'.-.`-'.-.`. ..._: .-. .-. :_... .' '-.(o ) (o", "print(colors4 + \"Re Run The Script With The Same URL And The Same", "catastrophic error. bail. print(e) sys.exit(1) return True # Check the validity of the", "= cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7 =", "+ \"Title Video: \" +colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download an Audio playlist\")", "True # Check the validity of the given url def check(link): try: requests.get(link)", "dl.YoutubeDL(data) as ydl: ydl.download([link]) except dl.utils.DownloadError as err: print(colors + err) # Extract", "requests.get(url) except requests.exceptions.ConnectionError: print(colors + \"[!] Please check your network connection.\") return False", "validators as valid from time import sleep as sl from random import random,randint", "```` _.-' ``-------'' \"\"\") # Check if user is connected to internet def", "print(colors3 + \"[*] 2.Download a Video playlist\") print(colors7 + \"[*] 3.Download a Single", "True except requests.exceptions.ConnectionError: print(colors4 + \"[!] 
disconnected from network.\") return False except requests.exceptions.HTTPError", "Copy The Code Without Giving The Credits Nerd from __future__ import unicode_literals try:", "except requests.exceptions.RequestException as e: # catastrophic error. bail. print(e) sys.exit(1) return True #", "'entries' in result: video = result['entries'][0] else: video = result video_title = video['title']", "Colors: Reset=\"\\033[0m\" cor = [\"\\033[1;33m\",\"\\033[1;34m\",\"\\033[1;30m\",\"\\033[1;36m\",\"\\033[1;31m\",\"\\033[35m\",\"\\033[95m\",\"\\033[96m\",\"\\033[39m\",\"\\033[38;5;82m\",\"\\033[38;5;198m\",\"\\033[38;5;208m\",\"\\033[38;5;167m\",\"\\033[38;5;91m\",\"\\033[38;5;210m\",\"\\033[38;5;165m\",\"\\033[38;5;49m\",\"\\033[38;5;160m\",\"\\033[38;5;51m\",\"\\033[38;5;13m\",\"\\033[38;5;162m\",\"\\033[38;5;203m\",\"\\033[38;5;113m\",\"\\033[38;5;14m\"] colors = cor[randint(0,15)] colors2 = cor[randint(0,15)] colors4 =", "Audio\") print(colors8 + \"[*] 4.Download a single video file\") check_inp = int(input(colors +", "Check if user is connected to internet def net(url): try: requests.get(url) except requests.exceptions.ConnectionError:", "= ydl2.extract_info(link,download=False) if 'entries' in result: video = result['entries'][0] else: video = result", "video_title # Main Function def main(): try: clear() banner() while True: try: if", "cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5 = cor[randint(0,15)] colors6 = cor[randint(0,15)] colors7 = cor[randint(0,15)]", "| | | |.' .' `. `-:_| | |_:-' .' - Welcome To", "# Check the validity of the given url def check(link): try: requests.get(link) return", "except requests.exceptions.ConnectionError: print(colors4 + \"[!] disconnected from network.\") return False except requests.exceptions.HTTPError as", "check(link): try: requests.get(link) return True except requests.exceptions.ConnectionError: print(colors4 + \"[!] 
disconnected from network.\")", "random import random,randint except ImportError: print('['+'*'*20+']') print('Module [youtube-dl] Status: Not Found!') print('['+'*'*20+']') print('Please", "Not Found!') print('['+'*'*20+']') print('Please Install It Using [pip3 install youtube-dl]') print('['+'*'*20+']') # Colors:", "check_str in ['Y','y']: banner() continue else: print(colors6 + \"Cya Next Time\") break except", "print(colors + err) # Extract URL Information def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})", "if net('https://pornhub.com/'): link = input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \" Enter the", "else: video = result video_title = video['title'] # video_url = video['url'] return video_title", "requests.exceptions.HTTPError as err: print(err) return False # Configuration File config = { 'Audio':", ":_... .' '-.(o ) (o ).-' `. : _ _ _`~(_)~`_ _ _", "check your network connection.\") return False except requests.exceptions.Timeout: print(colors2 + \"[!!!] Site is", "'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }] }, 'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist':", "PrNdOwN! `-._ ```` _.-' ``-------'' \"\"\") # Check if user is connected to", "Information def get_info(link): ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'}) with ydl2: result = ydl2.extract_info(link,download=False) if", "as valid from time import sleep as sl from random import random,randint except", "Video playlist\") print(colors7 + \"[*] 3.Download a Single Audio\") print(colors8 + \"[*] 4.Download", "from __future__ import unicode_literals try: import os,sys,requests import youtube_dl as dl import validators", "except requests.exceptions.Timeout: print(colors2 + \"[!!!] 
Site is taking too long to load, TimeOut.\")", "[{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }] }, 'Video': { 'format': 'bestvideo+bestaudio/best',", "print('['+'*'*20+']') print('Please Install It Using [pip3 install youtube-dl]') print('['+'*'*20+']') # Colors: Reset=\"\\033[0m\" cor", "4: download(link, config['Video']) elif check_inp == 3: download(link, config['Audio']) else: print(colors8 + \"Unknown", "connected to internet def net(url): try: requests.get(url) except requests.exceptions.ConnectionError: print(colors + \"[!] Please", "'list': { 'listsubtitles': True }, 'listformat': { 'lisformats': True } } # Url", "return False except requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too many Redirects.\") return False except", "colors2 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5", "Video Downloader For PornHub # Don't Copy The Code Without Giving The Credits", "= cor[randint(0,15)] colors4 = cor[randint(0,15)] colors3 = cor[randint(0,15)] colors4 = cor[randint(0,15)] colors5 =", "colors9 = cor[randint(0,15)] # Clear Screen def clear(): clear = os.system('clear') return clear", "try: requests.get(url) except requests.exceptions.ConnectionError: print(colors + \"[!] Please check your network connection.\") return", ": _ _ _`~(_)~`_ _ _ : : /: ' .-=_ _=-. 
`", "= cor[randint(0,15)] colors9 = cor[randint(0,15)] # Clear Screen def clear(): clear = os.system('clear')", "# Clear Screen def clear(): clear = os.system('clear') return clear # banner def", "+ \"[*] 4.Download a single video file\") check_inp = int(input(colors + \"[\"+colors4+\"------------Enter your", "+ \" Please Try Again\" + colors6) exit(1) if check(link): print(colors6 + \"Title", "= cor[randint(0,15)] colors7 = cor[randint(0,15)] colors8 = cor[randint(0,15)] colors9 = cor[randint(0,15)] # Clear", "+ colors7 + \" Unvalid Url!!!\" + colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7", "return clear # banner def banner(): print(colors + \"\"\" .'\\ /`. .'.-.`-'.-.`. ..._:", "Configuration File config = { 'Audio': { 'format': 'bestaudio/best', 'noplaylist': True, 'postprocessors': [{", "from time import sleep as sl from random import random,randint except ImportError: print('['+'*'*20+']')", "'bestaudio/best', 'noplaylist': True, 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }] },", "'preferedformat': 'mp4', #'preferredquality': '137', }] }, 'list': { 'listsubtitles': True }, 'listformat': {", "\" Enter the link: \" + colors9) if not valid.url(link): print(\"\\n\" + colors8", "given url def check(link): try: requests.get(link) return True except requests.exceptions.ConnectionError: print(colors4 + \"[!]", "URL And The Same Options To Continue Downloading!\") exit(1) except RuntimeError: exit(1) if", "colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Please Try Again\" + colors6)", "+colors+ \"{}\".format(get_info(link))) print(colors5 + \"[*] 1.Download an Audio playlist\") print(colors3 + \"[*] 2.Download", "+ \"[!] 
disconnected from network.\") return False except requests.exceptions.HTTPError as err: print(err) return", "1.Download an Audio playlist\") print(colors3 + \"[*] 2.Download a Video playlist\") print(colors7 +", "\"[*] 1.Download an Audio playlist\") print(colors3 + \"[*] 2.Download a Video playlist\") print(colors7", "a Video playlist\") print(colors7 + \"[*] 3.Download a Single Audio\") print(colors8 + \"[*]", "check_inp in [1,2,3,4]: if check_inp == 1: config['Audio']['noplaylist'] = False download(link, config['Audio']) elif", "\"DownloadError Occurred !!!\") print(colors4 + \"Re Run The Script With The Same URL", "Redirects.\") return False except requests.exceptions.RequestException as e: # catastrophic error. bail. print(e) sys.exit(1)", "print(colors8 + \"Unknown Choice :(\") check_str = str(input(colors7 + \"[*] Do You Want", "\"[\"+colors2+\"!\"+colors5+\"]\" + colors7 + \" Unvalid Url!!!\" + colors6) print(colors8 + \"[\"+colors2+\"!\"+colors5+\"]\" +", "as ydl: ydl.download([link]) except dl.utils.DownloadError as err: print(colors + err) # Extract URL", "input(colors2 + \"[\"+colors3+\"*\"+colors4+\"]\" + colors2 + \" Enter the link: \" + colors9)", "download(link, config['Video']) elif check_inp == 4: download(link, config['Video']) elif check_inp == 3: download(link,", "Site is taking too long to load, TimeOut.\") return False except requests.exceptions.TooManyRedirects: print(colors3", "video['url'] return video_title # Main Function def main(): try: clear() banner() while True:", ".' `. `-:_| | |_:-' .' - Welcome To PrNdOwN! `-._ ```` _.-'", "too long to load, TimeOut.\") return False except requests.exceptions.TooManyRedirects: print(colors3 + \"[*] Too", "'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }] }, 'Video': { 'format': 'bestvideo+bestaudio/best', 'noplaylist': True,", "Downloader For PornHub # Don't Copy The Code Without Giving The Credits Nerd", "check_str = str(input(colors7 + \"[*] Do You Want To Continue? 
(Y/n): \")) if", "valid from time import sleep as sl from random import random,randint except ImportError:", "result['entries'][0] else: video = result video_title = video['title'] # video_url = video['url'] return" ]
[ "= [] app = Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)],", "'w') as f: f.write('base mail') storage: list[Message] = [] app = Kupala() app.jinja.add_template_dirs(tmpdir)", "from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage) == 1", "from mailers import Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib import", "async def test_send_templated_mail(tmpdir: Path) -> None: with open(tmpdir / 'index.html', 'w') as f:", "= Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body')) assert len(storage) == 1", "assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' assert storage[0].get_payload() == 'base", "as f: f.write('base mail') storage: list[Message] = [] app = Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add(", "/ 'index.html', 'w') as f: f.write('base mail') storage: list[Message] = [] app =", "<root@localhost>')) await send_mail(Email(subject='test', text='body')) assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>'", "'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert", "import send_mail, send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send() -> None: storage: list[Message] = []", "Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body')) assert len(storage) == 1 assert storage[0]['From'] ==", 
"import Message from mailers import Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin from", "@pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path) -> None: with open(tmpdir / 'index.html', 'w') as", "<root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage) == 1 assert", "Path from kupala.application import Kupala from kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio async def", "-> None: storage: list[Message] = [] app = Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>'))", "Path) -> None: with open(tmpdir / 'index.html', 'w') as f: f.write('base mail') storage:", "import Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib import Path from", "app = Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body')) assert len(storage) ==", "Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body')) assert len(storage) == 1 assert", "JinjaRendererPlugin from pathlib import Path from kupala.application import Kupala from kupala.mails import send_mail,", "mailers import Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib import Path", "await send_mail(Email(subject='test', text='body')) assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio", "import Kupala from kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send() -> None:", "def test_send_templated_mail(tmpdir: Path) -> None: with open(tmpdir / 'index.html', 
'w') as f: f.write('base", "[] app = Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body')) assert len(storage)", "InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib import Path from kupala.application import", "plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage) == 1 assert storage[0]['From']", "= [] app = Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body')) assert", "from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body')) assert len(storage) == 1 assert storage[0]['From'] == 'root", "html_template='index.html') assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' assert storage[0].get_payload() ==", "), ) await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage) == 1 assert storage[0]['From'] ==", "import JinjaRendererPlugin from pathlib import Path from kupala.application import Kupala from kupala.mails import", "def test_mail_regular_send() -> None: storage: list[Message] = [] app = Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage),", "'index.html', 'w') as f: f.write('base mail') storage: list[Message] = [] app = Kupala()", "f: f.write('base mail') storage: list[Message] = [] app = Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default',", "f.write('base mail') storage: list[Message] = [] app = Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer(", "from kupala.application import Kupala from kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send()", "list[Message] = [] app = 
Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body'))", "<root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path) -> None: with open(tmpdir / 'index.html', 'w')", "app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test', text='body')) assert len(storage) == 1 assert storage[0]['From']", "= Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await", "app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost', subject='test', html_template='index.html')", "send_mail(Email(subject='test', text='body')) assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio async", "None: with open(tmpdir / 'index.html', 'w') as f: f.write('base mail') storage: list[Message] =", "subject='test', html_template='index.html') assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' assert storage[0].get_payload()", "import Path from kupala.application import Kupala from kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio async", "with open(tmpdir / 'index.html', 'w') as f: f.write('base mail') storage: list[Message] = []", "storage: list[Message] = [] app = Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root", "await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>'", "pathlib import Path from kupala.application import Kupala from 
kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio", ") await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage) == 1 assert storage[0]['From'] == 'root", "from pathlib import Path from kupala.application import Kupala from kupala.mails import send_mail, send_templated_mail", "Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib import Path from kupala.application import Kupala", "storage: list[Message] = [] app = Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await send_mail(Email(subject='test',", "list[Message] = [] app = Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>',", "-> None: with open(tmpdir / 'index.html', 'w') as f: f.write('base mail') storage: list[Message]", "'root <root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path) -> None: with open(tmpdir / 'index.html',", "@pytest.mark.asyncio async def test_mail_regular_send() -> None: storage: list[Message] = [] app = Kupala()", "app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost', subject='test',", "InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage) ==", "from email.message import Message from mailers import Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import", "Message from mailers import Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib", "app = Kupala() 
app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), )", "text='body')) assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio async def", "import pytest from email.message import Message from mailers import Email, InMemoryTransport, Mailer from", "len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' assert storage[0].get_payload() == 'base mail\\n'", "send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send() -> None: storage: list[Message] = [] app =", "mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib import Path from kupala.application import Kupala from kupala.mails", "Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost',", "== 1 assert storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path) ->", "open(tmpdir / 'index.html', 'w') as f: f.write('base mail') storage: list[Message] = [] app", "1 assert storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path) -> None:", "send_mail, send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send() -> None: storage: list[Message] = [] app", "email.message import Message from mailers import Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin", "async def test_mail_regular_send() -> None: storage: list[Message] = [] app = Kupala() app.mail.add('default',", "kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send() -> None: storage: list[Message] =", "assert len(storage) == 1 assert 
storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir:", "storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path) -> None: with open(tmpdir", "Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib import Path from kupala.application", "None: storage: list[Message] = [] app = Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>')) await", "kupala.application import Kupala from kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send() ->", "pytest from email.message import Message from mailers import Email, InMemoryTransport, Mailer from mailers.plugins.jinja_renderer", "send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' assert", "len(storage) == 1 assert storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path)", "== 'root <root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path) -> None: with open(tmpdir /", "test_mail_regular_send() -> None: storage: list[Message] = [] app = Kupala() app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root", "assert storage[0]['From'] == 'root <root@localhost>' @pytest.mark.asyncio async def test_send_templated_mail(tmpdir: Path) -> None: with", "test_send_templated_mail(tmpdir: Path) -> None: with open(tmpdir / 'index.html', 'w') as f: f.write('base mail')", "mail') storage: list[Message] = [] app = Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage),", "from mailers.plugins.jinja_renderer import JinjaRendererPlugin from pathlib import Path from kupala.application import Kupala from", "[] app = 
Kupala() app.jinja.add_template_dirs(tmpdir) app.mail.add( 'default', Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ),", "Mailer( InMemoryTransport(storage), from_address='root <root@localhost>', plugins=[JinjaRendererPlugin(app.jinja.env)], ), ) await send_templated_mail(to='root@localhost', subject='test', html_template='index.html') assert len(storage)", "from kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send() -> None: storage: list[Message]", "Kupala from kupala.mails import send_mail, send_templated_mail @pytest.mark.asyncio async def test_mail_regular_send() -> None: storage:" ]
[ "len(s1)-1 for s in s2: if s == s1[i]: i += 1 if", "\"\": return True if s1 == s2: return True final = len(s1)-1 for", "True if s1 == s2: return True final = len(s1)-1 for s in", "True final = len(s1)-1 for s in s2: if s == s1[i]: i", "s1 == \"\": return True if s1 == s2: return True final =", "your code here i = 0 if s1 == \"\": return True if", "== \"\": return True if s1 == s2: return True final = len(s1)-1", "code here i = 0 if s1 == \"\": return True if s1", "in s2: if s == s1[i]: i += 1 if i == final:", "for s in s2: if s == s1[i]: i += 1 if i", "final = len(s1)-1 for s in s2: if s == s1[i]: i +=", "i = 0 if s1 == \"\": return True if s1 == s2:", "= len(s1)-1 for s in s2: if s == s1[i]: i += 1", "s2: if s == s1[i]: i += 1 if i == final: return", "== s2: return True final = len(s1)-1 for s in s2: if s", "return True if s1 == s2: return True final = len(s1)-1 for s", "# Write your code here i = 0 if s1 == \"\": return", "s == s1[i]: i += 1 if i == final: return True return", "if s1 == \"\": return True if s1 == s2: return True final", "return True final = len(s1)-1 for s in s2: if s == s1[i]:", "s2): # Write your code here i = 0 if s1 == \"\":", "s2: return True final = len(s1)-1 for s in s2: if s ==", "if s1 == s2: return True final = len(s1)-1 for s in s2:", "s1 == s2: return True final = len(s1)-1 for s in s2: if", "s1, s2): # Write your code here i = 0 if s1 ==", "class Solution: def solve(self, s1, s2): # Write your code here i =", "def solve(self, s1, s2): # Write your code here i = 0 if", "here i = 0 if s1 == \"\": return True if s1 ==", "Write your code here i = 0 if s1 == \"\": return True", "solve(self, s1, s2): # Write your code here i = 0 if s1", "0 if s1 == \"\": return True if s1 == s2: return True", "Solution: def solve(self, s1, s2): # Write your code here i = 0", "= 0 if s1 == \"\": return True if s1 == s2: return", "== s1[i]: i += 1 if i == final: return True return False", "s in s2: if s == s1[i]: i += 1 if i ==", "if s == s1[i]: i += 1 if 
i == final: return True" ]
[ "as nx import numpy.linalg as la import scipy.cluster.vq as vq import matplotlib.pyplot as", "= np.ravel(np.sign(f)) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means, labels = vq.kmeans2(U[:,1:k],", "with_labels=True) plt.show() coord = nx.spring_layout(G, iterations=1000) fig = plt.figure() axs = fig.add_subplot(111, aspect='equal')", "plt G = nx.karate_club_graph() print(\"Node Degree\") for v in G: print('%s %s' %", "plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means, labels = vq.kmeans2(U[:,1:k], k) fig = plt.figure()", "plt.show() k=3 means, labels = vq.kmeans2(U[:,1:k], k) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show()", "% (v, G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord = nx.spring_layout(G, iterations=1000) fig = plt.figure()", "print('%s %s' % (v, G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord = nx.spring_layout(G, iterations=1000) fig", "U[:,1] labels = np.ravel(np.sign(f)) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means, labels", "= D - A l, U = la.eigh(L) f = U[:,1] labels =", "la import scipy.cluster.vq as vq import matplotlib.pyplot as plt G = nx.karate_club_graph() print(\"Node", "= la.eigh(L) f = U[:,1] labels = np.ravel(np.sign(f)) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels)", "fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G) D =", "f = U[:,1] labels = np.ravel(np.sign(f)) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3", "l, U = la.eigh(L) f = U[:,1] labels = np.ravel(np.sign(f)) fig = plt.figure()", "iterations=1000) fig = plt.figure() axs = fig.add_subplot(111, 
aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k')", "nx.spring_layout(G, iterations=1000) fig = plt.figure() axs = fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G,", "axs = fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G)", "nx.draw_circular(G, with_labels=True) plt.show() coord = nx.spring_layout(G, iterations=1000) fig = plt.figure() axs = fig.add_subplot(111,", "np.ravel(np.sign(f)) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means, labels = vq.kmeans2(U[:,1:k], k)", "import matplotlib.pyplot as plt G = nx.karate_club_graph() print(\"Node Degree\") for v in G:", "= nx.spring_layout(G, iterations=1000) fig = plt.figure() axs = fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord)", "as plt G = nx.karate_club_graph() print(\"Node Degree\") for v in G: print('%s %s'", "nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L =", "networkx as nx import numpy.linalg as la import scipy.cluster.vq as vq import matplotlib.pyplot", "plt.figure() axs = fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A =", "<gh_stars>0 import numpy as np import networkx as nx import numpy.linalg as la", "Degree\") for v in G: print('%s %s' % (v, G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show()", "plt.show() coord = nx.spring_layout(G, iterations=1000) fig = plt.figure() axs = fig.add_subplot(111, aspect='equal') axs.axis('off')", "la.eigh(L) f = U[:,1] labels = np.ravel(np.sign(f)) fig = 
plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show()", "np.diag(np.ravel(np.sum(A,axis=1))) L = D - A l, U = la.eigh(L) f = U[:,1]", "nx import numpy.linalg as la import scipy.cluster.vq as vq import matplotlib.pyplot as plt", "coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L = D - A", "= fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G) D", "G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord = nx.spring_layout(G, iterations=1000) fig = plt.figure() axs =", "G = nx.karate_club_graph() print(\"Node Degree\") for v in G: print('%s %s' % (v,", "as la import scipy.cluster.vq as vq import matplotlib.pyplot as plt G = nx.karate_club_graph()", "import numpy.linalg as la import scipy.cluster.vq as vq import matplotlib.pyplot as plt G", "plt.show() A = nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L = D - A l,", "= nx.karate_club_graph() print(\"Node Degree\") for v in G: print('%s %s' % (v, G.degree(v)))", "G: print('%s %s' % (v, G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord = nx.spring_layout(G, iterations=1000)", "D - A l, U = la.eigh(L) f = U[:,1] labels = np.ravel(np.sign(f))", "coord,node_size=45,node_color=labels) plt.show() k=3 means, labels = vq.kmeans2(U[:,1:k], k) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels)", "aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1)))", "print(\"Node Degree\") for v in G: print('%s %s' % (v, G.degree(v))) nx.draw_circular(G, with_labels=True)", "A = nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L = D - A l, U", "in G: print('%s %s' % (v, 
G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord = nx.spring_layout(G,", "for v in G: print('%s %s' % (v, G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord", "coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L = D", "L = D - A l, U = la.eigh(L) f = U[:,1] labels", "%s' % (v, G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord = nx.spring_layout(G, iterations=1000) fig =", "matplotlib.pyplot as plt G = nx.karate_club_graph() print(\"Node Degree\") for v in G: print('%s", "import networkx as nx import numpy.linalg as la import scipy.cluster.vq as vq import", "np import networkx as nx import numpy.linalg as la import scipy.cluster.vq as vq", "numpy as np import networkx as nx import numpy.linalg as la import scipy.cluster.vq", "= plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means, labels = vq.kmeans2(U[:,1:k], k) fig =", "coord = nx.spring_layout(G, iterations=1000) fig = plt.figure() axs = fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G,", "labels = np.ravel(np.sign(f)) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means, labels =", "nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L = D -", "as np import networkx as nx import numpy.linalg as la import scipy.cluster.vq as", "as vq import matplotlib.pyplot as plt G = nx.karate_club_graph() print(\"Node Degree\") for v", "nx.karate_club_graph() print(\"Node Degree\") for v in G: print('%s %s' % (v, G.degree(v))) nx.draw_circular(G,", "axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A = nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L", "- A l, U = la.eigh(L) f = 
U[:,1] labels = np.ravel(np.sign(f)) fig", "import scipy.cluster.vq as vq import matplotlib.pyplot as plt G = nx.karate_club_graph() print(\"Node Degree\")", "nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L = D - A l, U = la.eigh(L)", "scipy.cluster.vq as vq import matplotlib.pyplot as plt G = nx.karate_club_graph() print(\"Node Degree\") for", "= U[:,1] labels = np.ravel(np.sign(f)) fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means,", "U = la.eigh(L) f = U[:,1] labels = np.ravel(np.sign(f)) fig = plt.figure() nx.draw_networkx_nodes(G,", "nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means, labels = vq.kmeans2(U[:,1:k], k) fig = plt.figure() nx.draw_networkx_nodes(G,", "fig = plt.figure() nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels) plt.show() k=3 means, labels = vq.kmeans2(U[:,1:k], k) fig", "= nx.adjacency_matrix(G) D = np.diag(np.ravel(np.sum(A,axis=1))) L = D - A l, U =", "= plt.figure() axs = fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show() A", "vq import matplotlib.pyplot as plt G = nx.karate_club_graph() print(\"Node Degree\") for v in", "= np.diag(np.ravel(np.sum(A,axis=1))) L = D - A l, U = la.eigh(L) f =", "import numpy as np import networkx as nx import numpy.linalg as la import", "D = np.diag(np.ravel(np.sum(A,axis=1))) L = D - A l, U = la.eigh(L) f", "v in G: print('%s %s' % (v, G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord =", "(v, G.degree(v))) nx.draw_circular(G, with_labels=True) plt.show() coord = nx.spring_layout(G, iterations=1000) fig = plt.figure() axs", "fig = plt.figure() axs = fig.add_subplot(111, aspect='equal') axs.axis('off') nx.draw_networkx_edges(G, coord) nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k') plt.show()", "numpy.linalg as la import scipy.cluster.vq as vq import 
matplotlib.pyplot as plt G =", "A l, U = la.eigh(L) f = U[:,1] labels = np.ravel(np.sign(f)) fig =" ]
[ "from . import helpers from . import updates from . import simulations from", "helpers from . import updates from . import simulations from . import training", "import parameters from . import helpers from . import updates from . import", ". import parameters from . import helpers from . import updates from .", "from . import parameters from . import helpers from . import updates from", ". import helpers from . import updates from . import simulations from .", "parameters from . import helpers from . import updates from . import simulations", "import helpers from . import updates from . import simulations from . import" ]
[ "as _ from rest_framework.authtoken.views import obtain_auth_token app_name = 'covma' urlpatterns = [ path(\"patient_list\",patient_list,name=", "app_name = 'covma' urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name=", "path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'), path(\"ace_list\",ace_list,name= 'detail'), path(\"ace_<int:pk>\",ace_detail,name= 'delete'), path(\"ace_var_list\",ace_var_list,name=", "'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'), path(\"ace_list\",ace_list,name= 'detail'), path(\"ace_<int:pk>\",ace_detail,name= 'delete'), path(\"ace_var_list\",ace_var_list,name= 'detail'),", "obtain_auth_token app_name = 'covma' urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'),", "* from django.urls import path from django.utils.translation import gettext_lazy as _ from rest_framework.authtoken.views", "django.urls import path from django.utils.translation import gettext_lazy as _ from rest_framework.authtoken.views import obtain_auth_token", "from rest_framework.authtoken.views import obtain_auth_token app_name = 'covma' urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name=", "path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'), path(\"ace_list\",ace_list,name= 'detail'), path(\"ace_<int:pk>\",ace_detail,name= 'delete'), 
path(\"ace_var_list\",ace_var_list,name= 'detail'), path(\"ace_var_<int:pk>\",ace_var_detail,name= 'delete'), ]", "= [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name=", "import path from django.utils.translation import gettext_lazy as _ from rest_framework.authtoken.views import obtain_auth_token app_name", "django.utils.translation import gettext_lazy as _ from rest_framework.authtoken.views import obtain_auth_token app_name = 'covma' urlpatterns", "from django.urls import path from django.utils.translation import gettext_lazy as _ from rest_framework.authtoken.views import", "path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'), path(\"ace_list\",ace_list,name= 'detail'), path(\"ace_<int:pk>\",ace_detail,name= 'delete'), path(\"ace_var_list\",ace_var_list,name= 'detail'), path(\"ace_var_<int:pk>\",ace_var_detail,name=", "import * from django.urls import path from django.utils.translation import gettext_lazy as _ from", "= 'covma' urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'),", "rest_framework.authtoken.views import obtain_auth_token app_name = 'covma' urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'),", "'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 
'delete'), path(\"ace_list\",ace_list,name= 'detail'), path(\"ace_<int:pk>\",ace_detail,name= 'delete'),", "from .views import * from django.urls import path from django.utils.translation import gettext_lazy as", "path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'), path(\"ace_list\",ace_list,name= 'detail'), path(\"ace_<int:pk>\",ace_detail,name=", "from django.utils.translation import gettext_lazy as _ from rest_framework.authtoken.views import obtain_auth_token app_name = 'covma'", "'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'), path(\"ace_list\",ace_list,name= 'detail'), path(\"ace_<int:pk>\",ace_detail,name= 'delete'), path(\"ace_var_list\",ace_var_list,name= 'detail'), path(\"ace_var_<int:pk>\",ace_var_detail,name= 'delete'),", "urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'),", "import obtain_auth_token app_name = 'covma' urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name=", "[ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'),", ".views import * from django.urls import path from django.utils.translation import gettext_lazy as _", "import 
gettext_lazy as _ from rest_framework.authtoken.views import obtain_auth_token app_name = 'covma' urlpatterns =", "'covma' urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name=", "path from django.utils.translation import gettext_lazy as _ from rest_framework.authtoken.views import obtain_auth_token app_name =", "<reponame>UM6SS-Bioinfo-team/Cov-MA<gh_stars>0 from .views import * from django.urls import path from django.utils.translation import gettext_lazy", "'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'), path(\"ace_list\",ace_list,name= 'detail'),", "path(\"patient_list\",patient_list,name= 'detail'), path(\"patient_<int:pk>\",patient_detail,name= 'delete'), path(\"virus_list\",virus_list,name= 'detail'), path(\"virus_<int:pk>\",virus_detail,name= 'delete'), path(\"virus_var_list\",virus_var_list,name= 'detail'), path(\"virus_var_<int:pk>\",virus_var_detail,name= 'delete'), path(\"ace_list\",ace_list,name=", "_ from rest_framework.authtoken.views import obtain_auth_token app_name = 'covma' urlpatterns = [ path(\"patient_list\",patient_list,name= 'detail'),", "gettext_lazy as _ from rest_framework.authtoken.views import obtain_auth_token app_name = 'covma' urlpatterns = [" ]
[ "def __init__( s, name:str='' ): super().__init__( 'dog', 'bark', name ) return class Cat(", "s.name = name s.sound = sound s.species = species return class Dog( Animal", "s.assertEqual( dog, rudy ) alex = Person( 'Alex', [rudy] ) persons = alex.dumps()", "class JsonSerializable test cases TODO: test file ops ''' def test_dumps_eq_loads( s )", "from non-existent file, file with improperly formatted content ''' return if __name__ ==", "persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail( s ) -> None: ''' test loads", "= Dog() s.assertNotEqual( dog, rudy ) s.assertTrue( dog.loads( dogs ) ) s.assertTrue( dog", "'cat', 'meouw', name ) return class Person( Animal ): def __init__( s, name:str='',", "''' test load from non-existent file, file with improperly formatted content ''' return", "Animal ): def __init__( s, name:str='', pets:List[Animal]=[] ): super().__init__( 'homo', 'blah', name )", "dogs ) ) s.assertTrue( dog == rudy ) s.assertEqual( dog, rudy ) alex", "): super().__init__( 'homo', 'blah', name ) s.pets = pets return class JsonSerializable_test( unittest.TestCase", "JsonSerializable ): def __init__( s, species:str='', sound:str='', name:str='' ): super().__init__() s.name = name", ") alex = Person( 'Alex', [rudy] ) persons = alex.dumps() s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}'", "__init__( s, species:str='', sound:str='', name:str='' ): super().__init__() s.name = name s.sound = sound", "formatted string ''' return def test_load_fail( s ) -> None: ''' test load", "'Alex', [rudy] ) persons = alex.dumps() s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail(", "Cat( Animal ): def __init__( s, name:str='' ): super().__init__( 'cat', 'meouw', name )", "'Rudy' ) dogs = 
rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog() s.assertNotEqual(", "None: rudy = Dog( 'Rudy' ) dogs = rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' )", "'meouw', name ) return class Person( Animal ): def __init__( s, name:str='', pets:List[Animal]=[]", "= name s.sound = sound s.species = species return class Dog( Animal ):", "Animal( JsonSerializable ): def __init__( s, species:str='', sound:str='', name:str='' ): super().__init__() s.name =", "import JsonSerializable class Animal( JsonSerializable ): def __init__( s, species:str='', sound:str='', name:str='' ):", "'bark', name ) return class Cat( Animal ): def __init__( s, name:str='' ):", "super().__init__() s.name = name s.sound = sound s.species = species return class Dog(", "super().__init__( 'cat', 'meouw', name ) return class Person( Animal ): def __init__( s,", "''' test loads from improperly formatted string ''' return def test_load_fail( s )", "species:str='', sound:str='', name:str='' ): super().__init__() s.name = name s.sound = sound s.species =", "rudy ) alex = Person( 'Alex', [rudy] ) persons = alex.dumps() s.assertEqual( persons,", ") s.assertTrue( dog.loads( dogs ) ) s.assertTrue( dog == rudy ) s.assertEqual( dog,", "dog.loads( dogs ) ) s.assertTrue( dog == rudy ) s.assertEqual( dog, rudy )", "<filename>src/json_serializable_test.py<gh_stars>0 # # # from typing import List import unittest from json_serializable import", "rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog() s.assertNotEqual( dog, rudy ) s.assertTrue(", "-> None: rudy = Dog( 'Rudy' ) dogs = rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}'", ") s.pets = pets return class JsonSerializable_test( unittest.TestCase ): ''' class 
JsonSerializable test", "name:str='' ): super().__init__( 'dog', 'bark', name ) return class Cat( Animal ): def", "Dog( 'Rudy' ) dogs = rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog()", "return class Cat( Animal ): def __init__( s, name:str='' ): super().__init__( 'cat', 'meouw',", "= rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog() s.assertNotEqual( dog, rudy )", "def test_loads_fail( s ) -> None: ''' test loads from improperly formatted string", "# from typing import List import unittest from json_serializable import JsonSerializable class Animal(", "from improperly formatted string ''' return def test_load_fail( s ) -> None: '''", "Dog() s.assertNotEqual( dog, rudy ) s.assertTrue( dog.loads( dogs ) ) s.assertTrue( dog ==", "__init__( s, name:str='' ): super().__init__( 'cat', 'meouw', name ) return class Person( Animal", "class Person( Animal ): def __init__( s, name:str='', pets:List[Animal]=[] ): super().__init__( 'homo', 'blah',", "class Dog( Animal ): def __init__( s, name:str='' ): super().__init__( 'dog', 'bark', name", "dog, rudy ) alex = Person( 'Alex', [rudy] ) persons = alex.dumps() s.assertEqual(", "= species return class Dog( Animal ): def __init__( s, name:str='' ): super().__init__(", "= Person( 'Alex', [rudy] ) persons = alex.dumps() s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return", "'homo', 'blah', name ) s.pets = pets return class JsonSerializable_test( unittest.TestCase ): '''", "''' def test_dumps_eq_loads( s ) -> None: rudy = Dog( 'Rudy' ) dogs", "name:str='' ): super().__init__() s.name = name s.sound = sound s.species = species return", "'dog', 'bark', name ) return class Cat( Animal ): def __init__( s, name:str=''", "from json_serializable import JsonSerializable class Animal( 
JsonSerializable ): def __init__( s, species:str='', sound:str='',", "Dog( Animal ): def __init__( s, name:str='' ): super().__init__( 'dog', 'bark', name )", "s.assertTrue( dog.loads( dogs ) ) s.assertTrue( dog == rudy ) s.assertEqual( dog, rudy", ") ) s.assertTrue( dog == rudy ) s.assertEqual( dog, rudy ) alex =", "s.species = species return class Dog( Animal ): def __init__( s, name:str='' ):", "import List import unittest from json_serializable import JsonSerializable class Animal( JsonSerializable ): def", "'{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail( s ) -> None: ''' test loads from", ") -> None: ''' test loads from improperly formatted string ''' return def", "pets return class JsonSerializable_test( unittest.TestCase ): ''' class JsonSerializable test cases TODO: test", "): def __init__( s, species:str='', sound:str='', name:str='' ): super().__init__() s.name = name s.sound", "dog = Dog() s.assertNotEqual( dog, rudy ) s.assertTrue( dog.loads( dogs ) ) s.assertTrue(", ") return class Person( Animal ): def __init__( s, name:str='', pets:List[Animal]=[] ): super().__init__(", "test cases TODO: test file ops ''' def test_dumps_eq_loads( s ) -> None:", "def __init__( s, species:str='', sound:str='', name:str='' ): super().__init__() s.name = name s.sound =", ") return def test_loads_fail( s ) -> None: ''' test loads from improperly", "ops ''' def test_dumps_eq_loads( s ) -> None: rudy = Dog( 'Rudy' )", "''' class JsonSerializable test cases TODO: test file ops ''' def test_dumps_eq_loads( s", "s.pets = pets return class JsonSerializable_test( unittest.TestCase ): ''' class JsonSerializable test cases", "s.assertTrue( dog == rudy ) s.assertEqual( dog, rudy ) alex = Person( 'Alex',", "): ''' class JsonSerializable test cases TODO: test file ops ''' def test_dumps_eq_loads(", "loads from improperly formatted string ''' return def test_load_fail( s ) -> None:", "== rudy ) 
s.assertEqual( dog, rudy ) alex = Person( 'Alex', [rudy] )", "def __init__( s, name:str='' ): super().__init__( 'cat', 'meouw', name ) return class Person(", ") dog = Dog() s.assertNotEqual( dog, rudy ) s.assertTrue( dog.loads( dogs ) )", "json_serializable import JsonSerializable class Animal( JsonSerializable ): def __init__( s, species:str='', sound:str='', name:str=''", "JsonSerializable test cases TODO: test file ops ''' def test_dumps_eq_loads( s ) ->", "def test_load_fail( s ) -> None: ''' test load from non-existent file, file", "sound s.species = species return class Dog( Animal ): def __init__( s, name:str=''", "non-existent file, file with improperly formatted content ''' return if __name__ == '__main__':", "unittest from json_serializable import JsonSerializable class Animal( JsonSerializable ): def __init__( s, species:str='',", "alex.dumps() s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail( s ) -> None: '''", "dog, rudy ) s.assertTrue( dog.loads( dogs ) ) s.assertTrue( dog == rudy )", "Animal ): def __init__( s, name:str='' ): super().__init__( 'cat', 'meouw', name ) return", "test load from non-existent file, file with improperly formatted content ''' return if", "rudy ) s.assertEqual( dog, rudy ) alex = Person( 'Alex', [rudy] ) persons", "s, name:str='' ): super().__init__( 'cat', 'meouw', name ) return class Person( Animal ):", ") -> None: ''' test load from non-existent file, file with improperly formatted", "name ) return class Person( Animal ): def __init__( s, name:str='', pets:List[Animal]=[] ):", ") -> None: rudy = Dog( 'Rudy' ) dogs = rudy.dumps() s.assertEqual( dogs,", "file ops ''' def test_dumps_eq_loads( s ) -> None: rudy = Dog( 'Rudy'", "super().__init__( 'homo', 'blah', name ) s.pets = pets return class JsonSerializable_test( unittest.TestCase ):", ") s.assertTrue( dog == rudy ) s.assertEqual( dog, rudy ) alex = Person(", 
"dog == rudy ) s.assertEqual( dog, rudy ) alex = Person( 'Alex', [rudy]", "[rudy] ) persons = alex.dumps() s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail( s", "s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail( s ) -> None: ''' test", "# # # from typing import List import unittest from json_serializable import JsonSerializable", "test_loads_fail( s ) -> None: ''' test loads from improperly formatted string '''", "name ) return class Cat( Animal ): def __init__( s, name:str='' ): super().__init__(", "s, name:str='' ): super().__init__( 'dog', 'bark', name ) return class Cat( Animal ):", "# # from typing import List import unittest from json_serializable import JsonSerializable class", "s.sound = sound s.species = species return class Dog( Animal ): def __init__(", "JsonSerializable class Animal( JsonSerializable ): def __init__( s, species:str='', sound:str='', name:str='' ): super().__init__()", "s, name:str='', pets:List[Animal]=[] ): super().__init__( 'homo', 'blah', name ) s.pets = pets return", "TODO: test file ops ''' def test_dumps_eq_loads( s ) -> None: rudy =", "typing import List import unittest from json_serializable import JsonSerializable class Animal( JsonSerializable ):", "species return class Dog( Animal ): def __init__( s, name:str='' ): super().__init__( 'dog',", "): def __init__( s, name:str='' ): super().__init__( 'cat', 'meouw', name ) return class", "unittest.TestCase ): ''' class JsonSerializable test cases TODO: test file ops ''' def", "name ) s.pets = pets return class JsonSerializable_test( unittest.TestCase ): ''' class JsonSerializable", "rudy = Dog( 'Rudy' ) dogs = rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog", "= alex.dumps() 
s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail( s ) -> None:", ") dogs = rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog() s.assertNotEqual( dog,", ") s.assertEqual( dog, rudy ) alex = Person( 'Alex', [rudy] ) persons =", "def __init__( s, name:str='', pets:List[Animal]=[] ): super().__init__( 'homo', 'blah', name ) s.pets =", "__init__( s, name:str='', pets:List[Animal]=[] ): super().__init__( 'homo', 'blah', name ) s.pets = pets", "= pets return class JsonSerializable_test( unittest.TestCase ): ''' class JsonSerializable test cases TODO:", ") persons = alex.dumps() s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail( s )", "cases TODO: test file ops ''' def test_dumps_eq_loads( s ) -> None: rudy", "'blah', name ) s.pets = pets return class JsonSerializable_test( unittest.TestCase ): ''' class", "None: ''' test loads from improperly formatted string ''' return def test_load_fail( s", "def test_dumps_eq_loads( s ) -> None: rudy = Dog( 'Rudy' ) dogs =", "s.assertNotEqual( dog, rudy ) s.assertTrue( dog.loads( dogs ) ) s.assertTrue( dog == rudy", "-> None: ''' test load from non-existent file, file with improperly formatted content", "test loads from improperly formatted string ''' return def test_load_fail( s ) ->", "Animal ): def __init__( s, name:str='' ): super().__init__( 'dog', 'bark', name ) return", "file, file with improperly formatted content ''' return if __name__ == '__main__': unittest.main()", "rudy ) s.assertTrue( dog.loads( dogs ) ) s.assertTrue( dog == rudy ) s.assertEqual(", "alex = Person( 'Alex', [rudy] ) persons = alex.dumps() s.assertEqual( persons, 
'{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' )", "load from non-existent file, file with improperly formatted content ''' return if __name__", ") return class Cat( Animal ): def __init__( s, name:str='' ): super().__init__( 'cat',", "s ) -> None: rudy = Dog( 'Rudy' ) dogs = rudy.dumps() s.assertEqual(", "sound:str='', name:str='' ): super().__init__() s.name = name s.sound = sound s.species = species", "super().__init__( 'dog', 'bark', name ) return class Cat( Animal ): def __init__( s,", "class Cat( Animal ): def __init__( s, name:str='' ): super().__init__( 'cat', 'meouw', name", "'{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog() s.assertNotEqual( dog, rudy ) s.assertTrue( dog.loads( dogs )", "name s.sound = sound s.species = species return class Dog( Animal ): def", "''' return def test_load_fail( s ) -> None: ''' test load from non-existent", "s ) -> None: ''' test load from non-existent file, file with improperly", "string ''' return def test_load_fail( s ) -> None: ''' test load from", "test_dumps_eq_loads( s ) -> None: rudy = Dog( 'Rudy' ) dogs = rudy.dumps()", "List import unittest from json_serializable import JsonSerializable class Animal( JsonSerializable ): def __init__(", "s, species:str='', sound:str='', name:str='' ): super().__init__() s.name = name s.sound = sound s.species", "import unittest from json_serializable import JsonSerializable class Animal( JsonSerializable ): def __init__( s,", "): super().__init__() s.name = name s.sound = sound s.species = species return class", "dogs = rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog() s.assertNotEqual( dog, rudy", "s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog() s.assertNotEqual( dog, rudy ) s.assertTrue( dog.loads(", "return class 
JsonSerializable_test( unittest.TestCase ): ''' class JsonSerializable test cases TODO: test file", "from typing import List import unittest from json_serializable import JsonSerializable class Animal( JsonSerializable", "JsonSerializable_test( unittest.TestCase ): ''' class JsonSerializable test cases TODO: test file ops '''", "class Animal( JsonSerializable ): def __init__( s, species:str='', sound:str='', name:str='' ): super().__init__() s.name", "name:str='', pets:List[Animal]=[] ): super().__init__( 'homo', 'blah', name ) s.pets = pets return class", "Person( 'Alex', [rudy] ) persons = alex.dumps() s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def", "s ) -> None: ''' test loads from improperly formatted string ''' return", "test_load_fail( s ) -> None: ''' test load from non-existent file, file with", "improperly formatted string ''' return def test_load_fail( s ) -> None: ''' test", "test file ops ''' def test_dumps_eq_loads( s ) -> None: rudy = Dog(", "Person( Animal ): def __init__( s, name:str='', pets:List[Animal]=[] ): super().__init__( 'homo', 'blah', name", "return def test_loads_fail( s ) -> None: ''' test loads from improperly formatted", "dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog = Dog() s.assertNotEqual( dog, rudy ) s.assertTrue( dog.loads( dogs", "): def __init__( s, name:str='', pets:List[Animal]=[] ): super().__init__( 'homo', 'blah', name ) s.pets", "): super().__init__( 'cat', 'meouw', name ) return class Person( Animal ): def __init__(", "persons = alex.dumps() s.assertEqual( persons, '{\"class_name\":\"Person\",\"name\":\"Alex\",\"pets\":['+dogs+'],\"sound\":\"blah\",\"species\":\"homo\"}' ) return def test_loads_fail( s ) ->", "name:str='' ): super().__init__( 'cat', 'meouw', name ) return class Person( Animal ): def", "): super().__init__( 'dog', 'bark', name ) return class Cat( Animal ): def 
__init__(", "None: ''' test load from non-existent file, file with improperly formatted content '''", "= Dog( 'Rudy' ) dogs = rudy.dumps() s.assertEqual( dogs, '{\"class_name\":\"Dog\",\"name\":\"Rudy\",\"sound\":\"bark\",\"species\":\"dog\"}' ) dog =", "): def __init__( s, name:str='' ): super().__init__( 'dog', 'bark', name ) return class", "pets:List[Animal]=[] ): super().__init__( 'homo', 'blah', name ) s.pets = pets return class JsonSerializable_test(", "= sound s.species = species return class Dog( Animal ): def __init__( s,", "-> None: ''' test loads from improperly formatted string ''' return def test_load_fail(", "return class Person( Animal ): def __init__( s, name:str='', pets:List[Animal]=[] ): super().__init__( 'homo',", "class JsonSerializable_test( unittest.TestCase ): ''' class JsonSerializable test cases TODO: test file ops", "return def test_load_fail( s ) -> None: ''' test load from non-existent file,", "return class Dog( Animal ): def __init__( s, name:str='' ): super().__init__( 'dog', 'bark',", "__init__( s, name:str='' ): super().__init__( 'dog', 'bark', name ) return class Cat( Animal" ]
[ "== \"__main__\": parser = argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16,", "\"rdf_linearized\" out_path = args.trex_dataset + \".embed\" elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key =", "parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int) args = parser.parse_args() model", "set_start_method from sentence_transformers import SentenceTransformer from dataset_builders import GenWikiDataset, TRexDataset from util import", "rdf_key = \"rdf_linearized\" out_path = args.gen_wiki_dataset + \".embed\" else: raise NotImplementedError(\"you must pass", "default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int) args = parser.parse_args() model = SentenceTransformer(args.model_name) if args.trex_dataset:", "default=None, type=int) args = parser.parse_args() model = SentenceTransformer(args.model_name) if args.trex_dataset: dataset = TRexDataset(args.trex_dataset)", "pair_sims_datasets_map if __name__ == \"__main__\": parser = argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\", default=\"roberta-base\",", "from util import pair_sims_datasets_map if __name__ == \"__main__\": parser = argparse.ArgumentParser() # model", "import set_start_method from sentence_transformers import SentenceTransformer from dataset_builders import GenWikiDataset, TRexDataset from util", "pass a dataset!\") if args.subset: out_path = out_path + f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066)", "dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map, model=model, text_key=text_key, rdf_key=rdf_key, batch_size=args.batch_size),", "text_key = \"text\" 
rdf_key = \"rdf_linearized\" out_path = args.gen_wiki_dataset + \".embed\" else: raise", "from functools import partial from multiprocess import set_start_method from sentence_transformers import SentenceTransformer from", "\"__main__\": parser = argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int)", "args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.gen_wiki_dataset", "dataset = dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map, model=model, text_key=text_key,", "rdf_key = \"rdf_linearized\" out_path = args.trex_dataset + \".embed\" elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset)", "= TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.trex_dataset + \".embed\"", "dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int) args =", "\"text\" rdf_key = \"rdf_linearized\" out_path = args.trex_dataset + \".embed\" elif args.gen_wiki_dataset: dataset =", "= GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.gen_wiki_dataset + \".embed\"", "default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None,", "dataset = dataset.map( partial(pair_sims_datasets_map, model=model, text_key=text_key, rdf_key=rdf_key, batch_size=args.batch_size), batched=True, batch_size=args.batch_size, with_rank=True, num_proc=2) dataset.to_json(out_path)", 
"if args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path =", "multiprocess import set_start_method from sentence_transformers import SentenceTransformer from dataset_builders import GenWikiDataset, TRexDataset from", "+ f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map,", "util import pair_sims_datasets_map if __name__ == \"__main__\": parser = argparse.ArgumentParser() # model args", "__name__ == \"__main__\": parser = argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\",", "= argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset", "dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map, model=model, text_key=text_key, rdf_key=rdf_key, batch_size=args.batch_size), batched=True,", "= args.gen_wiki_dataset + \".embed\" else: raise NotImplementedError(\"you must pass a dataset!\") if args.subset:", "import SentenceTransformer from dataset_builders import GenWikiDataset, TRexDataset from util import pair_sims_datasets_map if __name__", "= args.trex_dataset + \".embed\" elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key", "out_path + f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map(", "else: raise NotImplementedError(\"you must pass a dataset!\") if args.subset: out_path = out_path +", "from dataset_builders import GenWikiDataset, TRexDataset from util import pair_sims_datasets_map if __name__ == \"__main__\":", 
"parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int) args = parser.parse_args() model = SentenceTransformer(args.model_name) if", "out_path = args.trex_dataset + \".embed\" elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\"", "args.gen_wiki_dataset + \".embed\" else: raise NotImplementedError(\"you must pass a dataset!\") if args.subset: out_path", "GenWikiDataset, TRexDataset from util import pair_sims_datasets_map if __name__ == \"__main__\": parser = argparse.ArgumentParser()", "import GenWikiDataset, TRexDataset from util import pair_sims_datasets_map if __name__ == \"__main__\": parser =", "default=16, type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None,", "\".embed\" else: raise NotImplementedError(\"you must pass a dataset!\") if args.subset: out_path = out_path", "must pass a dataset!\") if args.subset: out_path = out_path + f\".subset{args.subset}\" dataset =", "= parser.parse_args() model = SentenceTransformer(args.model_name) if args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key = \"text\"", "args.trex_dataset + \".embed\" elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key =", "\"text\" rdf_key = \"rdf_linearized\" out_path = args.gen_wiki_dataset + \".embed\" else: raise NotImplementedError(\"you must", "from sentence_transformers import SentenceTransformer from dataset_builders import GenWikiDataset, TRexDataset from util import pair_sims_datasets_map", "type=int) args = parser.parse_args() model = SentenceTransformer(args.model_name) if args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key", "text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.trex_dataset + \".embed\" elif 
args.gen_wiki_dataset:", "TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.trex_dataset + \".embed\" elif", "parser = argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) #", "SentenceTransformer from dataset_builders import GenWikiDataset, TRexDataset from util import pair_sims_datasets_map if __name__ ==", "args = parser.parse_args() model = SentenceTransformer(args.model_name) if args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key =", "import argparse from functools import partial from multiprocess import set_start_method from sentence_transformers import", "partial from multiprocess import set_start_method from sentence_transformers import SentenceTransformer from dataset_builders import GenWikiDataset,", "= out_path + f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset =", "= \"text\" rdf_key = \"rdf_linearized\" out_path = args.trex_dataset + \".embed\" elif args.gen_wiki_dataset: dataset", "if __name__ == \"__main__\": parser = argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str)", "out_path = out_path + f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset", "parser.parse_args() model = SentenceTransformer(args.model_name) if args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key", "= dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map, model=model, text_key=text_key, rdf_key=rdf_key, batch_size=args.batch_size), batched=True, batch_size=args.batch_size,", "sentence_transformers import SentenceTransformer from dataset_builders import 
GenWikiDataset, TRexDataset from util import pair_sims_datasets_map if", "dataset = TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.trex_dataset +", "# model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\",", "= \"text\" rdf_key = \"rdf_linearized\" out_path = args.gen_wiki_dataset + \".embed\" else: raise NotImplementedError(\"you", "if args.subset: out_path = out_path + f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset))", "= dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map, model=model, text_key=text_key, rdf_key=rdf_key,", "args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str)", "GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.gen_wiki_dataset + \".embed\" else:", "args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int) args = parser.parse_args()", "TRexDataset from util import pair_sims_datasets_map if __name__ == \"__main__\": parser = argparse.ArgumentParser() #", "default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int) args = parser.parse_args() model =", "dataset_builders import GenWikiDataset, TRexDataset from util import pair_sims_datasets_map if __name__ == \"__main__\": parser", "dataset!\") if args.subset: out_path = out_path + f\".subset{args.subset}\" 
dataset = dataset.shuffle(seed=1066) dataset =", "f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map, model=model,", "# dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int) args", "parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\",", "type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int)", "dataset.select(range(args.subset)) set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map, model=model, text_key=text_key, rdf_key=rdf_key, batch_size=args.batch_size), batched=True, batch_size=args.batch_size, with_rank=True,", "type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\", default=None, type=int) args = parser.parse_args() model = SentenceTransformer(args.model_name)", "parser.add_argument(\"--subset\", default=None, type=int) args = parser.parse_args() model = SentenceTransformer(args.model_name) if args.trex_dataset: dataset =", "\"rdf_linearized\" out_path = args.gen_wiki_dataset + \".embed\" else: raise NotImplementedError(\"you must pass a dataset!\")", "a dataset!\") if args.subset: out_path = out_path + f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066) dataset", "import pair_sims_datasets_map if __name__ == \"__main__\": parser = argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\",", "= 
SentenceTransformer(args.model_name) if args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\"", "model = SentenceTransformer(args.model_name) if args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key =", "raise NotImplementedError(\"you must pass a dataset!\") if args.subset: out_path = out_path + f\".subset{args.subset}\"", "argparse.ArgumentParser() # model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset args", "parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) parser.add_argument(\"--trex_dataset\", default=None, type=str) parser.add_argument(\"--subset\",", "+ \".embed\" elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\"", "NotImplementedError(\"you must pass a dataset!\") if args.subset: out_path = out_path + f\".subset{args.subset}\" dataset", "<reponame>TevenLeScao/tetraencoder import argparse from functools import partial from multiprocess import set_start_method from sentence_transformers", "import partial from multiprocess import set_start_method from sentence_transformers import SentenceTransformer from dataset_builders import", "args.subset: out_path = out_path + f\".subset{args.subset}\" dataset = dataset.shuffle(seed=1066) dataset = dataset.select(range(args.subset)) set_start_method(\"spawn\")", "argparse from functools import partial from multiprocess import set_start_method from sentence_transformers import SentenceTransformer", "out_path = args.gen_wiki_dataset + \".embed\" else: raise NotImplementedError(\"you must pass a dataset!\") if", "type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None, type=str) 
parser.add_argument(\"--trex_dataset\", default=None, type=str)", "model args parser.add_argument(\"--model_name\", default=\"roberta-base\", type=str) parser.add_argument(f\"--batch_size\", default=16, type=int) # dataset args parser.add_argument(\"--gen_wiki_dataset\", default=None,", "\".embed\" elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path", "+ \".embed\" else: raise NotImplementedError(\"you must pass a dataset!\") if args.subset: out_path =", "= \"rdf_linearized\" out_path = args.gen_wiki_dataset + \".embed\" else: raise NotImplementedError(\"you must pass a", "type=str) parser.add_argument(\"--subset\", default=None, type=int) args = parser.parse_args() model = SentenceTransformer(args.model_name) if args.trex_dataset: dataset", "set_start_method(\"spawn\") dataset = dataset.map( partial(pair_sims_datasets_map, model=model, text_key=text_key, rdf_key=rdf_key, batch_size=args.batch_size), batched=True, batch_size=args.batch_size, with_rank=True, num_proc=2)", "from multiprocess import set_start_method from sentence_transformers import SentenceTransformer from dataset_builders import GenWikiDataset, TRexDataset", "dataset = GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.gen_wiki_dataset +", "= \"rdf_linearized\" out_path = args.trex_dataset + \".embed\" elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key", "args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path = args.trex_dataset", "SentenceTransformer(args.model_name) if args.trex_dataset: dataset = TRexDataset(args.trex_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path", "elif args.gen_wiki_dataset: dataset = GenWikiDataset(args.gen_wiki_dataset) text_key = \"text\" rdf_key = \"rdf_linearized\" out_path =", "functools import partial from multiprocess 
import set_start_method from sentence_transformers import SentenceTransformer from dataset_builders" ]
[ "options={ 'verbose_name_plural': 'Points', }, ), migrations.CreateModel( name='Season', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], options={ 'verbose_name_plural': 'Points', }, ), migrations.CreateModel(", "models.PositiveIntegerField()), ], options={ 'verbose_name_plural': 'Points', }, ), migrations.CreateModel( name='Season', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()), ], ), migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], options={ 'verbose_name_plural': 'Points', },", "migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name',", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()), ], ), migrations.CreateModel( name='TournamentType',", "by Django 1.11.6 on 2017-12-02 06:21 from __future__ import unicode_literals from django.db import", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], options={ 'verbose_name_plural':", "migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ],", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()), ],", "models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tournament', '0003_auto_20171202_0513'), ]", "serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()), ], ), migrations.CreateModel( name='TournamentType', fields=[", "import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tournament', '0003_auto_20171202_0513'), ] operations = [", "('tournament', '0003_auto_20171202_0513'), ] operations = [ migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "), migrations.CreateModel( name='Season', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()),", "on 2017-12-02 06:21 from __future__ import unicode_literals from django.db import migrations, models import", "(1, 'Manual'), (2, 'Round Robin'), (3, 'Swiss')], default=0)), ], ), migrations.AddField( model_name='tournament', name='date',", "dependencies = [ ('tournament', '0003_auto_20171202_0513'), ] operations = [ migrations.CreateModel( name='Points', fields=[ ('id',", "('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2, 'Round Robin'), (3, 'Swiss')], default=0)),", "('points', models.PositiveIntegerField()), ], options={ 'verbose_name_plural': 'Points', }, ), 
migrations.CreateModel( name='Season', fields=[ ('id', models.AutoField(auto_created=True,", "), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'),", "), migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50), ), migrations.AddField( model_name='points',", "06:21 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import", "-*- # Generated by Django 1.11.6 on 2017-12-02 06:21 from __future__ import unicode_literals", "from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import django.utils.timezone", "model_name='tournament', name='name', field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind',", "utf-8 -*- # Generated by Django 1.11.6 on 2017-12-02 06:21 from __future__ import", "= [ migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points',", "model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, 
to='tournament.TournamentType'), ), migrations.AddField(", "), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='season', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()), ], ), migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "<reponame>AfricaChess/lichesshub<filename>tournament/migrations/0004_auto_20171202_0621.py # -*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2017-12-02", "name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1,", "migrations.CreateModel( name='Season', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date',", "unicode_literals from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies", "__future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class", "name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='season', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.Season'), ), ]", "'verbose_name_plural': 'Points', }, ), migrations.CreateModel( name='Season', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tournament', '0003_auto_20171202_0513'), ] operations =", "), migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament',", "2017-12-02 06:21 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion", "model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False),", "name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament',", "Django 1.11.6 on 2017-12-02 06:21 from __future__ import unicode_literals from django.db import migrations,", "migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='season', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.Season'),", "'Auto'), (1, 'Manual'), (2, 'Round Robin'), (3, 'Swiss')], default=0)), ], ), migrations.AddField( model_name='tournament',", "('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 
'Manual'), (2, 'Round Robin'), (3, 'Swiss')], default=0)), ], ),", "migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'),", "migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament',", "1.11.6 on 2017-12-02 06:21 from __future__ import unicode_literals from django.db import migrations, models", "Robin'), (3, 'Swiss')], default=0)), ], ), migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament',", "migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], options={ 'verbose_name_plural': 'Points',", "name='Season', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()),", "Migration(migrations.Migration): dependencies = [ ('tournament', '0003_auto_20171202_0513'), ] operations = [ migrations.CreateModel( name='Points', fields=[", "name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), 
migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ),", "field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ),", "name='name', field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True,", "] operations = [ migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('placement',", "migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament', name='synced',", "('start_date', models.DateField()), ('end_date', models.DateField()), ], ), migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tournament',", "operations = [ migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()),", "migrations, models import django.db.models.deletion import 
django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tournament', '0003_auto_20171202_0513'),", "-*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2017-12-02 06:21 from", "'Swiss')], default=0)), ], ), migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False),", "), migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament',", "[ migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()),", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'),", "], options={ 'verbose_name_plural': 'Points', }, ), migrations.CreateModel( name='Season', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies =", "primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], options={ 'verbose_name_plural': 'Points', }, ),", "= [ ('tournament', '0003_auto_20171202_0513'), ] operations = [ migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True,", "models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], 
options={ 'verbose_name_plural': 'Points', }, ), migrations.CreateModel( name='Season', fields=[ ('id',", "to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='season', field=models.ForeignKey(null=True,", "[ ('tournament', '0003_auto_20171202_0513'), ] operations = [ migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "Generated by Django 1.11.6 on 2017-12-02 06:21 from __future__ import unicode_literals from django.db", "'Manual'), (2, 'Round Robin'), (3, 'Swiss')], default=0)), ], ), migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now),", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2, 'Round Robin'),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()), ], ), migrations.CreateModel(", "migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()), ], ),", "default=0)), ], ), migrations.AddField( model_name='tournament', name='date', 
field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ),", "# -*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2017-12-02 06:21", "), migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField(", "import unicode_literals from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration):", "'Points', }, ), migrations.CreateModel( name='Season', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)),", "'Round Robin'), (3, 'Swiss')], default=0)), ], ), migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField(", "('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], options={ 'verbose_name_plural': 'Points', }, ), migrations.CreateModel( name='Season', fields=[", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2,", "field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50), ), migrations.AddField(", "model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name', 
field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2, 'Round", "serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2, 'Round Robin'), (3,", "models.DateField()), ('end_date', models.DateField()), ], ), migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "class Migration(migrations.Migration): dependencies = [ ('tournament', '0003_auto_20171202_0513'), ] operations = [ migrations.CreateModel( name='Points',", "), migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0,", "(3, 'Swiss')], default=0)), ], ), migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament', name='error',", "name='error', field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50), ),", "name='Points', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], options={", "models.DateField()), ], ), 
migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)),", "import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tournament', '0003_auto_20171202_0513'), ] operations", "name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'),", "verbose_name='ID')), ('placement', models.PositiveIntegerField()), ('points', models.PositiveIntegerField()), ], options={ 'verbose_name_plural': 'Points', }, ), migrations.CreateModel( name='Season',", "models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2, 'Round Robin'), (3, 'Swiss')], default=0)), ],", "(2, 'Round Robin'), (3, 'Swiss')], default=0)), ], ), migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ),", "coding: utf-8 -*- # Generated by Django 1.11.6 on 2017-12-02 06:21 from __future__", "field=models.CharField(max_length=50), ), migrations.AddField( model_name='points', name='tournament_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date', models.DateField()), ('end_date', models.DateField()), ], ), migrations.CreateModel( name='TournamentType', fields=[ ('id',", "model_name='tournament', name='error', field=models.BooleanField(default=False), ), 
migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='tournament', name='name', field=models.CharField(max_length=50),", "], ), migrations.AddField( model_name='tournament', name='date', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ), migrations.AddField(", "('end_date', models.DateField()), ], ), migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "}, ), migrations.CreateModel( name='Season', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('start_date',", "django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [", "model_name='tournament', name='kind', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='season', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.Season'), ),", "# Generated by Django 1.11.6 on 2017-12-02 06:21 from __future__ import unicode_literals from", "django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tournament', '0003_auto_20171202_0513'), ] operations = [ migrations.CreateModel(", "models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2, 'Round Robin'), (3, 'Swiss')], default=0)), ], ), migrations.AddField(", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='kind', field=models.ForeignKey(null=True, 
on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'), ), migrations.AddField( model_name='tournament', name='season',", "], ), migrations.CreateModel( name='TournamentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type',", "'0003_auto_20171202_0513'), ] operations = [ migrations.CreateModel( name='Points', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='tournament', name='error', field=models.BooleanField(default=False), ), migrations.AddField( model_name='tournament', name='synced', field=models.BooleanField(default=False), ), migrations.AlterField(", "verbose_name='ID')), ('name', models.CharField(max_length=50)), ('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2, 'Round Robin'), (3, 'Swiss')]," ]
[ "sort objects of the same class, but they don't natively support comparison operation", "have no respect for the status quo. # You can quote them, disagree", "# Because they change things. They push the human race forward. # And", "users = [User(23), User(3), User(99)] print(sorted(users, key=lambda u: u.user_id)) # Instead of using", "User(3), User(99)] print(sorted(users, key=lambda u: u.user_id)) # Instead of using lambda, an alternative", "terms of the # MIT \"\"\" Problem: sort objects of the same class,", "# ch01 # # 🎂\"Here's to the crazy ones. The misfits. The rebels.", "them. About the only thing you can't do is ignore them. # Because", "as the creazy ones, we see genius. # Because the poeple who are", "under terms of the # MIT \"\"\" Problem: sort objects of the same", "u: u.user_id)) # Instead of using lambda, an alternative approach is # to", "'User({})'.format(self.user_id) users = [User(23), User(3), User(99)] print(sorted(users, key=lambda u: u.user_id)) # Instead of", "# And while some may see them as the creazy ones, we see", "And while some may see them as the creazy ones, we see genius.", "© 2019. <NAME>. # All rights reserved. # # Distributed under terms of", "who are crazy enough to think thay can change # the world, are", "they change things. They push the human race forward. # And while some", "to the crazy ones. The misfits. The rebels. # The troublemakers. The round", "glority or vilify # them. About the only thing you can't do is", "operator import attrgetter class User: def __init__(self, user_id): self.user_id = user_id def __repr__(self):", "that can be passed \"\"\" from operator import attrgetter class User: def __init__(self,", "# of rules. And they have no respect for the status quo. 
#", "the world, are the ones who do.\" # # Created by <NAME> on", "-*- # # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # # 🎂\"Here's to the crazy ones.", "natively support comparison operation Solution: The built-in sorted() function takes a key argument", "pegs in the square holes. # The ones who see things differently. They're", "holes. # The ones who see things differently. They're not found # of", "see genius. # Because the poeple who are crazy enough to think thay", "The round pegs in the square holes. # The ones who see things", "race forward. # And while some may see them as the creazy ones,", "User: def __init__(self, user_id): self.user_id = user_id def __repr__(self): return 'User({})'.format(self.user_id) users =", "print(sorted(users, key=lambda u: u.user_id)) # Instead of using lambda, an alternative approach is", "of using lambda, an alternative approach is # to use operator.attrgetter() print(sorted(users, key=attrgetter('user_id')))", "them, glority or vilify # them. About the only thing you can't do", "the human race forward. # And while some may see them as the", "or vilify # them. About the only thing you can't do is ignore", "are the ones who do.\" # # Created by <NAME> on 01/27/19 16:07.", "def __repr__(self): return 'User({})'.format(self.user_id) users = [User(23), User(3), User(99)] print(sorted(users, key=lambda u: u.user_id))", "ch01 # # 🎂\"Here's to the crazy ones. The misfits. The rebels. #", "objects of the same class, but they don't natively support comparison operation Solution:", "rules. And they have no respect for the status quo. 
# You can", "return 'User({})'.format(self.user_id) users = [User(23), User(3), User(99)] print(sorted(users, key=lambda u: u.user_id)) # Instead", "= [User(23), User(3), User(99)] print(sorted(users, key=lambda u: u.user_id)) # Instead of using lambda,", "passed \"\"\" from operator import attrgetter class User: def __init__(self, user_id): self.user_id =", "disagree with them, glority or vilify # them. About the only thing you", "to think thay can change # the world, are the ones who do.\"", "Because they change things. They push the human race forward. # And while", "# The ones who see things differently. They're not found # of rules.", "round pegs in the square holes. # The ones who see things differently.", "in the square holes. # The ones who see things differently. They're not", "is ignore them. # Because they change things. They push the human race", "respect for the status quo. # You can quote them, disagree with them,", "of the same class, but they don't natively support comparison operation Solution: The", "You can quote them, disagree with them, glority or vilify # them. About", "# Distributed under terms of the # MIT \"\"\" Problem: sort objects of", "things differently. They're not found # of rules. And they have no respect", "only thing you can't do is ignore them. # Because they change things.", "of the # MIT \"\"\" Problem: sort objects of the same class, but", "[User(23), User(3), User(99)] print(sorted(users, key=lambda u: u.user_id)) # Instead of using lambda, an", "can change # the world, are the ones who do.\" # # Created", "01/27/19 16:07. # Copyright © 2019. <NAME>. # All rights reserved. 
# #", "the same class, but they don't natively support comparison operation Solution: The built-in", "Distributed under terms of the # MIT \"\"\" Problem: sort objects of the", "are crazy enough to think thay can change # the world, are the", "__repr__(self): return 'User({})'.format(self.user_id) users = [User(23), User(3), User(99)] print(sorted(users, key=lambda u: u.user_id)) #", "# # Distributed under terms of the # MIT \"\"\" Problem: sort objects", "change things. They push the human race forward. # And while some may", "found # of rules. And they have no respect for the status quo.", "The built-in sorted() function takes a key argument that can be passed \"\"\"", "change # the world, are the ones who do.\" # # Created by", "argument that can be passed \"\"\" from operator import attrgetter class User: def", "not found # of rules. And they have no respect for the status", "the square holes. # The ones who see things differently. They're not found", "thing you can't do is ignore them. # Because they change things. They", "the ones who do.\" # # Created by <NAME> on 01/27/19 16:07. #", "key argument that can be passed \"\"\" from operator import attrgetter class User:", "The rebels. # The troublemakers. The round pegs in the square holes. #", "the poeple who are crazy enough to think thay can change # the", "The ones who see things differently. They're not found # of rules. And", "Copyright © 2019. <NAME>. # All rights reserved. # # Distributed under terms", "by <NAME> on 01/27/19 16:07. # Copyright © 2019. <NAME>. # All rights", "who do.\" # # Created by <NAME> on 01/27/19 16:07. # Copyright ©", "user_id): self.user_id = user_id def __repr__(self): return 'User({})'.format(self.user_id) users = [User(23), User(3), User(99)]", "crazy ones. The misfits. The rebels. # The troublemakers. 
The round pegs in", "enough to think thay can change # the world, are the ones who", "/usr/bin/env python3 # -*- coding: utf-8 -*- # # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 #", "square holes. # The ones who see things differently. They're not found #", "misfits. The rebels. # The troublemakers. The round pegs in the square holes.", "do.\" # # Created by <NAME> on 01/27/19 16:07. # Copyright © 2019.", "with them, glority or vilify # them. About the only thing you can't", "All rights reserved. # # Distributed under terms of the # MIT \"\"\"", "rights reserved. # # Distributed under terms of the # MIT \"\"\" Problem:", "And they have no respect for the status quo. # You can quote", "status quo. # You can quote them, disagree with them, glority or vilify", "class User: def __init__(self, user_id): self.user_id = user_id def __repr__(self): return 'User({})'.format(self.user_id) users", "world, are the ones who do.\" # # Created by <NAME> on 01/27/19", "support comparison operation Solution: The built-in sorted() function takes a key argument that", "quo. # You can quote them, disagree with them, glority or vilify #", "crazy enough to think thay can change # the world, are the ones", "while some may see them as the creazy ones, we see genius. #", "vilify # them. About the only thing you can't do is ignore them.", "ignore them. # Because they change things. They push the human race forward.", "# # Created by <NAME> on 01/27/19 16:07. # Copyright © 2019. <NAME>.", "import attrgetter class User: def __init__(self, user_id): self.user_id = user_id def __repr__(self): return", "the creazy ones, we see genius. # Because the poeple who are crazy", "# # 🎂\"Here's to the crazy ones. The misfits. The rebels. 
# The", "takes a key argument that can be passed \"\"\" from operator import attrgetter", "they don't natively support comparison operation Solution: The built-in sorted() function takes a", "a key argument that can be passed \"\"\" from operator import attrgetter class", "Because the poeple who are crazy enough to think thay can change #", "# them. About the only thing you can't do is ignore them. #", "ones. The misfits. The rebels. # The troublemakers. The round pegs in the", "# -*- coding: utf-8 -*- # # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # # 🎂\"Here's", "About the only thing you can't do is ignore them. # Because they", "quote them, disagree with them, glority or vilify # them. About the only", "<NAME> on 01/27/19 16:07. # Copyright © 2019. <NAME>. # All rights reserved.", "# All rights reserved. # # Distributed under terms of the # MIT", "from operator import attrgetter class User: def __init__(self, user_id): self.user_id = user_id def", "ones who see things differently. They're not found # of rules. And they", "them. # Because they change things. They push the human race forward. #", "u.user_id)) # Instead of using lambda, an alternative approach is # to use", "1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # # 🎂\"Here's to the crazy ones. The misfits. The", "def __init__(self, user_id): self.user_id = user_id def __repr__(self): return 'User({})'.format(self.user_id) users = [User(23),", "= user_id def __repr__(self): return 'User({})'.format(self.user_id) users = [User(23), User(3), User(99)] print(sorted(users, key=lambda", "#! /usr/bin/env python3 # -*- coding: utf-8 -*- # # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01", "troublemakers. The round pegs in the square holes. # The ones who see", "do is ignore them. # Because they change things. They push the human", "can't do is ignore them. # Because they change things. 
They push the", "utf-8 -*- # # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # # 🎂\"Here's to the crazy", "Created by <NAME> on 01/27/19 16:07. # Copyright © 2019. <NAME>. # All", "can be passed \"\"\" from operator import attrgetter class User: def __init__(self, user_id):", "see them as the creazy ones, we see genius. # Because the poeple", "ones who do.\" # # Created by <NAME> on 01/27/19 16:07. # Copyright", "for the status quo. # You can quote them, disagree with them, glority", "user_id def __repr__(self): return 'User({})'.format(self.user_id) users = [User(23), User(3), User(99)] print(sorted(users, key=lambda u:", "# MIT \"\"\" Problem: sort objects of the same class, but they don't", "key=lambda u: u.user_id)) # Instead of using lambda, an alternative approach is #", "forward. # And while some may see them as the creazy ones, we", "🎂\"Here's to the crazy ones. The misfits. The rebels. # The troublemakers. The", "Problem: sort objects of the same class, but they don't natively support comparison", "they have no respect for the status quo. # You can quote them,", "but they don't natively support comparison operation Solution: The built-in sorted() function takes", "see things differently. They're not found # of rules. And they have no", "-*- coding: utf-8 -*- # # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # # 🎂\"Here's to", "creazy ones, we see genius. # Because the poeple who are crazy enough", "poeple who are crazy enough to think thay can change # the world,", "be passed \"\"\" from operator import attrgetter class User: def __init__(self, user_id): self.user_id", "class, but they don't natively support comparison operation Solution: The built-in sorted() function", "operation Solution: The built-in sorted() function takes a key argument that can be", "# Instead of using lambda, an alternative approach is # to use operator.attrgetter()", "them, disagree with them, glority or vilify # them. 
About the only thing", "you can't do is ignore them. # Because they change things. They push", "2019. <NAME>. # All rights reserved. # # Distributed under terms of the", "same class, but they don't natively support comparison operation Solution: The built-in sorted()", "Solution: The built-in sorted() function takes a key argument that can be passed", "MIT \"\"\" Problem: sort objects of the same class, but they don't natively", "sorted() function takes a key argument that can be passed \"\"\" from operator", "don't natively support comparison operation Solution: The built-in sorted() function takes a key", "may see them as the creazy ones, we see genius. # Because the", "built-in sorted() function takes a key argument that can be passed \"\"\" from", "push the human race forward. # And while some may see them as", "python3 # -*- coding: utf-8 -*- # # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # #", "differently. They're not found # of rules. And they have no respect for", "think thay can change # the world, are the ones who do.\" #", "comparison operation Solution: The built-in sorted() function takes a key argument that can", "some may see them as the creazy ones, we see genius. # Because", "function takes a key argument that can be passed \"\"\" from operator import", "who see things differently. They're not found # of rules. And they have", "can quote them, disagree with them, glority or vilify # them. About the", "# You can quote them, disagree with them, glority or vilify # them.", "# Created by <NAME> on 01/27/19 16:07. # Copyright © 2019. <NAME>. #", "__init__(self, user_id): self.user_id = user_id def __repr__(self): return 'User({})'.format(self.user_id) users = [User(23), User(3),", "# # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # # 🎂\"Here's to the crazy ones. The", "Instead of using lambda, an alternative approach is # to use operator.attrgetter() print(sorted(users,", "the crazy ones. The misfits. 
The rebels. # The troublemakers. The round pegs", "thay can change # the world, are the ones who do.\" # #", "no respect for the status quo. # You can quote them, disagree with", "# the world, are the ones who do.\" # # Created by <NAME>", "reserved. # # Distributed under terms of the # MIT \"\"\" Problem: sort", "# The troublemakers. The round pegs in the square holes. # The ones", "the only thing you can't do is ignore them. # Because they change", "on 01/27/19 16:07. # Copyright © 2019. <NAME>. # All rights reserved. #", "rebels. # The troublemakers. The round pegs in the square holes. # The", "User(99)] print(sorted(users, key=lambda u: u.user_id)) # Instead of using lambda, an alternative approach", "# Because the poeple who are crazy enough to think thay can change", "attrgetter class User: def __init__(self, user_id): self.user_id = user_id def __repr__(self): return 'User({})'.format(self.user_id)", "# 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # # 🎂\"Here's to the crazy ones. The misfits.", "The troublemakers. The round pegs in the square holes. # The ones who", "coding: utf-8 -*- # # 1.14.Sorting_Objects_Without_Native_Comparison_Support.py # ch01 # # 🎂\"Here's to the", "the # MIT \"\"\" Problem: sort objects of the same class, but they", "# Copyright © 2019. <NAME>. # All rights reserved. # # Distributed under", "The misfits. The rebels. # The troublemakers. The round pegs in the square", "# 🎂\"Here's to the crazy ones. The misfits. The rebels. # The troublemakers.", "They push the human race forward. # And while some may see them", "of rules. And they have no respect for the status quo. # You", "ones, we see genius. # Because the poeple who are crazy enough to", "\"\"\" from operator import attrgetter class User: def __init__(self, user_id): self.user_id = user_id", "They're not found # of rules. And they have no respect for the", "them as the creazy ones, we see genius. 
# Because the poeple who", "\"\"\" Problem: sort objects of the same class, but they don't natively support", "self.user_id = user_id def __repr__(self): return 'User({})'.format(self.user_id) users = [User(23), User(3), User(99)] print(sorted(users,", "genius. # Because the poeple who are crazy enough to think thay can", "things. They push the human race forward. # And while some may see", "<NAME>. # All rights reserved. # # Distributed under terms of the #", "human race forward. # And while some may see them as the creazy", "the status quo. # You can quote them, disagree with them, glority or", "we see genius. # Because the poeple who are crazy enough to think", "16:07. # Copyright © 2019. <NAME>. # All rights reserved. # # Distributed" ]
[ "# 3rd party import pytest from coincidence.regressions import FileRegressionFixture, check_file_regression from domdf_python_tools.paths import", "pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout = capsys.readouterr().out.replace(str(bad_code), \"bad_code.py\") check_file_regression(stdout, file_regression) assert", "coincidence.regressions import FileRegressionFixture, check_file_regression from domdf_python_tools.paths import PathPlus from flake8.main import cli #", "def test_output(file_regression: FileRegressionFixture, capsys): with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout =", "from flake8.main import cli # type: ignore bad_code = PathPlus(__file__).parent / \"bad_code.py\" def", "# type: ignore bad_code = PathPlus(__file__).parent / \"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys): with", "with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout = capsys.readouterr().out.replace(str(bad_code), \"bad_code.py\") check_file_regression(stdout, file_regression)", "import PathPlus from flake8.main import cli # type: ignore bad_code = PathPlus(__file__).parent /", "import pytest from coincidence.regressions import FileRegressionFixture, check_file_regression from domdf_python_tools.paths import PathPlus from flake8.main", "from domdf_python_tools.paths import PathPlus from flake8.main import cli # type: ignore bad_code =", "domdf_python_tools.paths import PathPlus from flake8.main import cli # type: ignore bad_code = PathPlus(__file__).parent", "3rd party import pytest from coincidence.regressions import FileRegressionFixture, check_file_regression from domdf_python_tools.paths import PathPlus", "\"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys): with 
pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout", "check_file_regression from domdf_python_tools.paths import PathPlus from flake8.main import cli # type: ignore bad_code", "\"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout = capsys.readouterr().out.replace(str(bad_code), \"bad_code.py\") check_file_regression(stdout, file_regression) assert not capsys.readouterr().err", "pytest from coincidence.regressions import FileRegressionFixture, check_file_regression from domdf_python_tools.paths import PathPlus from flake8.main import", "type: ignore bad_code = PathPlus(__file__).parent / \"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys): with pytest.raises(SystemExit):", "<reponame>domdfcoding/flake8-github-action # 3rd party import pytest from coincidence.regressions import FileRegressionFixture, check_file_regression from domdf_python_tools.paths", "FileRegressionFixture, capsys): with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout = capsys.readouterr().out.replace(str(bad_code), \"bad_code.py\")", "import FileRegressionFixture, check_file_regression from domdf_python_tools.paths import PathPlus from flake8.main import cli # type:", "cli # type: ignore bad_code = PathPlus(__file__).parent / \"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys):", "= PathPlus(__file__).parent / \"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys): with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\",", "ignore bad_code = PathPlus(__file__).parent / \"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys): with pytest.raises(SystemExit): cli.main([str(bad_code),", "import cli # type: ignore bad_code = PathPlus(__file__).parent / \"bad_code.py\" def 
test_output(file_regression: FileRegressionFixture,", "PathPlus from flake8.main import cli # type: ignore bad_code = PathPlus(__file__).parent / \"bad_code.py\"", "bad_code = PathPlus(__file__).parent / \"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys): with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\",", "/ \"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys): with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"])", "test_output(file_regression: FileRegressionFixture, capsys): with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout = capsys.readouterr().out.replace(str(bad_code),", "from coincidence.regressions import FileRegressionFixture, check_file_regression from domdf_python_tools.paths import PathPlus from flake8.main import cli", "cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout = capsys.readouterr().out.replace(str(bad_code), \"bad_code.py\") check_file_regression(stdout, file_regression) assert not", "capsys): with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\", \"github\"]) stdout = capsys.readouterr().out.replace(str(bad_code), \"bad_code.py\") check_file_regression(stdout,", "flake8.main import cli # type: ignore bad_code = PathPlus(__file__).parent / \"bad_code.py\" def test_output(file_regression:", "PathPlus(__file__).parent / \"bad_code.py\" def test_output(file_regression: FileRegressionFixture, capsys): with pytest.raises(SystemExit): cli.main([str(bad_code), \"--select\", \"F401,F404,F821,F701,E303\", \"--format\",", "party import pytest from coincidence.regressions import FileRegressionFixture, check_file_regression from domdf_python_tools.paths import PathPlus from", "FileRegressionFixture, 
check_file_regression from domdf_python_tools.paths import PathPlus from flake8.main import cli # type: ignore" ]
[ "all the even numbers between 1 and A. class Solution: # @param A", "and return the sum of all the even numbers between 1 and A.", "you need to find and return the sum of all the even numbers", "between 1 and A. class Solution: # @param A : integer # @return", "@param A : integer # @return an integer def solve(self, A): sum=0; for", "A : integer # @return an integer def solve(self, A): sum=0; for i", "def solve(self, A): sum=0; for i in range (1,A+1): if (i%2==0): sum+= i", "# @param A : integer # @return an integer def solve(self, A): sum=0;", "A, you need to find and return the sum of all the even", "return the sum of all the even numbers between 1 and A. class", "an integer A, you need to find and return the sum of all", "given an integer A, you need to find and return the sum of", "even numbers between 1 and A. class Solution: # @param A : integer", "integer def solve(self, A): sum=0; for i in range (1,A+1): if (i%2==0): sum+=", "sum of all the even numbers between 1 and A. class Solution: #", "integer # @return an integer def solve(self, A): sum=0; for i in range", "the even numbers between 1 and A. class Solution: # @param A :", "and A. class Solution: # @param A : integer # @return an integer", "A. class Solution: # @param A : integer # @return an integer def", "A): sum=0; for i in range (1,A+1): if (i%2==0): sum+= i return sum", "solve(self, A): sum=0; for i in range (1,A+1): if (i%2==0): sum+= i return", "# @return an integer def solve(self, A): sum=0; for i in range (1,A+1):", "an integer def solve(self, A): sum=0; for i in range (1,A+1): if (i%2==0):", "of all the even numbers between 1 and A. 
class Solution: # @param", "to find and return the sum of all the even numbers between 1", "need to find and return the sum of all the even numbers between", "class Solution: # @param A : integer # @return an integer def solve(self,", "find and return the sum of all the even numbers between 1 and", "//You are given an integer A, you need to find and return the", "are given an integer A, you need to find and return the sum", "the sum of all the even numbers between 1 and A. class Solution:", "@return an integer def solve(self, A): sum=0; for i in range (1,A+1): if", "1 and A. class Solution: # @param A : integer # @return an", "Solution: # @param A : integer # @return an integer def solve(self, A):", ": integer # @return an integer def solve(self, A): sum=0; for i in", "integer A, you need to find and return the sum of all the", "numbers between 1 and A. class Solution: # @param A : integer #" ]
[ "= False): if cls._trackable(o): return True elif strict: print(o, o.lexinfo) raise TypeError(f\"Object '{o}'", "elif buffer is not None: if not isinstance(buffer, str): raise TypeError( f\"Invalid type", "first line is added for the indexing to start at 1 instead of", "of the (cumulative) character count.\"\"\" if not isinstance(offset, int) or offset < 0:", "self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for emptiness checking.\"\"\" return self.__len__()", "\"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) def propagate(self, x: object, y:", "from lexpos.\"\"\" if lexpos is None: return self.eof.lexinfo elif not isinstance(lexpos, int): raise", "indexing to start at 1 instead of 0. `self.table` keeps track of the", "fname is not None: if not isinstance(fname, (str, Path)): raise TypeError( f\"Invalid type", "= min(length, len(self) - self.offset) self.fpath = ( Path(fname).resolve(strict=True) if (fname is not", "int = 0, length: int = None): return self.__class__(fname=self.fpath, offset=offset, length=length) def error(self,", "buffer) else: raise ValueError(\"Either 'fname' or 'buffer' must be provided.\") def __init__( self,", "represent the End-of-File for the given source object. It's an anonymously created EOFType", "-*- Tracking -*- def track(self, o: object, lexpos: int = None): \"\"\"\"\"\" setattr(o,", "self.offset) self.fpath = ( Path(fname).resolve(strict=True) if (fname is not None) else \"<string>\" )", "must be a positive integer (int) or 'None'.\") self.offset = min(offset, len(self)) self.length", "\"source\": None}) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) )", "not o.source is None ): return False else: return True __all__ = [\"Source\"]", "for the indexing to start at 1 instead of 0. 
`self.table` keeps track", "int): raise TypeError(f\"'lexpos' must be an integer (int), not ({type(lexpos)}).\") elif not 0", "self.fpath = ( Path(fname).resolve(strict=True) if (fname is not None) else \"<string>\" ) self.lines", "or not isinstance(o.lexpos, int) or o.lexpos < 0 ): return False elif (", "raise ValueError( \"Can't work with both 'fname' and 'buffer' parameters, choose one option.\"", "position on exception handling. \"\"\" LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def __new__(", "both 'fname' and 'buffer' parameters, choose one option.\" ) elif fname is not", "track(self, o: object, lexpos: int = None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if not", "def getlex(self, lexpos: int = None) -> dict: \"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\"", "None) def propagate(self, x: object, y: object, *, out: bool = False) ->", "track of the (cumulative) character count.\"\"\" if not isinstance(offset, int) or offset <", "a positive integer (int) or 'None'.\") self.offset = min(offset, len(self)) self.length = min(length,", "work with both 'fname' and 'buffer' parameters, choose one option.\" ) elif fname", "TypeError( f\"Invalid type '{type(fname)}' for 'fname'. Must be 'str' or 'Path'.\" ) fpath", "# Standard Library import itertools as it from pathlib import Path class Source(str):", "source object. It's an anonymously created EOFType instance. \"\"\" eof = self.EOF() self.track(eof,", "f\"Invalid type '{type(buffer)}' for 'buffer'. Must be 'str'.\" ) return super(Source, cls).__new__(cls, buffer)", "isinstance(fname, (str, Path)): raise TypeError( f\"Invalid type '{type(fname)}' for 'fname'. 
Must be 'str'", "*, strict: bool = False): if cls._trackable(o): return True elif strict: print(o, o.lexinfo)", "encoding=\"utf-8\") as file: return super(Source, cls).__new__(cls, file.read()) elif buffer is not None: if", "mode=\"r\", encoding=\"utf-8\") as file: return super(Source, cls).__new__(cls, file.read()) elif buffer is not None:", "if lexpos is None: return self.eof.lexinfo elif not isinstance(lexpos, int): raise TypeError(f\"'lexpos' must", "not None) else \"<string>\" ) self.lines = [\"\"] + self.split(\"\\n\") self.table = list(it.accumulate([(len(line)", "fname is not None and buffer is not None: raise ValueError( \"Can't work", "o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) @classmethod def blank(cls, o:", "be a positive integer (int).\") elif length is None: length = len(self) elif", "return None else: raise TypeError( f\"Can't propagate lexinfo between types {type(x)} and {type(y)}\"", "buffer: str = None, offset: int = 0, length: int = None, ):", "\"lexpos\": lexpos, \"chrpos\": lexpos - self.table[lineno - 1], \"source\": self, } def slice(self,", "( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\" )", "\"lexinfo\") or not isinstance(o.lexinfo, dict): return False else: if any(key not in o.lexinfo", "return { \"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\": lexpos - self.table[lineno - 1], \"source\":", "@property def eof(self): \"\"\"Virtual object to represent the End-of-File for the given source", "\"\"\" LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def __new__( cls, *, fname: str", "{target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object): pass @property def", "<= self.length: return self.eof.lexinfo lexpos = lexpos + self.offset + 1 lineno =", "not None: if not isinstance(buffer, str): raise TypeError( 
f\"Invalid type '{type(buffer)}' for 'buffer'.", "len(self) - self.offset) self.fpath = ( Path(fname).resolve(strict=True) if (fname is not None) else", "\"source\": self, } def slice(self, offset: int = 0, length: int = None):", "tracking of tokens in order to indicate error position on exception handling. \"\"\"", "def track(self, o: object, lexpos: int = None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if", "0 ): return False elif ( not hasattr(o, \"chrpos\") or not isinstance(o.chrpos, int)", "string itself with additional features for position tracking. \"\"\" if fname is not", "return self[self.offset : self.offset + self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value", "lexpos >= self.table[lineno]: lineno += 1 if lineno == len(self.table): return self.eof.lexinfo else:", "- 1], \"source\": self, } def slice(self, offset: int = 0, length: int", "at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else: return", "if not isinstance(fname, (str, Path)): raise TypeError( f\"Invalid type '{type(fname)}' for 'fname'. Must", "is a string itself with additional features for position tracking. \"\"\" if fname", "if not isinstance(buffer, str): raise TypeError( f\"Invalid type '{type(buffer)}' for 'buffer'. 
Must be", "self.lines = [\"\"] + self.split(\"\\n\") self.table = list(it.accumulate([(len(line) + 1) for line in", "f\"{msg}\\n\" ) class EOF(object): pass @property def eof(self): \"\"\"Virtual object to represent the", "int = None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__,", "is None: return self.eof.lexinfo elif not isinstance(lexpos, int): raise TypeError(f\"'lexpos' must be an", "= min(offset, len(self)) self.length = min(length, len(self) - self.offset) self.fpath = ( Path(fname).resolve(strict=True)", "setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) @classmethod def blank(cls,", "-> object | None: if self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return", "bool = False): if cls._trackable(o): return True elif strict: print(o, o.lexinfo) raise TypeError(f\"Object", "\"source\"} def __new__( cls, *, fname: str = None, buffer: str = None,", "return False @classmethod def _trackable(cls, o: object): if not hasattr(o, \"lexinfo\") or not", ": self.offset + self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for emptiness", "else: return False @classmethod def _trackable(cls, o: object): if not hasattr(o, \"lexinfo\") or", "f\"{msg}\\n\" else: if name is not None: return ( f\"In '{self.fpath}' at line", "( Path(fname).resolve(strict=True) if (fname is not None) else \"<string>\" ) self.lines = [\"\"]", "'str' or 'Path'.\" ) fpath = Path(fname) if not fpath.exists() or not fpath.is_file():", "given source object. It's an anonymously created EOFType instance. 
\"\"\" eof = self.EOF()", "o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) )", "setattr(o.__class__, \"__lextrack__\", None) @classmethod def blank(cls, o: object): setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\":", "f\"{' ' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else: return ( f\"In '{self.fpath}' at", "cls).__new__(cls, file.read()) elif buffer is not None: if not isinstance(buffer, str): raise TypeError(", "length=length) def error(self, msg: str, *, target: object = None, name: str =", "this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) @classmethod def blank(cls, o: object): setattr(o, \"lexinfo\", {\"chrpos\":", "with both 'fname' and 'buffer' parameters, choose one option.\" ) elif fname is", "return eof # -*- Tracking -*- def track(self, o: object, lexpos: int =", "offset=offset, length=length) def error(self, msg: str, *, target: object = None, name: str", "= 1 while lineno < len(self.table) and lexpos >= self.table[lineno]: lineno += 1", "len(self)) return eof # -*- Tracking -*- def track(self, o: object, lexpos: int", "ValueError(\"Either 'fname' or 'buffer' must be provided.\") def __init__( self, *, fname: str", "0, \"source\": None}) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"])", "or not isinstance(o.chrpos, int) or o.chrpos < 0 ): return False elif (", "target is None or not self.trackable(target): if name is not None: return f\"In", "( not hasattr(o, \"source\") or not isinstance(o.source, Source) and not o.source is None", ") setattr(o.__class__, \"__lextrack__\", None) def propagate(self, x: object, y: object, *, out: bool", "one option.\" ) elif fname is not None: if not isinstance(fname, (str, Path)):", "__init__( self, *, fname: str = None, buffer: str = None, offset: int", "None: raise ValueError( \"Can't work with both 
'fname' and 'buffer' parameters, choose one", "f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for emptiness checking.\"\"\" return self.__len__() > 0 def getlex(self,", "or offset < 0: raise TypeError(\"'offset' must be a positive integer (int).\") elif", "= None, ): \"\"\"This object is a string itself with additional features for", "isinstance(o.lexinfo, dict): return False else: if any(key not in o.lexinfo for key in", "return self.eof.lexinfo elif not isinstance(lexpos, int): raise TypeError(f\"'lexpos' must be an integer (int),", "or 'Path'.\" ) fpath = Path(fname) if not fpath.exists() or not fpath.is_file(): raise", "be a positive integer (int) or 'None'.\") self.offset = min(offset, len(self)) self.length =", "bool = False) -> object | None: if self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo)", "0, \"lexpos\": 0, \"source\": None}) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda", "lineno < len(self.table) and lexpos >= self.table[lineno]: lineno += 1 if lineno ==", "self.offset + self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for emptiness checking.\"\"\"", "else: if name is not None: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\"", "character count.\"\"\" if not isinstance(offset, int) or offset < 0: raise TypeError(\"'offset' must", "@classmethod def trackable(cls, o: object, *, strict: bool = False): if cls._trackable(o): return", ") fpath = Path(fname) if not fpath.exists() or not fpath.is_file(): raise FileNotFoundError(f\"Invalid file", "itertools as it from pathlib import Path class Source(str): \"\"\"This source code object", "in order to indicate error position on exception handling. 
\"\"\" LEXKEYS = {\"lexpos\",", "must be a positive integer (int).\") elif length is None: length = len(self)", "\"\"\"Virtual object to represent the End-of-File for the given source object. It's an", "Path class Source(str): \"\"\"This source code object aids the tracking of tokens in", "blank first line is added for the indexing to start at 1 instead", "not isinstance(lexpos, int): raise TypeError(f\"'lexpos' must be an integer (int), not ({type(lexpos)}).\") elif", "for position tracking. \"\"\" if fname is not None and buffer is not", "parameters, choose one option.\" ) elif fname is not None: if not isinstance(fname,", "elif ( not hasattr(o, \"lexpos\") or not isinstance(o.lexpos, int) or o.lexpos < 0", "str = None, offset: int = 0, length: int = None, ): \"\"\"This", "): \"\"\"Separates the source code in multiple lines. A blank first line is", "= Path(fname) if not fpath.exists() or not fpath.is_file(): raise FileNotFoundError(f\"Invalid file path '{fname}'.\")", "return self.__class__(fname=self.fpath, offset=offset, length=length) def error(self, msg: str, *, target: object = None,", "isinstance(lexpos, int): raise TypeError(f\"'lexpos' must be an integer (int), not ({type(lexpos)}).\") elif not", "or 'None'.\") self.offset = min(offset, len(self)) self.length = min(length, len(self) - self.offset) self.fpath", "self.split(\"\\n\") self.table = list(it.accumulate([(len(line) + 1) for line in self.lines])) def __str__(self): return", "def blank(cls, o: object): setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\": 0, \"lexpos\": 0, \"source\":", "in self.lines])) def __str__(self): return self[self.offset : self.offset + self.length] def __repr__(self): return", "import itertools as it from pathlib import Path class Source(str): \"\"\"This source code", "if cls._trackable(o): return True elif strict: print(o, o.lexinfo) raise TypeError(f\"Object '{o}' of type", "an integer (int), not ({type(lexpos)}).\") elif not 0 <= lexpos <= self.length: return", "tokens 
in order to indicate error position on exception handling. \"\"\" LEXKEYS =", "handling. \"\"\" LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def __new__( cls, *, fname:", "else \"<string>\" ) self.lines = [\"\"] + self.split(\"\\n\") self.table = list(it.accumulate([(len(line) + 1)", "additional features for position tracking. \"\"\" if fname is not None and buffer", "None: if not isinstance(buffer, str): raise TypeError( f\"Invalid type '{type(buffer)}' for 'buffer'. Must", "o.chrpos < 0 ): return False elif ( not hasattr(o, \"source\") or not", "raise ValueError(\"Either 'fname' or 'buffer' must be provided.\") def __init__( self, *, fname:", "elif strict: print(o, o.lexinfo) raise TypeError(f\"Object '{o}' of type '{type(o)}' is not trackable.\")", "1 instead of 0. `self.table` keeps track of the (cumulative) character count.\"\"\" if", "\"Can't work with both 'fname' and 'buffer' parameters, choose one option.\" ) elif", "return False elif ( not hasattr(o, \"chrpos\") or not isinstance(o.chrpos, int) or o.chrpos", "f\"Invalid type '{type(fname)}' for 'fname'. 
Must be 'str' or 'Path'.\" ) fpath =", "int) or offset < 0: raise TypeError(\"'offset' must be a positive integer (int).\")", "file path '{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\") as file: return super(Source, cls).__new__(cls, file.read())", ") return super(Source, cls).__new__(cls, buffer) else: raise ValueError(\"Either 'fname' or 'buffer' must be", "( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\"", "raise TypeError(\"'length' must be a positive integer (int) or 'None'.\") self.offset = min(offset,", "this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None)", "int) or o.chrpos < 0 ): return False elif ( not hasattr(o, \"source\")", "\"__lextrack__\", None) def propagate(self, x: object, y: object, *, out: bool = False)", "order to indicate error position on exception handling. \"\"\" LEXKEYS = {\"lexpos\", \"chrpos\",", "lexpos - self.table[lineno - 1], \"source\": self, } def slice(self, offset: int =", "\"\"\" \"\"\" # Future Imports from __future__ import annotations # Standard Library import", "o: object): if not hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo, dict): return False else:", "to start at 1 instead of 0. 
`self.table` keeps track of the (cumulative)", "not None: return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else:", "elif length is None: length = len(self) elif not isinstance(length, int) or length", "lexpos + self.offset + 1 lineno = 1 while lineno < len(self.table) and", ") setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) @classmethod def", "None: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\"", "len(self) elif not isinstance(length, int) or length < 0: raise TypeError(\"'length' must be", "self.length: return self.eof.lexinfo lexpos = lexpos + self.offset + 1 lineno = 1", "True elif strict: print(o, o.lexinfo) raise TypeError(f\"Object '{o}' of type '{type(o)}' is not", "not None: if not isinstance(fname, (str, Path)): raise TypeError( f\"Invalid type '{type(fname)}' for", "'{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\") as file: return super(Source, cls).__new__(cls, file.read()) elif buffer", "not isinstance(buffer, str): raise TypeError( f\"Invalid type '{type(buffer)}' for 'buffer'. Must be 'str'.\"", "is added for the indexing to start at 1 instead of 0. 
`self.table`", "= None) -> dict: \"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\" if lexpos is None:", "{ \"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\": lexpos - self.table[lineno - 1], \"source\": self,", "not isinstance(length, int) or length < 0: raise TypeError(\"'length' must be a positive", "{type(x)} and {type(y)}\" ) @classmethod def trackable(cls, o: object, *, strict: bool =", "): return False elif ( not hasattr(o, \"source\") or not isinstance(o.source, Source) and", "not isinstance(o.lineno, int) or o.lineno < 0 ): return False elif ( not", "property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) def propagate(self, x: object, y: object,", "lineno, \"lexpos\": lexpos, \"chrpos\": lexpos - self.table[lineno - 1], \"source\": self, } def", "< 0 ): return False elif ( not hasattr(o, \"lexpos\") or not isinstance(o.lexpos,", "TypeError( f\"Invalid type '{type(buffer)}' for 'buffer'. Must be 'str'.\" ) return super(Source, cls).__new__(cls,", "(cumulative) character count.\"\"\" if not isinstance(offset, int) or offset < 0: raise TypeError(\"'offset'", "= None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\",", "else: raise TypeError( f\"Can't propagate lexinfo between types {type(x)} and {type(y)}\" ) @classmethod", "property(lambda this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__,", "cls.LEXKEYS): return False else: if ( not hasattr(o, \"lineno\") or not isinstance(o.lineno, int)", "+ self.split(\"\\n\") self.table = list(it.accumulate([(len(line) + 1) for line in self.lines])) def __str__(self):", "{target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else: return ( f\"In", "o.lexinfo for key in cls.LEXKEYS): return False else: if ( not hasattr(o, \"lineno\")", 
"this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) @classmethod", "else: if ( not hasattr(o, \"lineno\") or not isinstance(o.lineno, int) or o.lineno <", "of type '{type(o)}' is not trackable.\") else: return False @classmethod def _trackable(cls, o:", "fname: str = None, buffer: str = None, offset: int = 0, length:", "be provided.\") def __init__( self, *, fname: str = None, buffer: str =", "= len(self) elif not isinstance(length, int) or length < 0: raise TypeError(\"'length' must", "0, length: int = None): return self.__class__(fname=self.fpath, offset=offset, length=length) def error(self, msg: str,", "buffer is not None: raise ValueError( \"Can't work with both 'fname' and 'buffer'", "not isinstance(offset, int) or offset < 0: raise TypeError(\"'offset' must be a positive", "+ 1 lineno = 1 while lineno < len(self.table) and lexpos >= self.table[lineno]:", "self.length = min(length, len(self) - self.offset) self.fpath = ( Path(fname).resolve(strict=True) if (fname is", "== len(self.table): return self.eof.lexinfo else: return { \"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\": lexpos", "or o.lexpos < 0 ): return False elif ( not hasattr(o, \"chrpos\") or", "instance. 
\"\"\" eof = self.EOF() self.track(eof, len(self)) return eof # -*- Tracking -*-", "* target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\"", "cls._trackable(o): return True elif strict: print(o, o.lexinfo) raise TypeError(f\"Object '{o}' of type '{type(o)}'", "lexpos.\"\"\" if lexpos is None: return self.eof.lexinfo elif not isinstance(lexpos, int): raise TypeError(f\"'lexpos'", "self.eof.lexinfo elif not isinstance(lexpos, int): raise TypeError(f\"'lexpos' must be an integer (int), not", "the tracking of tokens in order to indicate error position on exception handling.", "self.__len__() > 0 def getlex(self, lexpos: int = None) -> dict: \"\"\"Retrieves lexinfo", "None, ): \"\"\"Separates the source code in multiple lines. A blank first line", "f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if name is", "this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) def propagate(self, x: object, y: object, *, out:", "= [\"\"] + self.split(\"\\n\") self.table = list(it.accumulate([(len(line) + 1) for line in self.lines]))", "len(self.table) and lexpos >= self.table[lineno]: lineno += 1 if lineno == len(self.table): return", "None): if target is None or not self.trackable(target): if name is not None:", "f\"Can't propagate lexinfo between types {type(x)} and {type(y)}\" ) @classmethod def trackable(cls, o:", "_trackable(cls, o: object): if not hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo, dict): return False", "created EOFType instance. 
\"\"\" eof = self.EOF() self.track(eof, len(self)) return eof # -*-", "ValueError( \"Can't work with both 'fname' and 'buffer' parameters, choose one option.\" )", "def trackable(cls, o: object, *, strict: bool = False): if cls._trackable(o): return True", "error(self, msg: str, *, target: object = None, name: str = None): if", "f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\" )", "o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) )", "else: return None else: raise TypeError( f\"Can't propagate lexinfo between types {type(x)} and", "super(Source, cls).__new__(cls, file.read()) elif buffer is not None: if not isinstance(buffer, str): raise", "line in self.lines])) def __str__(self): return self[self.offset : self.offset + self.length] def __repr__(self):", "print(o, o.lexinfo) raise TypeError(f\"Object '{o}' of type '{type(o)}' is not trackable.\") else: return", "for 'fname'. 
Must be 'str' or 'Path'.\" ) fpath = Path(fname) if not", "it from pathlib import Path class Source(str): \"\"\"This source code object aids the", "if not hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo, dict): return False else: if any(key", "name is not None: return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return f\"In '{self.fpath}':\\n\"", ") @classmethod def trackable(cls, o: object, *, strict: bool = False): if cls._trackable(o):", "f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object): pass @property def eof(self):", "Source(str): \"\"\"This source code object aids the tracking of tokens in order to", "self.EOF() self.track(eof, len(self)) return eof # -*- Tracking -*- def track(self, o: object,", "\"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) @classmethod def blank(cls, o: object):", "End-of-File for the given source object. It's an anonymously created EOFType instance. \"\"\"", "if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__,", "out: bool = False) -> object | None: if self.trackable(x, strict=True) and self.trackable(y):", "1], \"source\": self, } def slice(self, offset: int = 0, length: int =", "None and buffer is not None: raise ValueError( \"Can't work with both 'fname'", "if lineno == len(self.table): return self.eof.lexinfo else: return { \"lineno\": lineno, \"lexpos\": lexpos,", "import annotations # Standard Library import itertools as it from pathlib import Path", "isinstance(o.lineno, int) or o.lineno < 0 ): return False elif ( not hasattr(o,", "not isinstance(fname, (str, Path)): raise TypeError( f\"Invalid type '{type(fname)}' for 'fname'. 
Must be", "self.eof.lexinfo else: return { \"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\": lexpos - self.table[lineno -", "emptiness checking.\"\"\" return self.__len__() > 0 def getlex(self, lexpos: int = None) ->", "< 0 ): return False elif ( not hasattr(o, \"chrpos\") or not isinstance(o.chrpos,", ") self.lines = [\"\"] + self.split(\"\\n\") self.table = list(it.accumulate([(len(line) + 1) for line", "indicate error position on exception handling. \"\"\" LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\", \"source\"}", "offset: int = 0, length: int = None, ): \"\"\"This object is a", "return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{name}:", "if self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return y else: return None", "EOFType instance. \"\"\" eof = self.EOF() self.track(eof, len(self)) return eof # -*- Tracking", "< 0 ): return False elif ( not hasattr(o, \"source\") or not isinstance(o.source,", "not trackable.\") else: return False @classmethod def _trackable(cls, o: object): if not hasattr(o,", "integer (int).\") elif length is None: length = len(self) elif not isinstance(length, int)", "= False) -> object | None: if self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if", "must be an integer (int), not ({type(lexpos)}).\") elif not 0 <= lexpos <=", "dictionary from lexpos.\"\"\" if lexpos is None: return self.eof.lexinfo elif not isinstance(lexpos, int):", "setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) def propagate(self, x:", "length: int = None, ): \"\"\"Separates the source code in multiple lines. 
A", "isinstance(length, int) or length < 0: raise TypeError(\"'length' must be a positive integer", "if (fname is not None) else \"<string>\" ) self.lines = [\"\"] + self.split(\"\\n\")", "A blank first line is added for the indexing to start at 1", "at 1 instead of 0. `self.table` keeps track of the (cumulative) character count.\"\"\"", "setattr( o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"])", "= None, name: str = None): if target is None or not self.trackable(target):", "TypeError(f\"Object '{o}' of type '{type(o)}' is not trackable.\") else: return False @classmethod def", "): return False elif ( not hasattr(o, \"lexpos\") or not isinstance(o.lexpos, int) or", "lineno += 1 if lineno == len(self.table): return self.eof.lexinfo else: return { \"lineno\":", ") class EOF(object): pass @property def eof(self): \"\"\"Virtual object to represent the End-of-File", "None, name: str = None): if target is None or not self.trackable(target): if", "is None: length = len(self) elif not isinstance(length, int) or length < 0:", "not in o.lexinfo for key in cls.LEXKEYS): return False else: if ( not", "{\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def __new__( cls, *, fname: str = None, buffer:", "return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\"", "cls).__new__(cls, buffer) else: raise ValueError(\"Either 'fname' or 'buffer' must be provided.\") def __init__(", "not None: raise ValueError( \"Can't work with both 'fname' and 'buffer' parameters, choose", "\"\"\"Truth-value for emptiness checking.\"\"\" return self.__len__() > 0 def getlex(self, lexpos: int =", "( not hasattr(o, \"lexpos\") or not isinstance(o.lexpos, int) or o.lexpos < 0 ):", "'None'.\") self.offset = min(offset, len(self)) self.length = min(length, len(self) - self.offset) self.fpath =", "It's an anonymously 
created EOFType instance. \"\"\" eof = self.EOF() self.track(eof, len(self)) return", "str = None, offset: int = 0, length: int = None, ): \"\"\"Separates", "lines. A blank first line is added for the indexing to start at", "elif not isinstance(lexpos, int): raise TypeError(f\"'lexpos' must be an integer (int), not ({type(lexpos)}).\")", "as it from pathlib import Path class Source(str): \"\"\"This source code object aids", "return self.__len__() > 0 def getlex(self, lexpos: int = None) -> dict: \"\"\"Retrieves", "\"lexpos\") or not isinstance(o.lexpos, int) or o.lexpos < 0 ): return False elif", "self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr(", "multiple lines. A blank first line is added for the indexing to start", "= None): return self.__class__(fname=self.fpath, offset=offset, length=length) def error(self, msg: str, *, target: object", "len(self)) self.length = min(length, len(self) - self.offset) self.fpath = ( Path(fname).resolve(strict=True) if (fname", "name: str = None): if target is None or not self.trackable(target): if name", "False @classmethod def _trackable(cls, o: object): if not hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo,", "a string itself with additional features for position tracking. \"\"\" if fname is", "propagate(self, x: object, y: object, *, out: bool = False) -> object |", "' * target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object): pass @property def eof(self): \"\"\"Virtual object", "object. It's an anonymously created EOFType instance. 
\"\"\" eof = self.EOF() self.track(eof, len(self))", "not fpath.exists() or not fpath.is_file(): raise FileNotFoundError(f\"Invalid file path '{fname}'.\") with open(fpath, mode=\"r\",", "and {type(y)}\" ) @classmethod def trackable(cls, o: object, *, strict: bool = False):", "raise FileNotFoundError(f\"Invalid file path '{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\") as file: return super(Source,", "LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def __new__( cls, *, fname: str =", "is not None) else \"<string>\" ) self.lines = [\"\"] + self.split(\"\\n\") self.table =", "the source code in multiple lines. A blank first line is added for", "elif fname is not None: if not isinstance(fname, (str, Path)): raise TypeError( f\"Invalid", "None: return self.eof.lexinfo elif not isinstance(lexpos, int): raise TypeError(f\"'lexpos' must be an integer", "y else: return None else: raise TypeError( f\"Can't propagate lexinfo between types {type(x)}", "TypeError(\"'offset' must be a positive integer (int).\") elif length is None: length =", "= None, buffer: str = None, offset: int = 0, length: int =", "+ self.offset + 1 lineno = 1 while lineno < len(self.table) and lexpos", "\"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__,", "False): if cls._trackable(o): return True elif strict: print(o, o.lexinfo) raise TypeError(f\"Object '{o}' of", "= None, ): \"\"\"Separates the source code in multiple lines. A blank first", "for the given source object. It's an anonymously created EOFType instance. 
\"\"\" eof", "\"\"\"This source code object aids the tracking of tokens in order to indicate", "a positive integer (int).\") elif length is None: length = len(self) elif not", "length: int = None): return self.__class__(fname=self.fpath, offset=offset, length=length) def error(self, msg: str, *,", "self, } def slice(self, offset: int = 0, length: int = None): return", "source code object aids the tracking of tokens in order to indicate error", "from __future__ import annotations # Standard Library import itertools as it from pathlib", "raise TypeError( f\"Invalid type '{type(buffer)}' for 'buffer'. Must be 'str'.\" ) return super(Source,", "{\"chrpos\": 0, \"lineno\": 0, \"lexpos\": 0, \"source\": None}) if not hasattr(o.__class__, \"__lextrack__\"): setattr(", "o.lexinfo) raise TypeError(f\"Object '{o}' of type '{type(o)}' is not trackable.\") else: return False", "property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) @classmethod def blank(cls, o: object): setattr(o,", "Path)): raise TypeError( f\"Invalid type '{type(fname)}' for 'fname'. Must be 'str' or 'Path'.\"", "return super(Source, cls).__new__(cls, buffer) else: raise ValueError(\"Either 'fname' or 'buffer' must be provided.\")", "EOF(object): pass @property def eof(self): \"\"\"Virtual object to represent the End-of-File for the", "False elif ( not hasattr(o, \"lexpos\") or not isinstance(o.lexpos, int) or o.lexpos <", "count.\"\"\" if not isinstance(offset, int) or offset < 0: raise TypeError(\"'offset' must be", "object, *, out: bool = False) -> object | None: if self.trackable(x, strict=True)", "raise TypeError( f\"Invalid type '{type(fname)}' for 'fname'. 
Must be 'str' or 'Path'.\" )", "= {\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def __new__( cls, *, fname: str = None,", "int = None) -> dict: \"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\" if lexpos is", "else: raise ValueError(\"Either 'fname' or 'buffer' must be provided.\") def __init__( self, *,", "offset: int = 0, length: int = None): return self.__class__(fname=self.fpath, offset=offset, length=length) def", "\"lineno\", \"source\"} def __new__( cls, *, fname: str = None, buffer: str =", "hasattr(o, \"source\") or not isinstance(o.source, Source) and not o.source is None ): return", "o: object): setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\": 0, \"lexpos\": 0, \"source\": None}) if", "if target is None or not self.trackable(target): if name is not None: return", "not fpath.is_file(): raise FileNotFoundError(f\"Invalid file path '{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\") as file:", "self.trackable(target): if name is not None: return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return", "str): raise TypeError( f\"Invalid type '{type(buffer)}' for 'buffer'. Must be 'str'.\" ) return", "cls, *, fname: str = None, buffer: str = None, offset: int =", "isinstance(o.chrpos, int) or o.chrpos < 0 ): return False elif ( not hasattr(o,", "length < 0: raise TypeError(\"'length' must be a positive integer (int) or 'None'.\")", "be 'str' or 'Path'.\" ) fpath = Path(fname) if not fpath.exists() or not", "0: raise TypeError(\"'offset' must be a positive integer (int).\") elif length is None:", "\"lineno\") or not isinstance(o.lineno, int) or o.lineno < 0 ): return False elif", "list(it.accumulate([(len(line) + 1) for line in self.lines])) def __str__(self): return self[self.offset : self.offset", "source code in multiple lines. 
A blank first line is added for the", "must be provided.\") def __init__( self, *, fname: str = None, buffer: str", "and lexpos >= self.table[lineno]: lineno += 1 if lineno == len(self.table): return self.eof.lexinfo", "'{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object):", "strict: print(o, o.lexinfo) raise TypeError(f\"Object '{o}' of type '{type(o)}' is not trackable.\") else:", "hasattr(o, \"chrpos\") or not isinstance(o.chrpos, int) or o.chrpos < 0 ): return False", "(str, Path)): raise TypeError( f\"Invalid type '{type(fname)}' for 'fname'. Must be 'str' or", "\"source\") or not isinstance(o.source, Source) and not o.source is None ): return False", "return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if name is not None: return ( f\"In", "def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for emptiness checking.\"\"\" return self.__len__() >", "or 'buffer' must be provided.\") def __init__( self, *, fname: str = None,", "exception handling. \"\"\" LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def __new__( cls, *,", "None: length = len(self) elif not isinstance(length, int) or length < 0: raise", "type '{type(buffer)}' for 'buffer'. Must be 'str'.\" ) return super(Source, cls).__new__(cls, buffer) else:", "lexpos is None: return self.eof.lexinfo elif not isinstance(lexpos, int): raise TypeError(f\"'lexpos' must be", "return False else: if any(key not in o.lexinfo for key in cls.LEXKEYS): return", "self, *, fname: str = None, buffer: str = None, offset: int =", "position tracking. 
\"\"\" if fname is not None and buffer is not None:", "return False elif ( not hasattr(o, \"source\") or not isinstance(o.source, Source) and not", "and 'buffer' parameters, choose one option.\" ) elif fname is not None: if", "None) @classmethod def blank(cls, o: object): setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\": 0, \"lexpos\":", "for line in self.lines])) def __str__(self): return self[self.offset : self.offset + self.length] def", "pathlib import Path class Source(str): \"\"\"This source code object aids the tracking of", "lineno = 1 while lineno < len(self.table) and lexpos >= self.table[lineno]: lineno +=", "0 ): return False elif ( not hasattr(o, \"source\") or not isinstance(o.source, Source)", "self.table[lineno - 1], \"source\": self, } def slice(self, offset: int = 0, length:", "the (cumulative) character count.\"\"\" if not isinstance(offset, int) or offset < 0: raise", "def __bool__(self): \"\"\"Truth-value for emptiness checking.\"\"\" return self.__len__() > 0 def getlex(self, lexpos:", "'fname'. 
Must be 'str' or 'Path'.\" ) fpath = Path(fname) if not fpath.exists()", "{msg}\\n\" else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if name is not None: return", "[\"\"] + self.split(\"\\n\") self.table = list(it.accumulate([(len(line) + 1) for line in self.lines])) def", "target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\"", "= None, offset: int = 0, length: int = None, ): \"\"\"Separates the", "def __str__(self): return self[self.offset : self.offset + self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def", "(int).\") elif length is None: length = len(self) elif not isinstance(length, int) or", "return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for emptiness checking.\"\"\" return self.__len__() > 0 def", "\"\"\" if fname is not None and buffer is not None: raise ValueError(", "if not isinstance(offset, int) or offset < 0: raise TypeError(\"'offset' must be a", "'buffer' must be provided.\") def __init__( self, *, fname: str = None, buffer:", "line is added for the indexing to start at 1 instead of 0.", "< 0: raise TypeError(\"'offset' must be a positive integer (int).\") elif length is", "self.table[lineno]: lineno += 1 if lineno == len(self.table): return self.eof.lexinfo else: return {", "f\"{name}: {msg}\\n\" ) else: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{'", "and not o.source is None ): return False else: return True __all__ =", "in multiple lines. 
A blank first line is added for the indexing to", "-> dict: \"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\" if lexpos is None: return self.eof.lexinfo", "offset < 0: raise TypeError(\"'offset' must be a positive integer (int).\") elif length", "if name is not None: return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return f\"In", "return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if name", "int = 0, length: int = None, ): \"\"\"This object is a string", "anonymously created EOFType instance. \"\"\" eof = self.EOF() self.track(eof, len(self)) return eof #", "object to represent the End-of-File for the given source object. It's an anonymously", "msg: str, *, target: object = None, name: str = None): if target", "0 ): return False elif ( not hasattr(o, \"lexpos\") or not isinstance(o.lexpos, int)", "'buffer' parameters, choose one option.\" ) elif fname is not None: if not", "if any(key not in o.lexinfo for key in cls.LEXKEYS): return False else: if", "type '{type(fname)}' for 'fname'. Must be 'str' or 'Path'.\" ) fpath = Path(fname)", "'{self.fpath}':\\n\" f\"{msg}\\n\" else: if name is not None: return ( f\"In '{self.fpath}' at", "'{type(buffer)}' for 'buffer'. 
Must be 'str'.\" ) return super(Source, cls).__new__(cls, buffer) else: raise", "hasattr(o, \"lexpos\") or not isinstance(o.lexpos, int) or o.lexpos < 0 ): return False", "\"lineno\": 0, \"lexpos\": 0, \"source\": None}) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\",", "any(key not in o.lexinfo for key in cls.LEXKEYS): return False else: if (", "None, offset: int = 0, length: int = None, ): \"\"\"This object is", "1) for line in self.lines])) def __str__(self): return self[self.offset : self.offset + self.length]", "'fname' and 'buffer' parameters, choose one option.\" ) elif fname is not None:", "str = None): if target is None or not self.trackable(target): if name is", "{type(y)}\" ) @classmethod def trackable(cls, o: object, *, strict: bool = False): if", "0 <= lexpos <= self.length: return self.eof.lexinfo lexpos = lexpos + self.offset +", "if name is not None: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\"", "slice(self, offset: int = 0, length: int = None): return self.__class__(fname=self.fpath, offset=offset, length=length)", "in cls.LEXKEYS): return False else: if ( not hasattr(o, \"lineno\") or not isinstance(o.lineno,", "option.\" ) elif fname is not None: if not isinstance(fname, (str, Path)): raise", "( not hasattr(o, \"chrpos\") or not isinstance(o.chrpos, int) or o.chrpos < 0 ):", "path '{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\") as file: return super(Source, cls).__new__(cls, file.read()) elif", "dict): return False else: if any(key not in o.lexinfo for key in cls.LEXKEYS):", "object is a string itself with additional features for position tracking. \"\"\" if", ") else: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' *", "start at 1 instead of 0. 
`self.table` keeps track of the (cumulative) character", "# Future Imports from __future__ import annotations # Standard Library import itertools as", "self.offset + 1 lineno = 1 while lineno < len(self.table) and lexpos >=", "an anonymously created EOFType instance. \"\"\" eof = self.EOF() self.track(eof, len(self)) return eof", "this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\",", "code in multiple lines. A blank first line is added for the indexing", "f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object): pass @property def eof(self): \"\"\"Virtual", "object): setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\": 0, \"lexpos\": 0, \"source\": None}) if not", "lineno == len(self.table): return self.eof.lexinfo else: return { \"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\":", "class Source(str): \"\"\"This source code object aids the tracking of tokens in order", "strict: bool = False): if cls._trackable(o): return True elif strict: print(o, o.lexinfo) raise", "\"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr(", "return self.eof.lexinfo else: return { \"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\": lexpos - self.table[lineno", "o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) def propagate(self, x: object,", "elif not 0 <= lexpos <= self.length: return self.eof.lexinfo lexpos = lexpos +", "on exception handling. \"\"\" LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def __new__( cls,", "o: object, *, strict: bool = False): if cls._trackable(o): return True elif strict:", "False elif ( not hasattr(o, \"source\") or not isinstance(o.source, Source) and not o.source", "features for position tracking. 
\"\"\" if fname is not None and buffer is", "f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\" ) class", "or o.lineno < 0 ): return False elif ( not hasattr(o, \"lexpos\") or", "this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) def", "is not None: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' '", "object, *, strict: bool = False): if cls._trackable(o): return True elif strict: print(o,", "@classmethod def _trackable(cls, o: object): if not hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo, dict):", "as file: return super(Source, cls).__new__(cls, file.read()) elif buffer is not None: if not", "self.eof.lexinfo lexpos = lexpos + self.offset + 1 lineno = 1 while lineno", "if out: return y else: return None else: raise TypeError( f\"Can't propagate lexinfo", "not None and buffer is not None: raise ValueError( \"Can't work with both", "*, fname: str = None, buffer: str = None, offset: int = 0,", "and self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return y else: return None else: raise TypeError(", "Tracking -*- def track(self, o: object, lexpos: int = None): \"\"\"\"\"\" setattr(o, \"lexinfo\",", "Standard Library import itertools as it from pathlib import Path class Source(str): \"\"\"This", "choose one option.\" ) elif fname is not None: if not isinstance(fname, (str,", "this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda", "else: if any(key not in o.lexinfo for key in cls.LEXKEYS): return False else:", ") elif fname is not None: if not isinstance(fname, (str, Path)): raise TypeError(", "setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\": 0, \"lexpos\": 0, \"source\": None}) if not hasattr(o.__class__,", "not 
0 <= lexpos <= self.length: return self.eof.lexinfo lexpos = lexpos + self.offset", "fpath = Path(fname) if not fpath.exists() or not fpath.is_file(): raise FileNotFoundError(f\"Invalid file path", "\"<string>\" ) self.lines = [\"\"] + self.split(\"\\n\") self.table = list(it.accumulate([(len(line) + 1) for", "return self.eof.lexinfo lexpos = lexpos + self.offset + 1 lineno = 1 while", "f\"{name}: {msg}\\n\" else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if name is not None:", "\"chrpos\") or not isinstance(o.chrpos, int) or o.chrpos < 0 ): return False elif", "with open(fpath, mode=\"r\", encoding=\"utf-8\") as file: return super(Source, cls).__new__(cls, file.read()) elif buffer is", "checking.\"\"\" return self.__len__() > 0 def getlex(self, lexpos: int = None) -> dict:", "lexinfo dictionary from lexpos.\"\"\" if lexpos is None: return self.eof.lexinfo elif not isinstance(lexpos,", "provided.\") def __init__( self, *, fname: str = None, buffer: str = None,", "f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if name is not None: return ( f\"In '{self.fpath}'", "({type(lexpos)}).\") elif not 0 <= lexpos <= self.length: return self.eof.lexinfo lexpos = lexpos", "y: object, *, out: bool = False) -> object | None: if self.trackable(x,", "to represent the End-of-File for the given source object. It's an anonymously created", "'{type(fname)}' for 'fname'. Must be 'str' or 'Path'.\" ) fpath = Path(fname) if", "to indicate error position on exception handling. 
\"\"\" LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\",", "TypeError(\"'length' must be a positive integer (int) or 'None'.\") self.offset = min(offset, len(self))", "class EOF(object): pass @property def eof(self): \"\"\"Virtual object to represent the End-of-File for", ") setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) def propagate(self,", "str = None, buffer: str = None, offset: int = 0, length: int", ") setattr( o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda this:", "or not isinstance(o.source, Source) and not o.source is None ): return False else:", "\"\"\" eof = self.EOF() self.track(eof, len(self)) return eof # -*- Tracking -*- def", "= 0, length: int = None, ): \"\"\"Separates the source code in multiple", "elif not isinstance(length, int) or length < 0: raise TypeError(\"'length' must be a", "lexpos <= self.length: return self.eof.lexinfo lexpos = lexpos + self.offset + 1 lineno", "False elif ( not hasattr(o, \"chrpos\") or not isinstance(o.chrpos, int) or o.chrpos <", "(int), not ({type(lexpos)}).\") elif not 0 <= lexpos <= self.length: return self.eof.lexinfo lexpos", "else: return { \"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\": lexpos - self.table[lineno - 1],", "or o.chrpos < 0 ): return False elif ( not hasattr(o, \"source\") or", "\"\"\" # Future Imports from __future__ import annotations # Standard Library import itertools", "object, lexpos: int = None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"):", "\"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\", property(lambda this:", "or not isinstance(o.lineno, int) or o.lineno < 0 ): return False elif (", "\"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\": lexpos - self.table[lineno - 1], \"source\": 
self, }", "None) else \"<string>\" ) self.lines = [\"\"] + self.split(\"\\n\") self.table = list(it.accumulate([(len(line) +", "'{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if name is not", "positive integer (int) or 'None'.\") self.offset = min(offset, len(self)) self.length = min(length, len(self)", "{msg}\\n\" ) else: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' '", "setattr(o.__class__, \"__lextrack__\", None) def propagate(self, x: object, y: object, *, out: bool =", "isinstance(buffer, str): raise TypeError( f\"Invalid type '{type(buffer)}' for 'buffer'. Must be 'str'.\" )", "int = None, ): \"\"\"Separates the source code in multiple lines. A blank", "setattr(o, \"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"])", "0 def getlex(self, lexpos: int = None) -> dict: \"\"\"Retrieves lexinfo dictionary from", "this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\",", "- self.table[lineno - 1], \"source\": self, } def slice(self, offset: int = 0,", "x: object, y: object, *, out: bool = False) -> object | None:", "eof(self): \"\"\"Virtual object to represent the End-of-File for the given source object. 
It's", "| None: if self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return y else:", "False) -> object | None: if self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if out:", "return super(Source, cls).__new__(cls, file.read()) elif buffer is not None: if not isinstance(buffer, str):", "def __new__( cls, *, fname: str = None, buffer: str = None, offset:", "Path(fname).resolve(strict=True) if (fname is not None) else \"<string>\" ) self.lines = [\"\"] +", "raise TypeError( f\"Can't propagate lexinfo between types {type(x)} and {type(y)}\" ) @classmethod def", "None) -> dict: \"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\" if lexpos is None: return", "lexpos = lexpos + self.offset + 1 lineno = 1 while lineno <", "is None or not self.trackable(target): if name is not None: return f\"In '{self.fpath}':\\n\"", "with additional features for position tracking. \"\"\" if fname is not None and", "int) or length < 0: raise TypeError(\"'length' must be a positive integer (int)", "+ 1) for line in self.lines])) def __str__(self): return self[self.offset : self.offset +", "object | None: if self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return y", "not hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo, dict): return False else: if any(key not", "not ({type(lexpos)}).\") elif not 0 <= lexpos <= self.length: return self.eof.lexinfo lexpos =", "is not None: raise ValueError( \"Can't work with both 'fname' and 'buffer' parameters,", "this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) @classmethod def blank(cls, o: object): setattr(o, \"lexinfo\",", "lexpos: int = None) -> dict: \"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\" if lexpos", "= None, offset: int = 0, length: int = None, ): \"\"\"This object", "object = None, name: str = None): if target is None or not", "__bool__(self): \"\"\"Truth-value for emptiness 
checking.\"\"\" return self.__len__() > 0 def getlex(self, lexpos: int", "Future Imports from __future__ import annotations # Standard Library import itertools as it", "(fname is not None) else \"<string>\" ) self.lines = [\"\"] + self.split(\"\\n\") self.table", "< 0: raise TypeError(\"'length' must be a positive integer (int) or 'None'.\") self.offset", "not None: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' *", "__str__(self): return self[self.offset : self.offset + self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self):", "setattr( o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"])", "type '{type(o)}' is not trackable.\") else: return False @classmethod def _trackable(cls, o: object):", "= 0, length: int = None): return self.__class__(fname=self.fpath, offset=offset, length=length) def error(self, msg:", "object): if not hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo, dict): return False else: if", "or not fpath.is_file(): raise FileNotFoundError(f\"Invalid file path '{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\") as", "): return False elif ( not hasattr(o, \"chrpos\") or not isinstance(o.chrpos, int) or", "'fname' or 'buffer' must be provided.\") def __init__( self, *, fname: str =", "fpath.exists() or not fpath.is_file(): raise FileNotFoundError(f\"Invalid file path '{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\")", "hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\", property(lambda", "and buffer is not None: raise ValueError( \"Can't work with both 'fname' and", "def _trackable(cls, o: object): if not hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo, dict): return", "y.lexinfo.update(x.lexinfo) if out: 
return y else: return None else: raise TypeError( f\"Can't propagate", "hasattr(o, \"lexinfo\") or not isinstance(o.lexinfo, dict): return False else: if any(key not in", "FileNotFoundError(f\"Invalid file path '{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\") as file: return super(Source, cls).__new__(cls,", "Path(fname) if not fpath.exists() or not fpath.is_file(): raise FileNotFoundError(f\"Invalid file path '{fname}'.\") with", "not hasattr(o, \"chrpos\") or not isinstance(o.chrpos, int) or o.chrpos < 0 ): return", "if fname is not None and buffer is not None: raise ValueError( \"Can't", "'{type(o)}' is not trackable.\") else: return False @classmethod def _trackable(cls, o: object): if", "pass @property def eof(self): \"\"\"Virtual object to represent the End-of-File for the given", "length is None: length = len(self) elif not isinstance(length, int) or length <", "o.lineno < 0 ): return False elif ( not hasattr(o, \"lexpos\") or not", "\"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this:", "def propagate(self, x: object, y: object, *, out: bool = False) -> object", "this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\", None) def propagate(self, x: object, y: object, *,", "length: int = None, ): \"\"\"This object is a string itself with additional", "name is not None: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{'", "line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object): pass @property", "object, y: object, *, out: bool = False) -> object | None: if", "elif ( not hasattr(o, \"source\") or not isinstance(o.source, Source) and not o.source is", "the End-of-File for the given source object. 
It's an anonymously created EOFType instance.", "1 lineno = 1 while lineno < len(self.table) and lexpos >= self.table[lineno]: lineno", "self.__class__(fname=self.fpath, offset=offset, length=length) def error(self, msg: str, *, target: object = None, name:", "raise TypeError(f\"Object '{o}' of type '{type(o)}' is not trackable.\") else: return False @classmethod", "object aids the tracking of tokens in order to indicate error position on", "*, target: object = None, name: str = None): if target is None", "\"__lextrack__\", None) @classmethod def blank(cls, o: object): setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\": 0,", "self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return y else: return None else:", "import Path class Source(str): \"\"\"This source code object aids the tracking of tokens", "not hasattr(o, \"lexpos\") or not isinstance(o.lexpos, int) or o.lexpos < 0 ): return", "Imports from __future__ import annotations # Standard Library import itertools as it from", "min(length, len(self) - self.offset) self.fpath = ( Path(fname).resolve(strict=True) if (fname is not None)", "eof = self.EOF() self.track(eof, len(self)) return eof # -*- Tracking -*- def track(self,", "between types {type(x)} and {type(y)}\" ) @classmethod def trackable(cls, o: object, *, strict:", "code object aids the tracking of tokens in order to indicate error position", "return False else: if ( not hasattr(o, \"lineno\") or not isinstance(o.lineno, int) or", "for emptiness checking.\"\"\" return self.__len__() > 0 def getlex(self, lexpos: int = None)", "self.offset = min(offset, len(self)) self.length = min(length, len(self) - self.offset) self.fpath = (", "= lexpos + self.offset + 1 lineno = 1 while lineno < len(self.table)", "line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else: return (", "= 0, length: int = None, ): \"\"\"This object is a string itself", "self.track(eof, 
len(self)) return eof # -*- Tracking -*- def track(self, o: object, lexpos:", "this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\", property(lambda", "offset: int = 0, length: int = None, ): \"\"\"Separates the source code", "= None): if target is None or not self.trackable(target): if name is not", "' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else: return ( f\"In '{self.fpath}' at line", "'Path'.\" ) fpath = Path(fname) if not fpath.exists() or not fpath.is_file(): raise FileNotFoundError(f\"Invalid", "None, buffer: str = None, offset: int = 0, length: int = None,", "-*- def track(self, o: object, lexpos: int = None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos))", "not self.trackable(target): if name is not None: return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else:", "None, ): \"\"\"This object is a string itself with additional features for position", "be an integer (int), not ({type(lexpos)}).\") elif not 0 <= lexpos <= self.length:", "file.read()) elif buffer is not None: if not isinstance(buffer, str): raise TypeError( f\"Invalid", "for key in cls.LEXKEYS): return False else: if ( not hasattr(o, \"lineno\") or", "itself with additional features for position tracking. \"\"\" if fname is not None", "Must be 'str' or 'Path'.\" ) fpath = Path(fname) if not fpath.exists() or", "the indexing to start at 1 instead of 0. 
`self.table` keeps track of", "None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda", "Must be 'str'.\" ) return super(Source, cls).__new__(cls, buffer) else: raise ValueError(\"Either 'fname' or", "> 0 def getlex(self, lexpos: int = None) -> dict: \"\"\"Retrieves lexinfo dictionary", "None or not self.trackable(target): if name is not None: return f\"In '{self.fpath}':\\n\" f\"{name}:", "else: return ( f\"In '{self.fpath}' at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\"", "0, \"lineno\": 0, \"lexpos\": 0, \"source\": None}) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__,", "None}) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr(", "self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return y else: return None else: raise TypeError( f\"Can't", "out: return y else: return None else: raise TypeError( f\"Can't propagate lexinfo between", "= ( Path(fname).resolve(strict=True) if (fname is not None) else \"<string>\" ) self.lines =", "is not trackable.\") else: return False @classmethod def _trackable(cls, o: object): if not", "key in cls.LEXKEYS): return False else: if ( not hasattr(o, \"lineno\") or not", "isinstance(o.source, Source) and not o.source is None ): return False else: return True", "tracking. 
\"\"\" if fname is not None and buffer is not None: raise", "be 'str'.\" ) return super(Source, cls).__new__(cls, buffer) else: raise ValueError(\"Either 'fname' or 'buffer'", "blank(cls, o: object): setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\": 0, \"lexpos\": 0, \"source\": None})", "positive integer (int).\") elif length is None: length = len(self) elif not isinstance(length,", "<= lexpos <= self.length: return self.eof.lexinfo lexpos = lexpos + self.offset + 1", "types {type(x)} and {type(y)}\" ) @classmethod def trackable(cls, o: object, *, strict: bool", "fpath.is_file(): raise FileNotFoundError(f\"Invalid file path '{fname}'.\") with open(fpath, mode=\"r\", encoding=\"utf-8\") as file: return", "TypeError( f\"Can't propagate lexinfo between types {type(x)} and {type(y)}\" ) @classmethod def trackable(cls,", "of tokens in order to indicate error position on exception handling. \"\"\" LEXKEYS", "): \"\"\"This object is a string itself with additional features for position tracking.", "TypeError(f\"'lexpos' must be an integer (int), not ({type(lexpos)}).\") elif not 0 <= lexpos", "None else: raise TypeError( f\"Can't propagate lexinfo between types {type(x)} and {type(y)}\" )", "open(fpath, mode=\"r\", encoding=\"utf-8\") as file: return super(Source, cls).__new__(cls, file.read()) elif buffer is not", "lexpos, \"chrpos\": lexpos - self.table[lineno - 1], \"source\": self, } def slice(self, offset:", "def slice(self, offset: int = 0, length: int = None): return self.__class__(fname=self.fpath, offset=offset,", "def error(self, msg: str, *, target: object = None, name: str = None):", "return True elif strict: print(o, o.lexinfo) raise TypeError(f\"Object '{o}' of type '{type(o)}' is", "0. 
`self.table` keeps track of the (cumulative) character count.\"\"\" if not isinstance(offset, int)", "not isinstance(o.lexinfo, dict): return False else: if any(key not in o.lexinfo for key", "from pathlib import Path class Source(str): \"\"\"This source code object aids the tracking", "hasattr(o, \"lineno\") or not isinstance(o.lineno, int) or o.lineno < 0 ): return False", "while lineno < len(self.table) and lexpos >= self.table[lineno]: lineno += 1 if lineno", ">= self.table[lineno]: lineno += 1 if lineno == len(self.table): return self.eof.lexinfo else: return", "integer (int) or 'None'.\") self.offset = min(offset, len(self)) self.length = min(length, len(self) -", "target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object): pass @property def eof(self): \"\"\"Virtual object to represent", ") setattr( o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\", property(lambda this:", "the given source object. It's an anonymously created EOFType instance. 
\"\"\" eof =", "if ( not hasattr(o, \"lineno\") or not isinstance(o.lineno, int) or o.lineno < 0", "__repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for emptiness checking.\"\"\" return self.__len__() > 0", "trackable(cls, o: object, *, strict: bool = False): if cls._trackable(o): return True elif", "+= 1 if lineno == len(self.table): return self.eof.lexinfo else: return { \"lineno\": lineno,", "\"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\" if lexpos is None: return self.eof.lexinfo elif not", "- self.offset) self.fpath = ( Path(fname).resolve(strict=True) if (fname is not None) else \"<string>\"", "else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if name is not None: return (", "o: object, lexpos: int = None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__,", "dict: \"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\" if lexpos is None: return self.eof.lexinfo elif", "eof # -*- Tracking -*- def track(self, o: object, lexpos: int = None):", "property(lambda this: this.lexinfo[\"lexpos\"]) ) setattr( o.__class__, \"source\", property(lambda this: this.lexinfo[\"source\"]) ) setattr(o.__class__, \"__lextrack__\",", "or not isinstance(o.lexinfo, dict): return False else: if any(key not in o.lexinfo for", "not isinstance(o.source, Source) and not o.source is None ): return False else: return", "def eof(self): \"\"\"Virtual object to represent the End-of-File for the given source object.", "keeps track of the (cumulative) character count.\"\"\" if not isinstance(offset, int) or offset", "# -*- Tracking -*- def track(self, o: object, lexpos: int = None): \"\"\"\"\"\"", "* target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object): pass @property def eof(self): \"\"\"Virtual object to", "for 'buffer'. Must be 'str'.\" ) return super(Source, cls).__new__(cls, buffer) else: raise ValueError(\"Either", "'buffer'. 
Must be 'str'.\" ) return super(Source, cls).__new__(cls, buffer) else: raise ValueError(\"Either 'fname'", ") setattr(o.__class__, \"__lextrack__\", None) @classmethod def blank(cls, o: object): setattr(o, \"lexinfo\", {\"chrpos\": 0,", "\"chrpos\": lexpos - self.table[lineno - 1], \"source\": self, } def slice(self, offset: int", "'{o}' of type '{type(o)}' is not trackable.\") else: return False @classmethod def _trackable(cls,", "\"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) ) setattr(", "raise TypeError(f\"'lexpos' must be an integer (int), not ({type(lexpos)}).\") elif not 0 <=", "None: if self.trackable(x, strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return y else: return", "= self.EOF() self.track(eof, len(self)) return eof # -*- Tracking -*- def track(self, o:", "} def slice(self, offset: int = 0, length: int = None): return self.__class__(fname=self.fpath,", "None): return self.__class__(fname=self.fpath, offset=offset, length=length) def error(self, msg: str, *, target: object =", "(int) or 'None'.\") self.offset = min(offset, len(self)) self.length = min(length, len(self) - self.offset)", "f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else: return ( f\"In '{self.fpath}'", "@classmethod def blank(cls, o: object): setattr(o, \"lexinfo\", {\"chrpos\": 0, \"lineno\": 0, \"lexpos\": 0,", "= list(it.accumulate([(len(line) + 1) for line in self.lines])) def __str__(self): return self[self.offset :", "raise TypeError(\"'offset' must be a positive integer (int).\") elif length is None: length", "\"lexpos\": 0, \"source\": None}) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this:", "buffer is not None: if not isinstance(buffer, str): raise TypeError( f\"Invalid type '{type(buffer)}'", "self.lines])) def __str__(self): return self[self.offset : 
self.offset + self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\"", "int = None, ): \"\"\"This object is a string itself with additional features", "not isinstance(o.lexpos, int) or o.lexpos < 0 ): return False elif ( not", "is not None: if not isinstance(buffer, str): raise TypeError( f\"Invalid type '{type(buffer)}' for", "is not None and buffer is not None: raise ValueError( \"Can't work with", "< len(self.table) and lexpos >= self.table[lineno]: lineno += 1 if lineno == len(self.table):", "setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"])", "return False elif ( not hasattr(o, \"lexpos\") or not isinstance(o.lexpos, int) or o.lexpos", "error position on exception handling. \"\"\" LEXKEYS = {\"lexpos\", \"chrpos\", \"lineno\", \"source\"} def", "not isinstance(o.chrpos, int) or o.chrpos < 0 ): return False elif ( not", "Source) and not o.source is None ): return False else: return True __all__", "isinstance(o.lexpos, int) or o.lexpos < 0 ): return False elif ( not hasattr(o,", "\"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) )", "or not self.trackable(target): if name is not None: return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\"", "not hasattr(o, \"lineno\") or not isinstance(o.lineno, int) or o.lineno < 0 ): return", "trackable.\") else: return False @classmethod def _trackable(cls, o: object): if not hasattr(o, \"lexinfo\")", "annotations # Standard Library import itertools as it from pathlib import Path class", "target: object = None, name: str = None): if target is None or", "\"chrpos\", \"lineno\", \"source\"} def __new__( cls, *, fname: str = None, buffer: str", "integer (int), not ({type(lexpos)}).\") elif not 0 <= lexpos <= self.length: return self.eof.lexinfo", "'{self.fpath}' at line 
{target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{name}: {msg}\\n\" ) else:", "is not None: if not isinstance(fname, (str, Path)): raise TypeError( f\"Invalid type '{type(fname)}'", "0, length: int = None, ): \"\"\"Separates the source code in multiple lines.", "self[self.offset : self.offset + self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for", "isinstance(offset, int) or offset < 0: raise TypeError(\"'offset' must be a positive integer", "o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__, \"lexpos\", property(lambda this: this.lexinfo[\"lexpos\"]) )", "self.table = list(it.accumulate([(len(line) + 1) for line in self.lines])) def __str__(self): return self[self.offset", "is not None: return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\"", "or length < 0: raise TypeError(\"'length' must be a positive integer (int) or", "len(self.table): return self.eof.lexinfo else: return { \"lineno\": lineno, \"lexpos\": lexpos, \"chrpos\": lexpos -", "*, out: bool = False) -> object | None: if self.trackable(x, strict=True) and", "int = 0, length: int = None, ): \"\"\"Separates the source code in", "__new__( cls, *, fname: str = None, buffer: str = None, offset: int", "min(offset, len(self)) self.length = min(length, len(self) - self.offset) self.fpath = ( Path(fname).resolve(strict=True) if", "False else: if any(key not in o.lexinfo for key in cls.LEXKEYS): return False", "at line {target.lineno}:\\n\" f\"{self.lines[target.lineno]}\\n\" f\"{' ' * target.chrpos}^\\n\" f\"{msg}\\n\" ) class EOF(object): pass", "def __init__( self, *, fname: str = None, buffer: str = None, offset:", "1 while lineno < len(self.table) and lexpos >= self.table[lineno]: lineno += 1 if", "1 if lineno == len(self.table): return self.eof.lexinfo else: return { \"lineno\": lineno, 
\"lexpos\":", "if not fpath.exists() or not fpath.is_file(): raise FileNotFoundError(f\"Invalid file path '{fname}'.\") with open(fpath,", "\"lexinfo\", {\"chrpos\": 0, \"lineno\": 0, \"lexpos\": 0, \"source\": None}) if not hasattr(o.__class__, \"__lextrack__\"):", "super(Source, cls).__new__(cls, buffer) else: raise ValueError(\"Either 'fname' or 'buffer' must be provided.\") def", "'str'.\" ) return super(Source, cls).__new__(cls, buffer) else: raise ValueError(\"Either 'fname' or 'buffer' must", "int) or o.lexpos < 0 ): return False elif ( not hasattr(o, \"chrpos\")", "+ self.length] def __repr__(self): return f\"{self.__class__.__name__}({self.fpath!r})\" def __bool__(self): \"\"\"Truth-value for emptiness checking.\"\"\" return", "0: raise TypeError(\"'length' must be a positive integer (int) or 'None'.\") self.offset =", "\"\"\"Separates the source code in multiple lines. A blank first line is added", "int) or o.lineno < 0 ): return False elif ( not hasattr(o, \"lexpos\")", "property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\", property(lambda this: this.lexinfo[\"lineno\"]) ) setattr( o.__class__,", "length = len(self) elif not isinstance(length, int) or length < 0: raise TypeError(\"'length'", "`self.table` keeps track of the (cumulative) character count.\"\"\" if not isinstance(offset, int) or", "lexpos: int = None): \"\"\"\"\"\" setattr(o, \"lexinfo\", self.getlex(lexpos)) if not hasattr(o.__class__, \"__lextrack__\"): setattr(", "int = None): return self.__class__(fname=self.fpath, offset=offset, length=length) def error(self, msg: str, *, target:", "propagate lexinfo between types {type(x)} and {type(y)}\" ) @classmethod def trackable(cls, o: object,", "o.lexpos < 0 ): return False elif ( not hasattr(o, \"chrpos\") or not", "( not hasattr(o, \"lineno\") or not isinstance(o.lineno, int) or o.lineno < 0 ):", "0, length: int = None, ): \"\"\"This object is a string itself with", "None, offset: int = 0, length: int = None, ): 
\"\"\"Separates the source", "lexinfo between types {type(x)} and {type(y)}\" ) @classmethod def trackable(cls, o: object, *,", "Library import itertools as it from pathlib import Path class Source(str): \"\"\"This source", "False else: if ( not hasattr(o, \"lineno\") or not isinstance(o.lineno, int) or o.lineno", "of 0. `self.table` keeps track of the (cumulative) character count.\"\"\" if not isinstance(offset,", "getlex(self, lexpos: int = None) -> dict: \"\"\"Retrieves lexinfo dictionary from lexpos.\"\"\" if", "str, *, target: object = None, name: str = None): if target is", "aids the tracking of tokens in order to indicate error position on exception", "None: return f\"In '{self.fpath}':\\n\" f\"{name}: {msg}\\n\" else: return f\"In '{self.fpath}':\\n\" f\"{msg}\\n\" else: if", "return y else: return None else: raise TypeError( f\"Can't propagate lexinfo between types", "__future__ import annotations # Standard Library import itertools as it from pathlib import", "in o.lexinfo for key in cls.LEXKEYS): return False else: if ( not hasattr(o,", "not hasattr(o, \"source\") or not isinstance(o.source, Source) and not o.source is None ):", "not hasattr(o.__class__, \"__lextrack__\"): setattr( o.__class__, \"chrpos\", property(lambda this: this.lexinfo[\"chrpos\"]) ) setattr( o.__class__, \"lineno\",", "strict=True) and self.trackable(y): y.lexinfo.update(x.lexinfo) if out: return y else: return None else: raise", "elif ( not hasattr(o, \"chrpos\") or not isinstance(o.chrpos, int) or o.chrpos < 0", "\"\"\"This object is a string itself with additional features for position tracking. \"\"\"", "instead of 0. `self.table` keeps track of the (cumulative) character count.\"\"\" if not", "None: if not isinstance(fname, (str, Path)): raise TypeError( f\"Invalid type '{type(fname)}' for 'fname'.", "added for the indexing to start at 1 instead of 0. 
`self.table` keeps", "file: return super(Source, cls).__new__(cls, file.read()) elif buffer is not None: if not isinstance(buffer," ]
[ "ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class,", "'isbn13': '1234567890123', 'num_pages': 99, 'average_rating': '4.1', } mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value", "= mock.Mock() mock_goodreads.search.return_value = { 'authors': [ {'name': '<NAME>'} ], 'title': 'Frying Up',", "db_session with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app,", "<filename>tests/test_tasks.py from __future__ import absolute_import from __future__ import unicode_literals from contextlib import contextmanager", "[ {'name': '<NAME>'} ], 'title': 'Frying Up', 'isbn13': '1234567890123', 'num_pages': 99, 'average_rating': '4.1',", "import datetime from flask import appcontext_pushed, g import mock from ogreserver.models.ebook import Ebook", "], 'title': 'Frying Up', 'isbn13': '1234567890123', 'num_pages': 99, 'average_rating': '4.1', } mock_amazon =", "assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql,", "ebook.isbn13 == '1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert", "query_ebook_metadata with 
inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata applied to the Ebook", "with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg'", "== '<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date']", "ensure all metadata applied to the Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author", "= Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title == 'Frying Up' assert ebook.isbn13", "import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata applied to the", "Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title == 'Frying Up' assert ebook.asin ==", "user.username ) assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): #", "started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure all metadata applied to the Ebook", "= mock.Mock() mock_amazon.search.return_value = None # late import inside Flask app_context from ogreserver.tasks", ") # ensure all metadata applied to the Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id)", "Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value", "'average_rating': '4.1', } mock_amazon = mock_amazon_class.return_value = 
mock.Mock() mock_amazon.search.return_value = None # late", "when only Amazon responds ''' mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = {", "= mock.Mock() mock_amazon.search.return_value = { 'author': '<NAME>', 'title': 'Frying Up', 'asin': 'BXXX999999', 'publication_date':", "'<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] ==", "test_image_upload(): pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3):", "mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = None # late import inside Flask app_context from", "# late import inside Flask app_context from ogreserver.tasks import conversion_search conversion_search() assert mock_conversion.search.call_count", "Amazon responds ''' mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = { 'author': '<NAME>',", "Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload(): pass def", "== 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock the object created", "= mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = None # late import inside Flask app_context", "None # late import inside Flask app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app,", "from contextlib import contextmanager import datetime from flask import appcontext_pushed, g import mock", "import query_ebook_metadata with 
inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task is started mock_image_upload.delay.assert_called_once_with(", "assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload(): pass def test_index_for_search():", "mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock the object", "flask_app, user, ebook_db_fixture_azw3): # late import inside Flask app_context from ogreserver.tasks import upload_ebook", "assert ebook.title == 'Frying Up' assert ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>'", "task when only Amazon responds ''' mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value =", "image_upload task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure all metadata applied", "from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata applied", "''' Test query_ebook_metadata task when only Goodreads responds ''' mock_goodreads = mock_goodreads_class.return_value =", "{ 'author': '<NAME>', 'title': 'Frying Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7, 28), 'image_url':", "late import inside Flask app_context from ogreserver.tasks import conversion_search conversion_search() assert mock_conversion.search.call_count ==", "import inside Flask app_context from ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3',", "g import mock from 
ogreserver.models.ebook import Ebook @contextmanager def inject_db_session(app, db_session): ''' Inject", "mock_amazon.search.return_value = None # late import inside Flask app_context from ogreserver.tasks import query_ebook_metadata", "ebook.author == '<NAME>' assert ebook.title == 'Frying Up' assert ebook.isbn13 == '1234567890123' assert", "from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task is", "unicode_literals from contextlib import contextmanager import datetime from flask import appcontext_pushed, g import", "ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title == 'Frying Up' assert", "mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Goodreads responds '''", "'title': 'Frying Up', 'isbn13': '1234567890123', 'num_pages': 99, 'average_rating': '4.1', } mock_amazon = mock_amazon_class.return_value", "ebook.author == '<NAME>' assert ebook.title == 'Frying Up' assert ebook.asin == 'BXXX999999' assert", "postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata applied to the Ebook object ebook =", "flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Amazon responds ''' mock_amazon", "mock_conversion = mock.Mock() # late import inside Flask app_context from ogreserver.tasks import conversion_search", "ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class,", "mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure all metadata applied to the Ebook object", "= None # late import 
inside Flask app_context from ogreserver.tasks import query_ebook_metadata with", "mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Amazon responds", "mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = None # late import inside Flask app_context from", "== 'Frying Up' assert ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title']", "{ 'authors': [ {'name': '<NAME>'} ], 'title': 'Frying Up', 'isbn13': '1234567890123', 'num_pages': 99,", "user, ebook_db_fixture_azw3): # late import inside Flask app_context from ogreserver.tasks import upload_ebook upload_ebook(", "'Frying Up', 'isbn13': '1234567890123', 'num_pages': 99, 'average_rating': '4.1', } mock_amazon = mock_amazon_class.return_value =", "query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata applied to the Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id)", "responds ''' mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = { 'authors': [ {'name':", "'4.1', } mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = None # late import", "1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock the object created from", "''' mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = { 'author': '<NAME>', 'title': 'Frying", "@mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when", "Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title == 'Frying 
Up' assert ebook.isbn13 ==", "'Frying Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI')", "from flask import appcontext_pushed, g import mock from ogreserver.models.ebook import Ebook @contextmanager def", "@mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock the object created from the Conversion()", "Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title == 'Frying", "assert ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28'", "== '<NAME>' assert ebook.title == 'Frying Up' assert ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author']", "flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Goodreads responds ''' mock_goodreads", "== 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload():", "mock.Mock() mock_goodreads.search.return_value = { 'authors': [ {'name': '<NAME>'} ], 'title': 'Frying Up', 'isbn13':", "'1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload(): pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session')", "test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3): # late import", 
"mock_goodreads.search.return_value = { 'authors': [ {'name': '<NAME>'} ], 'title': 'Frying Up', 'isbn13': '1234567890123',", "''' mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = { 'authors': [ {'name': '<NAME>'}", "28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = None #", "== '<NAME>' assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating']", "is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure all metadata applied to the", "with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql,", "@mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata", "= mock.Mock() # late import inside Flask app_context from ogreserver.tasks import conversion_search conversion_search()", "yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test", "assert ebook.author == '<NAME>' assert ebook.title == 'Frying Up' assert ebook.asin == 'BXXX999999'", "all metadata applied to the Ebook object ebook 
= Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author ==", "inject_db_session(app, db_session): ''' Inject the test SQLAlchemy Session into the Flask g object", "mock_amazon.search.return_value = { 'author': '<NAME>', 'title': 'Frying Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7,", "ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def", "responds ''' mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = { 'author': '<NAME>', 'title':", "'<NAME>' assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] ==", "only Goodreads responds ''' mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = { 'authors':", "== 'Frying Up' assert ebook.isbn13 == '1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert ebook.provider_metadata['goodreads']['title']", "ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def", "constructor mock_conversion_class.return_value = mock_conversion = mock.Mock() # late import inside Flask app_context from", "assert image_upload task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure all metadata", "Test query_ebook_metadata task when only Amazon responds ''' mock_amazon = mock_amazon_class.return_value = mock.Mock()", "mock_goodreads = mock_goodreads_class.return_value = mock.Mock() 
mock_goodreads.search.return_value = None # late import inside Flask", "app_context from ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert", "= { 'author': '<NAME>', 'title': 'Frying Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7, 28),", "@mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when", "Conversion() constructor mock_conversion_class.return_value = mock_conversion = mock.Mock() # late import inside Flask app_context", "== '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): '''", "= mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = { 'author': '<NAME>', 'title': 'Frying Up', 'asin':", "7, 28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = None", "flask import appcontext_pushed, g import mock from ogreserver.models.ebook import Ebook @contextmanager def inject_db_session(app,", "def test_image_upload(): pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user,", "'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert ebook.provider_metadata['amazon']['asin'] ==", "test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock 
the object created from the Conversion() constructor mock_conversion_class.return_value", "'Frying Up' assert ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title'] ==", "mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = { 'authors': [ {'name': '<NAME>'} ],", "created from the Conversion() constructor mock_conversion_class.return_value = mock_conversion = mock.Mock() # late import", "upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session')", "the object created from the Conversion() constructor mock_conversion_class.return_value = mock_conversion = mock.Mock() #", "contextmanager import datetime from flask import appcontext_pushed, g import mock from ogreserver.models.ebook import", "Flask app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload", "ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert", "'<NAME>'} ], 'title': 'Frying Up', 'isbn13': '1234567890123', 'num_pages': 99, 'average_rating': '4.1', } mock_amazon", "inside Flask app_context from ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username", "__future__ import unicode_literals from contextlib import contextmanager import datetime from flask import appcontext_pushed,", "@mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def 
test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task", "Goodreads responds ''' mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = { 'authors': [", "== '4.1' def test_image_upload(): pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store,", "import absolute_import from __future__ import unicode_literals from contextlib import contextmanager import datetime from", "mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Goodreads responds", "'authors': [ {'name': '<NAME>'} ], 'title': 'Frying Up', 'isbn13': '1234567890123', 'num_pages': 99, 'average_rating':", "mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = { 'author': '<NAME>', 'title': 'Frying Up', 'asin': 'BXXX999999',", "# assert image_upload task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure all", "assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class,", "'author': '<NAME>', 'title': 'Frying Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg',", "late import inside Flask app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id)", "assert ebook.isbn13 == '1234567890123' assert 
ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up'", "the Flask g object ''' def _handler(sender, **kwargs): g.db_session = db_session with appcontext_pushed.connected_to(_handler,", "Up' assert ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying", "db_session): ''' Inject the test SQLAlchemy Session into the Flask g object '''", "ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure all metadata applied to the Ebook object ebook", "mock the object created from the Conversion() constructor mock_conversion_class.return_value = mock_conversion = mock.Mock()", "ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Amazon responds ''' mock_amazon = mock_amazon_class.return_value", "query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure", "{'name': '<NAME>'} ], 'title': 'Frying Up', 'isbn13': '1234567890123', 'num_pages': 99, 'average_rating': '4.1', }", "def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3): # late", "== '<NAME>' assert ebook.title == 'Frying Up' assert ebook.isbn13 == '1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name']", "ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload(): pass def test_index_for_search(): pass", "@mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3): # late import inside Flask app_context", "from 
ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count", "ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload')", "'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db,", "mock from ogreserver.models.ebook import Ebook @contextmanager def inject_db_session(app, db_session): ''' Inject the test", "app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): '''", "assert ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying Up'", "'1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] ==", "def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only", "= mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = { 'authors': [ {'name': '<NAME>'} ], 'title':", "'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' 
@mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app,", "''' def _handler(sender, **kwargs): g.db_session = db_session with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI')", "import appcontext_pushed, g import mock from ogreserver.models.ebook import Ebook @contextmanager def inject_db_session(app, db_session):", "mock_goodreads.search.return_value = None # late import inside Flask app_context from ogreserver.tasks import query_ebook_metadata", "import inside Flask app_context from ogreserver.tasks import conversion_search conversion_search() assert mock_conversion.search.call_count == 1", "def inject_db_session(app, db_session): ''' Inject the test SQLAlchemy Session into the Flask g", "# late import inside Flask app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql):", "only Amazon responds ''' mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = { 'author':", "'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload(): pass", "inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' )", "= { 'authors': [ {'name': '<NAME>'} ], 'title': 'Frying Up', 'isbn13': '1234567890123', 'num_pages':", "ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload(): pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') 
def test_upload_ebook(mock_setup_db,", "== 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload,", "import inside Flask app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) #", "'<NAME>' assert ebook.title == 'Frying Up' assert ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] ==", "ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert", "__future__ import absolute_import from __future__ import unicode_literals from contextlib import contextmanager import datetime", "Test query_ebook_metadata task when only Goodreads responds ''' mock_goodreads = mock_goodreads_class.return_value = mock.Mock()", "mock_conversion_class, flask_app): # mock the object created from the Conversion() constructor mock_conversion_class.return_value =", "'title': 'Frying Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads", "applied to the Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert", "assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999'", "appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, 
mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3):", "from ogreserver.models.ebook import Ebook @contextmanager def inject_db_session(app, db_session): ''' Inject the test SQLAlchemy", "ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3):", "'BXXX999999', 'publication_date': datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value = mock.Mock()", "contextlib import contextmanager import datetime from flask import appcontext_pushed, g import mock from", "'<NAME>', 'title': 'Frying Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg', }", "'2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test", "def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only", "mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = None # late import inside Flask", "assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock the", "'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') 
@mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app):", "**kwargs): g.db_session = db_session with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class,", "''' Inject the test SQLAlchemy Session into the Flask g object ''' def", "ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Goodreads responds ''' mock_goodreads = mock_goodreads_class.return_value", "assert ebook.author == '<NAME>' assert ebook.title == 'Frying Up' assert ebook.isbn13 == '1234567890123'", "object ''' def _handler(sender, **kwargs): g.db_session = db_session with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload')", "ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert", "'num_pages': 99, 'average_rating': '4.1', } mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = None", "mock.Mock() # late import inside Flask app_context from ogreserver.tasks import conversion_search conversion_search() assert", "task when only Goodreads responds ''' mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value =", "into the Flask g object ''' def _handler(sender, **kwargs): g.db_session = db_session with", "'Frying Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads =", "flask_app): # mock the object created from the Conversion() constructor mock_conversion_class.return_value = mock_conversion", "app_context from ogreserver.tasks import query_ebook_metadata with 
inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata", "99, 'average_rating': '4.1', } mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = None #", "from the Conversion() constructor mock_conversion_class.return_value = mock_conversion = mock.Mock() # late import inside", "Flask g object ''' def _handler(sender, **kwargs): g.db_session = db_session with appcontext_pushed.connected_to(_handler, app):", "= mock.Mock() mock_goodreads.search.return_value = None # late import inside Flask app_context from ogreserver.tasks", "ebook_db_fixture_azw3): # late import inside Flask app_context from ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id,", "# ensure all metadata applied to the Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert", "datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value =", "when only Goodreads responds ''' mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = {", "inside Flask app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure", "_handler(sender, **kwargs): g.db_session = db_session with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def", "import mock from ogreserver.models.ebook import Ebook @contextmanager def inject_db_session(app, db_session): ''' Inject the", "'publication_date': datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value", 
"@mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task", "import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count == 1", "Up', 'isbn13': '1234567890123', 'num_pages': 99, 'average_rating': '4.1', } mock_amazon = mock_amazon_class.return_value = mock.Mock()", "'asin': 'BXXX999999', 'publication_date': datetime.date(2015, 7, 28), 'image_url': 'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value =", "app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task", "Ebook @contextmanager def inject_db_session(app, db_session): ''' Inject the test SQLAlchemy Session into the", "Flask app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all", "Session into the Flask g object ''' def _handler(sender, **kwargs): g.db_session = db_session", "def _handler(sender, **kwargs): g.db_session = db_session with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI')", "== '1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13']", "mock.Mock() mock_amazon.search.return_value = { 'author': '<NAME>', 'title': 'Frying Up', 'asin': 'BXXX999999', 'publication_date': datetime.date(2015,", 
"@mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3): # late import inside Flask", "appcontext_pushed, g import mock from ogreserver.models.ebook import Ebook @contextmanager def inject_db_session(app, db_session): '''", "postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) #", "g.db_session = db_session with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class,", "late import inside Flask app_context from ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash,", "'image_url': 'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = None # late", "mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = { 'authors': [ {'name': '<NAME>'} ], 'title': 'Frying", "import contextmanager import datetime from flask import appcontext_pushed, g import mock from ogreserver.models.ebook", "mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = { 'author': '<NAME>', 'title': 'Frying Up',", "import unicode_literals from contextlib import contextmanager import datetime from flask import appcontext_pushed, g", "= db_session with appcontext_pushed.connected_to(_handler, app): yield @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, 
mock_goodreads_class, mock_image_upload,", "} mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value = None # late import inside", "task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id, 'http://example.com/egg.jpg' ) # ensure all metadata applied to", "= Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title == 'Frying Up' assert ebook.asin", "assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1'", "ogreserver.models.ebook import Ebook @contextmanager def inject_db_session(app, db_session): ''' Inject the test SQLAlchemy Session", "} mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = None # late import inside", "mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Amazon responds '''", "Inject the test SQLAlchemy Session into the Flask g object ''' def _handler(sender,", "mock.Mock() mock_amazon.search.return_value = None # late import inside Flask app_context from ogreserver.tasks import", "'Frying Up' assert ebook.isbn13 == '1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert ebook.provider_metadata['goodreads']['title'] ==", "with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata applied to the Ebook object", "Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def", "assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert 
ebook.provider_metadata['goodreads']['title'] == 'Frying Up' assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123'", "'4.1' def test_image_upload(): pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app,", "postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Goodreads responds ''' mock_goodreads =", "ebook.title == 'Frying Up' assert ebook.isbn13 == '1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert", "''' Test query_ebook_metadata task when only Amazon responds ''' mock_amazon = mock_amazon_class.return_value =", "SQLAlchemy Session into the Flask g object ''' def _handler(sender, **kwargs): g.db_session =", "the Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title ==", "g object ''' def _handler(sender, **kwargs): g.db_session = db_session with appcontext_pushed.connected_to(_handler, app): yield", "= mock_conversion = mock.Mock() # late import inside Flask app_context from ogreserver.tasks import", "assert ebook.title == 'Frying Up' assert ebook.isbn13 == '1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>'", "@mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock the object created from the", "ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task is started", "= mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = None # late import inside Flask app_context", "metadata applied to the Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == 
'<NAME>'", "Up' assert ebook.isbn13 == '1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>' assert ebook.provider_metadata['goodreads']['title'] == 'Frying", "def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3): # late import inside Flask app_context from", "def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock the object created from the Conversion() constructor", "mock_conversion_class.return_value = mock_conversion = mock.Mock() # late import inside Flask app_context from ogreserver.tasks", "object ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title == 'Frying Up'", "== 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert ebook.provider_metadata['amazon']['title'] == 'Frying Up' assert ebook.provider_metadata['amazon']['asin']", "query_ebook_metadata task when only Goodreads responds ''' mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value", "the Conversion() constructor mock_conversion_class.return_value = mock_conversion = mock.Mock() # late import inside Flask", "'http://example.com/egg.jpg', } mock_goodreads = mock_goodreads_class.return_value = mock.Mock() mock_goodreads.search.return_value = None # late import", "pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3): # late import inside", "# late import inside Flask app_context from ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub',", "Flask app_context from ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username )", "@contextmanager def inject_db_session(app, db_session): ''' Inject the 
test SQLAlchemy Session into the Flask", "postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Amazon responds ''' mock_amazon =", "object created from the Conversion() constructor mock_conversion_class.return_value = mock_conversion = mock.Mock() # late", "test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Goodreads", "inside Flask app_context from ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert", ") assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion') @mock.patch('ogreserver.tasks.setup_db_session') def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app): # mock", "import Ebook @contextmanager def inject_db_session(app, db_session): ''' Inject the test SQLAlchemy Session into", "from __future__ import absolute_import from __future__ import unicode_literals from contextlib import contextmanager import", "'1234567890123', 'num_pages': 99, 'average_rating': '4.1', } mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value =", "query_ebook_metadata task when only Amazon responds ''' mock_amazon = mock_amazon_class.return_value = mock.Mock() mock_amazon.search.return_value", "'http://example.com/egg.jpg' ) # ensure all metadata applied to the Ebook object ebook =", "mock.Mock() mock_goodreads.search.return_value = None # late import inside Flask app_context from ogreserver.tasks import", "from __future__ import unicode_literals from contextlib import contextmanager import datetime from flask import", "inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata applied to the Ebook object ebook", "'<NAME>' assert ebook.title == 'Frying Up' assert ebook.isbn13 == 
'1234567890123' assert ebook.provider_metadata['goodreads']['authors'][0]['name'] ==", "pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3): #", "== 'Frying Up' assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999' assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28' @mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI')", "test SQLAlchemy Session into the Flask g object ''' def _handler(sender, **kwargs): g.db_session", "# mock the object created from the Conversion() constructor mock_conversion_class.return_value = mock_conversion =", "ogreserver.tasks import upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count ==", "ogreserver.tasks import query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # ensure all metadata applied to", "@mock.patch('ogreserver.tasks.image_upload') @mock.patch('ogreserver.tasks.GoodreadsAPI') @mock.patch('ogreserver.tasks.AmazonAPI') def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata", "upload_ebook upload_ebook( ebook_db_fixture_azw3.id, 'egg.epub', ebook_db_fixture_azw3.original_version.source_format.file_hash, 'azw3', user.username ) assert mock_s3_store.upload_ebook.call_count == 1 @mock.patch('ogreserver.tasks.Conversion')", "test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3): ''' Test query_ebook_metadata task when only Amazon", "the test SQLAlchemy Session into the Flask g object ''' def 
_handler(sender, **kwargs):", "to the Ebook object ebook = Ebook.query.get(ebook_db_fixture_azw3.id) assert ebook.author == '<NAME>' assert ebook.title", "query_ebook_metadata with inject_db_session(flask_app, postgresql): query_ebook_metadata(ebook_db_fixture_azw3.id) # assert image_upload task is started mock_image_upload.delay.assert_called_once_with( ebook_db_fixture_azw3.id,", "== '1234567890123' assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload(): pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store')", "ebook.title == 'Frying Up' assert ebook.asin == 'BXXX999999' assert ebook.provider_metadata['amazon']['author'] == '<NAME>' assert", "mock_s3_store, flask_app, user, ebook_db_fixture_azw3): # late import inside Flask app_context from ogreserver.tasks import", "absolute_import from __future__ import unicode_literals from contextlib import contextmanager import datetime from flask", "datetime from flask import appcontext_pushed, g import mock from ogreserver.models.ebook import Ebook @contextmanager", "assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1' def test_image_upload(): pass def test_index_for_search(): pass @mock.patch('ogreserver.tasks.s3_store') @mock.patch('ogreserver.tasks.setup_db_session') def", "test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3): # late import inside Flask app_context from ogreserver.tasks" ]
[ "% ( self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp = self.useFixture(", "Unless required by applicable law or agreed to in writing, software # distributed", "docker.errors.ImageNotFound: # Image isn't on system so no worries pass def test_failed_image_build(self): temp", "self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir() ) self.useFixture(", "for the specific language governing permissions and limitations # under the License. #", "is # not avaliable, as it is function parsing stream output from docker.", "docker.from_env(version='auto', timeout=180) as client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image isn't on system", "imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) e = self.assertRaises( windlass.exc.WindlassPushPullException,", "% uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180) as client:", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "the License. 
You may obtain # a copy of the License at #", "import tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None): if dockerfileprefix: self.dockerfile = '%s.Dockerfile'", "def setUp(self): super().setUp() self.random_name = 'test_%s' % uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self):", "+ e.errors: self.assertIn(line, debug_output) # Exception is currently raised in piece of code", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180) as client: try: client.api.remove_image(self.random_name)", "self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException ) def test_push_image(self):", "# This will be chaned in future. 
# self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self):", "self.assertIn(line, debug_output) def test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path,", "self.random_name self.useFixture( DockerImage(imname, 'simple') ) e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors)", "uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180) as client: try:", "'FROM alpine\\n' 'RUN exit 0\\n' ) windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname", "= docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' %", "= e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self): temp", "retrieval of logs otherwise the API can sometimes return # an empty result", "with the License. 
You may obtain # a copy of the License at", "docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) # To capture all output to inspect, must delay", "import uuid import windlass.exc import windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self, imagename,", "im = windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client = docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) #", "isn't on system so no worries pass def test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir()", "e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output) # Exception is currently", "test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w') as f:", "'w') as f: f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo -n $ARGUMENT >", ") e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data)", "'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM", "# To capture all output to inspect, must delay removal until # after", "f: f.write( 'FROM alpine\\n' 'RUN exit 0\\n' ) windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') def", "windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException ) def test_push_image(self): imname", "self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output)", "parsing stream output from docker. 
# This will be chaned in future. #", "def test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) )", "use this file except in compliance with the License. You may obtain #", "f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo -n $ARGUMENT > content.txt\\n' 'CMD cat", "alpine\\n' 'RUN exit 0\\n' ) windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname =", "all output to inspect, must delay removal until # after retrieval of logs", "> content.txt\\n' 'CMD cat content.txt' ) im = windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client", "is currently raised in piece of code where this info is # not", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "return # an empty result c = client.containers.create(im) try: c.start() result = c.wait()", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "not avaliable, as it is function parsing stream output from docker. # This", "= imagename def _setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath =", "piece of code where this info is # not avaliable, as it is", "content.txt' ) im = windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client = docker.from_env( version='auto', timeout=180)", "this info is # not avaliable, as it is function parsing stream output", "implied. 
See the # License for the specific language governing permissions and limitations", "windlass.exc import windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None): if dockerfileprefix:", "= 'test_%s' % uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180)", "mock_artifact.name = 'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance(", "unittest.mock import uuid import windlass.exc import windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self,", "uuid import windlass.exc import windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None):", "it is function parsing stream output from docker. # This will be chaned", "self.assertIn(line, debug_output) # Exception is currently raised in piece of code where this", "tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None): if dockerfileprefix: self.dockerfile = '%s.Dockerfile' %", "dockerfileprefix: self.dockerfile = '%s.Dockerfile' % dockerfileprefix else: self.dockerfile = '%s.Dockerfile' % imagename self.imagename", "= '127.0.0.1:%d/%s' % ( self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp", "the specific language governing permissions and limitations # under the License. 
# import", "debug_output) def test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w')", "e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message() for line", "% dockerfileprefix else: self.dockerfile = '%s.Dockerfile' % imagename self.imagename = imagename def _setUp(self):", "you may # not use this file except in compliance with the License.", "1\\n' ) e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name)", "as it is function parsing stream output from docker. # This will be", "governing permissions and limitations # under the License. # import docker import fixtures", "'RUN exit 0\\n' ) windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname = '127.0.0.1:23/%s'", "KIND, either express or implied. See the # License for the specific language", "output to inspect, must delay removal until # after retrieval of logs otherwise", "artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func,", "self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException ) def test_push_image(self): imname = '127.0.0.1:%d/%s' % (", "file except in compliance with the License. 
You may obtain # a copy", "test_retry_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact):", "with docker.from_env(version='auto', timeout=180) as client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image isn't on", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "2018 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License,", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "self.random_name self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name", "fixtures import logging import pathlib import unittest import unittest.mock import uuid import windlass.exc", "result c = client.containers.create(im) try: c.start() result = c.wait() output = c.logs(stdout=True, stderr=True)", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "output from docker. # This will be chaned in future. 
# self.assertIsNotNone(e.artifact_name) #", "if dockerfileprefix: self.dockerfile = '%s.Dockerfile' % dockerfileprefix else: self.dockerfile = '%s.Dockerfile' % imagename", "self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with open('%s/Dockerfile' % temp.path, 'w') as f:", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath,", "imagename self.imagename = imagename def _setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix()", "specific language governing permissions and limitations # under the License. # import docker", "so no worries pass def test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile'", "temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 1\\n' ) e =", "be chaned in future. # self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname = '127.0.0.1:23/%s'", "fail. 
self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name = 'test_%s' % uuid.uuid4().hex", "self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message() for line in e.out", "the # License for the specific language governing permissions and limitations # under", "Development LP # # Licensed under the Apache License, Version 2.0 (the \"License\");", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact", "cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180) as client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image", "-n $ARGUMENT > content.txt\\n' 'CMD cat content.txt' ) im = windlass.images.build_verbosly( self.random_name, temp.path,", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable(", "'%s.Dockerfile' % dockerfileprefix else: self.dockerfile = '%s.Dockerfile' % imagename self.imagename = imagename def", "(c) Copyright 2018 Hewlett Packard Enterprise Development LP # # Licensed under the", "LP # # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "is function parsing stream output from docker. 
# This will be chaned in", "= client.containers.create(im) try: c.start() result = c.wait() output = c.logs(stdout=True, stderr=True) finally: c.stop()", "required by applicable law or agreed to in writing, software # distributed under", "self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n'", "debug_output = e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output) # Exception", "= self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM", "applicable law or agreed to in writing, software # distributed under the License", "imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message() for line in e.out + e.errors: self.assertIn(line,", "in compliance with the License. You may obtain # a copy of the", "artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException ) def test_push_image(self): imname =", "or agreed to in writing, software # distributed under the License is distributed", "import unittest import unittest.mock import uuid import windlass.exc import windlass.images import tests.test_e2e class", ") with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN exit", "output = c.logs(stdout=True, stderr=True) finally: c.stop() c.remove() # make sure completed successfully self.assertEqual(0,", "<reponame>LaudateCorpus1/windlass # # (c) Copyright 2018 Hewlett Packard Enterprise Development LP # #", "DockerImage(imname, 'simple') ) e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output =", "client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image isn't on system so no worries pass def", "temp = self.useFixture( fixtures.TempDir() ) 
self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with open('%s/Dockerfile'", "self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message()", "language governing permissions and limitations # under the License. # import docker import", "dockerfile='Dockerfile') def test_failed_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) e", "= c.wait() output = c.logs(stdout=True, stderr=True) finally: c.stop() c.remove() # make sure completed", "alpine\\n' 'RUN exit 1\\n' ) e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile')", "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "import pathlib import unittest import unittest.mock import uuid import windlass.exc import windlass.images import", "f: f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo -n $ARGUMENT > content.txt\\n' 'CMD", "self.imagename = imagename def _setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath", "DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT',", "logging import pathlib import unittest import unittest.mock import uuid import windlass.exc import windlass.images", "f.write( 'FROM alpine\\n' 'RUN exit 0\\n' ) windlass.images.build_verbosly( self.random_name, 
temp.path, dockerfile='Dockerfile') def test_failed_push_image(self):", "would fail. self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name = 'test_%s' %", "import unittest.mock import uuid import windlass.exc import windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture): def", "imagename, dockerfileprefix=None): if dockerfileprefix: self.dockerfile = '%s.Dockerfile' % dockerfileprefix else: self.dockerfile = '%s.Dockerfile'", "will be added after successful building of image, as # otherwise image delete", "DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName'", "'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 0\\n' ) windlass.images.build_verbosly( self.random_name, temp.path,", "To capture all output to inspect, must delay removal until # after retrieval", "self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "successful building of image, as # otherwise image delete would fail. 
self.addCleanup(self.docker_client.images.remove, self.imagename)", "an empty result c = client.containers.create(im) try: c.start() result = c.wait() output =", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "system so no worries pass def test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir() ) with", "in e.out + e.errors: self.assertIn(line, debug_output) # Exception is currently raised in piece", "e.attempts[0], windlass.exc.WindlassPushPullException ) def test_push_image(self): imname = '127.0.0.1:%d/%s' % ( self.registry_port, self.random_name) self.useFixture(", "# otherwise image delete would fail. self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp()", "timeout=180) as client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image isn't on system so", "Copyright 2018 Hewlett Packard Enterprise Development LP # # Licensed under the Apache", "2.0 (the \"License\"); you may # not use this file except in compliance", "= pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename) #", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "will be chaned in future. 
# self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname =", "def test_push_image(self): imname = '127.0.0.1:%d/%s' % ( self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname)", "License, Version 2.0 (the \"License\"); you may # not use this file except", "= self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180) as client: try: client.api.remove_image(self.random_name) except", "% imagename self.imagename = imagename def _setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path =", "mock_artifact = unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact )", "of code where this info is # not avaliable, as it is function", "class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name = 'test_%s' % uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))", "test_failed_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) e = self.assertRaises(", "DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None): if dockerfileprefix: self.dockerfile = '%s.Dockerfile' % dockerfileprefix else:", "dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename) # Cleanup will be added after successful building", "docker import fixtures import logging import pathlib import unittest import unittest.mock import uuid", ") im = windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client = docker.from_env( version='auto', timeout=180) self.addCleanup(client.close)", "def __init__(self, imagename, dockerfileprefix=None): if 
dockerfileprefix: self.dockerfile = '%s.Dockerfile' % dockerfileprefix else: self.dockerfile", "else: self.dockerfile = '%s.Dockerfile' % imagename self.imagename = imagename def _setUp(self): self.docker_client =", "self.random_name, temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple')", "timeout=180) self.addCleanup(client.close) # To capture all output to inspect, must delay removal until", "agreed to in writing, software # distributed under the License is distributed on", "fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with open('%s/Dockerfile' % temp.path, 'w')", "imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname)", "self.useFixture( DockerImage(imname, 'simple') ) e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output", "'127.0.0.1:%d/%s' % ( self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp =", "try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image isn't on system so no worries pass", "version='auto', timeout=180) self.addCleanup(client.close) # To capture all output to inspect, must delay removal", "self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile),", "'RUN exit 1\\n' ) e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, 
dockerfile='Dockerfile') self.assertIsNotNone(e.out)", "the API can sometimes return # an empty result c = client.containers.create(im) try:", "self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name = 'test_%s' % uuid.uuid4().hex self.logger", "windlass.images.push_image(imname) def test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' )", "# Unless required by applicable law or agreed to in writing, software #", "dockerfileprefix else: self.dockerfile = '%s.Dockerfile' % imagename self.imagename = imagename def _setUp(self): self.docker_client", "% temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo -n", "by applicable law or agreed to in writing, software # distributed under the", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= self.useFixture( fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with open('%s/Dockerfile' %", "dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for line in e.out +", "pass def test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w')", "debug_output = e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self):", "'CMD cat content.txt' ) im = windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client = docker.from_env(", "delay removal until # after retrieval of logs otherwise the API can sometimes", 
"dockerfile='Dockerfile') client = docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) # To capture all output to", "echo -n $ARGUMENT > content.txt\\n' 'CMD cat content.txt' ) im = windlass.images.build_verbosly( self.random_name,", "cat content.txt' ) im = windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client = docker.from_env( version='auto',", "Image isn't on system so no worries pass def test_failed_image_build(self): temp = self.useFixture(", "def _setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build(", "% self.random_name self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock()", "imname = '127.0.0.1:%d/%s' % ( self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self):", "self.assertIsNotNone(e.errors) debug_output = e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output) #", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w') as f: f.write(", "function parsing stream output from docker. # This will be chaned in future.", "self.dockerfile = '%s.Dockerfile' % imagename self.imagename = imagename def _setUp(self): self.docker_client = docker.from_env(version='auto')", "image, as # otherwise image delete would fail. 
self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def", "raised in piece of code where this info is # not avaliable, as", "self.useFixture( fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with open('%s/Dockerfile' % temp.path,", "except in compliance with the License. You may obtain # a copy of", "pathlib import unittest import unittest.mock import uuid import windlass.exc import windlass.images import tests.test_e2e", ") windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture(", "c.logs(stdout=True, stderr=True) finally: c.stop() c.remove() # make sure completed successfully self.assertEqual(0, result['StatusCode']) self.assertEqual('somevalue',", "super().cleanUp() with docker.from_env(version='auto', timeout=180) as client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image isn't", "permissions and limitations # under the License. 
# import docker import fixtures import", "to in writing, software # distributed under the License is distributed on an", "code where this info is # not avaliable, as it is function parsing", "for line in e.out + e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self): temp = self.useFixture(", "__init__(self, imagename, dockerfileprefix=None): if dockerfileprefix: self.dockerfile = '%s.Dockerfile' % dockerfileprefix else: self.dockerfile =", "$ARGUMENT > content.txt\\n' 'CMD cat content.txt' ) im = windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile')", ") def test_push_image(self): imname = '127.0.0.1:%d/%s' % ( self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple'))", "= '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image,", "def test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w') as", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "try: c.start() result = c.wait() output = c.logs(stdout=True, stderr=True) finally: c.stop() c.remove() #", "test_push_image(self): imname = '127.0.0.1:%d/%s' % ( self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def", "with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 0\\n'", "# not use this file except in compliance with the License. 
You may", "sometimes return # an empty result c = client.containers.create(im) try: c.start() result =", "e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output", "= c.logs(stdout=True, stderr=True) finally: c.stop() c.remove() # make sure completed successfully self.assertEqual(0, result['StatusCode'])", "# License for the specific language governing permissions and limitations # under the", "'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 1\\n' ) e = self.assertRaises(", "with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN", ") self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with open('%s/Dockerfile' % temp.path, 'w') as", "'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo -n $ARGUMENT > content.txt\\n' 'CMD cat content.txt'", "import windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None): if dockerfileprefix: self.dockerfile", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "c = client.containers.create(im) try: c.start() result = c.wait() output = c.logs(stdout=True, stderr=True) finally:", "self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for line in", "in writing, software # distributed under the License is distributed on an \"AS", "Version 2.0 (the \"License\"); you may # not use this file except in", "mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException ) def 
test_push_image(self): imname = '127.0.0.1:%d/%s'", "except docker.errors.ImageNotFound: # Image isn't on system so no worries pass def test_failed_image_build(self):", "\"License\"); you may # not use this file except in compliance with the", "in future. # self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name", "# under the License. # import docker import fixtures import logging import pathlib", "fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with open('%s/Dockerfile' % temp.path, 'w') as f: f.write(", "= windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client = docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) # To", "= docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) # To capture all output to inspect, must", "temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 0\\n' ) windlass.images.build_verbosly( self.random_name,", ") with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n'", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "# (c) Copyright 2018 Hewlett Packard Enterprise Development LP # # Licensed under", "_setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path,", "unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3)", "of image, as # otherwise image delete would fail. 
self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry):", "import windlass.exc import windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None): if", "as f: f.write( 'FROM alpine\\n' 'RUN exit 0\\n' ) windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile')", "pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename) # Cleanup will be added", "= '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact", "not use this file except in compliance with the License. You may obtain", "self.random_name = 'test_%s' % uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with docker.from_env(version='auto',", "'127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact =", "temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo -n $ARGUMENT", "empty result c = client.containers.create(im) try: c.start() result = c.wait() output = c.logs(stdout=True,", "windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message() for line in e.out +", "capture all output to inspect, must delay removal until # after retrieval of", "self.dockerfile), tag=self.imagename) # Cleanup will be added after successful building of image, as", "License for the specific language governing permissions and limitations # under the License.", 
"self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output) def", "= self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message() for line in", "stderr=True) finally: c.stop() c.remove() # make sure completed successfully self.assertEqual(0, result['StatusCode']) self.assertEqual('somevalue', output.decode())", "# Cleanup will be added after successful building of image, as # otherwise", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "% (dockerpath, self.dockerfile), tag=self.imagename) # Cleanup will be added after successful building of", "Exception is currently raised in piece of code where this info is #", "# import docker import fixtures import logging import pathlib import unittest import unittest.mock", "# Image isn't on system so no worries pass def test_failed_image_build(self): temp =", "removal until # after retrieval of logs otherwise the API can sometimes return", "Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the", "OF ANY KIND, either express or implied. See the # License for the", "# not avaliable, as it is function parsing stream output from docker. #", "windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message() for line in e.out + e.errors:", "3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException ) def test_push_image(self): imname = '127.0.0.1:%d/%s' % ( self.registry_port,", "tag=self.imagename) # Cleanup will be added after successful building of image, as #", "self.addCleanup(client.close) # To capture all output to inspect, must delay removal until #", "e.errors: self.assertIn(line, debug_output) # Exception is currently raised in piece of code where", "License. 
# import docker import fixtures import logging import pathlib import unittest import", "path = pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename)", "content.txt\\n' 'CMD cat content.txt' ) im = windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client =", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e", "in piece of code where this info is # not avaliable, as it", "= '%s.Dockerfile' % dockerfileprefix else: self.dockerfile = '%s.Dockerfile' % imagename self.imagename = imagename", "e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' %", ") @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e =", "(the \"License\"); you may # not use this file except in compliance with", "+ e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile'", "@windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e = self.assertRaises(", "under the License. 
# import docker import fixtures import logging import pathlib import", "# # Unless required by applicable law or agreed to in writing, software", "super().setUp() self.random_name = 'test_%s' % uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with", "0\\n' ) windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name", "logs otherwise the API can sometimes return # an empty result c =", "docker. # This will be chaned in future. # self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def", "as f: f.write( 'FROM alpine\\n' 'RUN exit 1\\n' ) e = self.assertRaises( windlass.exc.WindlassBuildException,", "stream output from docker. # This will be chaned in future. # self.assertIsNotNone(e.artifact_name)", "f: f.write( 'FROM alpine\\n' 'RUN exit 1\\n' ) e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly,", "info is # not avaliable, as it is function parsing stream output from", "self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException ) def test_push_image(self): imname = '127.0.0.1:%d/%s' % ( self.registry_port, self.random_name)", "unittest import unittest.mock import uuid import windlass.exc import windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture):", "'127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname)", "exit 0\\n' ) windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname = '127.0.0.1:23/%s' %", "client = docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) # To capture all output to inspect,", "License. 
You may obtain # a copy of the License at # #", "no worries pass def test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' %", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "( self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir()", "be added after successful building of image, as # otherwise image delete would", "c.wait() output = c.logs(stdout=True, stderr=True) finally: c.stop() c.remove() # make sure completed successfully", "building of image, as # otherwise image delete would fail. self.addCleanup(self.docker_client.images.remove, self.imagename) class", "'test_%s' % uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180) as", "line in e.out + e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir()", "e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException )", "= self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output =", "'RUN echo -n $ARGUMENT > content.txt\\n' 'CMD cat content.txt' ) im = windlass.images.build_verbosly(", "ANY KIND, either express or implied. See the # License for the specific", "from docker. # This will be chaned in future. 
# self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data)", "debug_output) # Exception is currently raised in piece of code where this info", "self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180) as client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound:", "Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version", "= self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException ) def", "must delay removal until # after retrieval of logs otherwise the API can", "avaliable, as it is function parsing stream output from docker. # This will", "otherwise the API can sometimes return # an empty result c = client.containers.create(im)", "client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image isn't on system so no worries", "test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue' ) ) with", "worries pass def test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path,", "windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname,", "TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name = 'test_%s' % uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def", "setUp(self): super().setUp() self.random_name = 'test_%s' % uuid.uuid4().hex self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) def cleanUp(self): 
super().cleanUp()", "under the Apache License, Version 2.0 (the \"License\"); you may # not use", "# # (c) Copyright 2018 Hewlett Packard Enterprise Development LP # # Licensed", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "e.out + e.errors: self.assertIn(line, debug_output) # Exception is currently raised in piece of", "added after successful building of image, as # otherwise image delete would fail.", "'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir() ) self.useFixture( fixtures.EnvironmentVariable( 'WINDLASS_BUILDARG_ARGUMENT', 'somevalue'", "after successful building of image, as # otherwise image delete would fail. self.addCleanup(self.docker_client.images.remove,", "c.start() result = c.wait() output = c.logs(stdout=True, stderr=True) finally: c.stop() c.remove() # make", "dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename) # Cleanup will", "self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name =", "future. 
# self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture(", "self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name = 'test_%s' % uuid.uuid4().hex self.logger =", "'simple') ) e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message()", "See the # License for the specific language governing permissions and limitations #", "temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for line in e.out", "import logging import pathlib import unittest import unittest.mock import uuid import windlass.exc import", "windlass.images import tests.test_e2e class DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None): if dockerfileprefix: self.dockerfile =", "# Exception is currently raised in piece of code where this info is", "ARGUMENT\\n' 'RUN echo -n $ARGUMENT > content.txt\\n' 'CMD cat content.txt' ) im =", "Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0", "with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 1\\n'", "'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0], windlass.exc.WindlassPushPullException", "def test_failed_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) e =", "law or agreed to in writing, software # distributed under the License is", "the License. 
# import docker import fixtures import logging import pathlib import unittest", "windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for", "= pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename) # Cleanup will be", "express or implied. See the # License for the specific language governing permissions", "in e.out + e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir() )", "'somevalue' ) ) with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n'", "# after retrieval of logs otherwise the API can sometimes return # an", "client.containers.create(im) try: c.start() result = c.wait() output = c.logs(stdout=True, stderr=True) finally: c.stop() c.remove()", "def cleanUp(self): super().cleanUp() with docker.from_env(version='auto', timeout=180) as client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: #", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. See the # License for", "image delete would fail. 
self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name =", "open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 0\\n' )", "import docker import fixtures import logging import pathlib import unittest import unittest.mock import", "on system so no worries pass def test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir() )", "self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1)", "windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for line", "temp.path, dockerfile='Dockerfile') client = docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) # To capture all output", "f.write( 'FROM alpine\\n' 'RUN exit 1\\n' ) e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name,", "= unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts),", "# self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') )", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", ") e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) debug_output = e.debug_message() for", ") self.assertEqual(len(e.attempts), 3) self.assertIsInstance( 
e.attempts[0], windlass.exc.WindlassPushPullException ) def test_push_image(self): imname = '127.0.0.1:%d/%s' %", "self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple')", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 1\\n' )", "% self.random_name self.useFixture( DockerImage(imname, 'simple') ) e = self.assertRaises( windlass.exc.WindlassPushPullException, windlass.images.push_image, imname) self.assertIsNotNone(e.out)", "compliance with the License. You may obtain # a copy of the License", "windlass.exc.WindlassPushPullException ) def test_push_image(self): imname = '127.0.0.1:%d/%s' % ( self.registry_port, self.random_name) self.useFixture( DockerImage(imname,", "chaned in future. # self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname = '127.0.0.1:23/%s' %", "self.random_name, temp.path, dockerfile='Dockerfile') client = docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) # To capture all", "% temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN exit 1\\n' ) e", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "def test_retry_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') ) @windlass.retry.simple(retry_backoff=0.1) def", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "e.out + e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir() ) with", "% temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN 
exit 0\\n' ) windlass.images.build_verbosly(", "= e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output) # Exception is", "def test_image_build_delete(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w') as", "after retrieval of logs otherwise the API can sometimes return # an empty", "e.debug_message() for line in e.out + e.errors: self.assertIn(line, debug_output) def test_image_build_delete(self): temp =", "self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for line in e.out + e.errors:", "may # not use this file except in compliance with the License. You", "windlass.images.build_verbosly( self.random_name, temp.path, dockerfile='Dockerfile') client = docker.from_env( version='auto', timeout=180) self.addCleanup(client.close) # To capture", "= '%s.Dockerfile' % imagename self.imagename = imagename def _setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close)", "delete would fail. self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name = 'test_%s'", "either express or implied. 
See the # License for the specific language governing", "alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo -n $ARGUMENT > content.txt\\n' 'CMD cat content.txt' )", "as f: f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo -n $ARGUMENT > content.txt\\n'", "as client: try: client.api.remove_image(self.random_name) except docker.errors.ImageNotFound: # Image isn't on system so no", "= 'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException, artifact_pushing_func, mock_artifact ) self.assertEqual(len(e.attempts), 3) self.assertIsInstance( e.attempts[0],", "fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'RUN", "and limitations # under the License. # import docker import fixtures import logging", "currently raised in piece of code where this info is # not avaliable,", "this file except in compliance with the License. You may obtain # a", "self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s'", "otherwise image delete would fail. self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self): super().setUp() self.random_name", "# self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname,", "This will be chaned in future. # self.assertIsNotNone(e.artifact_name) # self.assertIsNotNone(e.debug_data) def test_retry_push_image(self): imname", "open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'ARG ARGUMENT\\n' 'RUN echo", "or implied. 
See the # License for the specific language governing permissions and", "(dockerpath, self.dockerfile), tag=self.imagename) # Cleanup will be added after successful building of image,", "exit 1\\n' ) e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path, dockerfile='Dockerfile') self.assertIsNotNone(e.out) self.assertIsNotNone(e.errors)", "limitations # under the License. # import docker import fixtures import logging import", "where this info is # not avaliable, as it is function parsing stream", "to inspect, must delay removal until # after retrieval of logs otherwise the", "self.registry_port, self.random_name) self.useFixture( DockerImage(imname, 'simple')) windlass.images.push_image(imname) def test_build_with_buildargs(self): temp = self.useFixture( fixtures.TempDir() )", "as # otherwise image delete would fail. self.addCleanup(self.docker_client.images.remove, self.imagename) class TestDockerUtils(tests.test_e2e.FakeRegistry): def setUp(self):", "dockerfileprefix=None): if dockerfileprefix: self.dockerfile = '%s.Dockerfile' % dockerfileprefix else: self.dockerfile = '%s.Dockerfile' %", "Cleanup will be added after successful building of image, as # otherwise image", "line in e.out + e.errors: self.assertIn(line, debug_output) # Exception is currently raised in", "'ARG ARGUMENT\\n' 'RUN echo -n $ARGUMENT > content.txt\\n' 'CMD cat content.txt' ) im", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "self.assertIsNotNone(e.errors) self.assertIsNotNone(e.artifact_name) self.assertIsNotNone(e.debug_data) debug_output = e.debug_message() for line in e.out + e.errors: self.assertIn(line,", "of logs otherwise the API can sometimes return # an empty result c", "pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename) # Cleanup", "'FROM 
alpine\\n' 'RUN exit 1\\n' ) e = self.assertRaises( windlass.exc.WindlassBuildException, windlass.images.build_verbosly, self.random_name, temp.path,", "for line in e.out + e.errors: self.assertIn(line, debug_output) # Exception is currently raised", ") ) with open('%s/Dockerfile' % temp.path, 'w') as f: f.write( 'FROM alpine\\n' 'ARG", "imagename def _setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path = pathlib.Path(__file__).parent.as_posix() dockerpath = pathlib.Path(__file__).stem", "import fixtures import logging import pathlib import unittest import unittest.mock import uuid import", "API can sometimes return # an empty result c = client.containers.create(im) try: c.start()", "'%s.Dockerfile' % imagename self.imagename = imagename def _setUp(self): self.docker_client = docker.from_env(version='auto') self.addCleanup(self.docker_client.close) path", "def artifact_pushing_func(artifact): windlass.images.push_image(imname) mock_artifact = unittest.mock.MagicMock() mock_artifact.name = 'ArtifactName' e = self.assertRaises( windlass.exc.FailedRetriesException,", "OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License", "obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #", "self.docker_client.images.build( path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename) # Cleanup will be added after", "path=path, dockerfile='%s/%s' % (dockerpath, self.dockerfile), tag=self.imagename) # Cleanup will be added after successful", "inspect, must delay removal until # after retrieval of logs otherwise the API", "until # after retrieval of logs otherwise the API can sometimes return #", "# an empty result c = client.containers.create(im) try: c.start() result = c.wait() output", "result = c.wait() output = c.logs(stdout=True, stderr=True) finally: c.stop() c.remove() # make sure", "temp.path, dockerfile='Dockerfile') def test_failed_push_image(self): imname = '127.0.0.1:23/%s' % self.random_name self.useFixture( DockerImage(imname, 'simple') )", "class DockerImage(fixtures.Fixture): def __init__(self, imagename, dockerfileprefix=None): if dockerfileprefix: self.dockerfile = '%s.Dockerfile' % dockerfileprefix", "can sometimes return # an empty result c = client.containers.create(im) try: c.start() result", "self.dockerfile = '%s.Dockerfile' % dockerfileprefix else: self.dockerfile = '%s.Dockerfile' % imagename self.imagename =", "test_failed_image_build(self): temp = self.useFixture( fixtures.TempDir() ) with open('%s/Dockerfile' % temp.path, 'w') as f:" ]
[ "html = html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0, app.run) #t.start() return html def send_request(s):", "global number_of_repeats global sleeping_time global link check_if_one = 0 what_now = 0 number_of_repeats", "</script> </body> </html> ''' if number_of_repeats <= 0: check_if_one = 1 return \"Success\"", "number_of_repeats global sleeping_time global link check_if_one = 0 what_now = 0 number_of_repeats =", "Online</title> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\">", ": \" + str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\", str(link)) html", "Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script>", "action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number of repeats</h3> <p", "Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number of", "= html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\", str(link)) html = html.replace(\"{sleeping}\", str(sleeping_time)) #t =", "== 0: number_of_repeats = int(request.args.get('number')) if sleeping_time == 0 : sleeping_time = float(request.args.get('sleeping'))", "check_if_one == 1: return \"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH STEP", "<meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome", 
"number_of_repeats -= 1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE html> <html", "align=\"center\"> <input name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number of repeats</h3> <p align=\"center\"> <input name=\"number\"", "\"\": s = request.args.get('text') link = s print(link) #response = urllib.request.urlopen(request.form['text']) while number_of_repeats", "+= 1 if number_of_repeats == 0: number_of_repeats = int(request.args.get('number')) if sleeping_time == 0", "check_if_one = 1 return \"Success\" print(\"NUMBER OF REPEATS : \" + str(number_of_repeats)) html", "what_now global number_of_repeats global sleeping_time global link if check_if_one == 1: return \"Success\"", "Flask(__name__) app.config['DEBUG'] = False print(\"START __ 0001\") check_if_one = 0 what_now = 0", "= html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0, app.run) #t.start() return html def send_request(s): try:", "of repeats</h3> <p align=\"center\"> <input name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3> <p align=\"center\">", "if (sleeping_time*(counter) >= 24): break counter +=1 number_of_repeats -= 1 send_request(link) print('#'*40) print(number_of_repeats)", "number_of_repeats <= 0: check_if_one = 1 return \"Success\" print(\"NUMBER OF REPEATS : \"", "#t = Timer(5.0, app.run) #t.start() return html def send_request(s): try: r = requests.get(s)", "<title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome to the", "print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser", "number_of_repeats = int(request.args.get('number')) if sleeping_time == 0 : sleeping_time = float(request.args.get('sleeping')) if link", "0 sleeping_time = 0 link = 
\"\" @app.route(\"/btn_find\") def get_ses(): global check_if_one global", "sleeping_time == 0 : sleeping_time = float(request.args.get('sleeping')) if link == \"\": s =", "flask import Flask, request import urllib.request import requests import time from threading import", "import Timer app = Flask(__name__) app.config['DEBUG'] = False print(\"START __ 0001\") check_if_one =", "global link if check_if_one == 1: return \"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS", "if sleeping_time == 0 : sleeping_time = float(request.args.get('sleeping')) if link == \"\": s", "global number_of_repeats global sleeping_time global link if check_if_one == 1: return \"Success\" counter", "= \"\" print(\"WHY ___\") html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\">", "charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online0001!</h2> <form action=\"/btn_find\">", "lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head> <body> <h2 align=\"center\">Welcome to the", "= s print(link) #response = urllib.request.urlopen(request.form['text']) while number_of_repeats > 0: if (sleeping_time*(counter) >=", "sleeping_time global link check_if_one = 0 what_now = 0 number_of_repeats = 0 sleeping_time", "= 0 sleeping_time = 0 link = \"\" @app.route(\"/btn_find\") def get_ses(): global check_if_one", "align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({ url: '/btn_find' }); }); location.reload();", "return \"Success\" print(\"NUMBER OF REPEATS : \" + str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats))", "r.status_code == 200: return 1 except requests.exceptions.HTTPError as err: send_request(s) @app.route('/') def source():", "if link == \"\": s = request.args.get('text') link = s print(link) #response =", 
"chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2>", "\" + str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\", str(link)) html =", "html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0, app.run) #t.start() return html def send_request(s): try: r", "send_request(s): try: r = requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if r.status_code == 200:", "</p> <p align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" > </p> </form> <script> </body>", "0: number_of_repeats = int(request.args.get('number')) if sleeping_time == 0 : sleeping_time = float(request.args.get('sleeping')) if", "get_ses(): global check_if_one global what_now global number_of_repeats global sleeping_time global link if check_if_one", "<= 0: check_if_one = 1 return \"Success\" print(\"NUMBER OF REPEATS : \" +", "from flask import Flask, request import urllib.request import requests import time from threading", "float(request.args.get('sleeping')) if link == \"\": s = request.args.get('text') link = s print(link) #response", "= 1 return \"Success\" print(\"NUMBER OF REPEATS : \" + str(number_of_repeats)) html =", "the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({ url: '/btn_find' }); }); location.reload(); </script> </body>", "the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number", "\"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH STEP IS NOW : \"", "app.run) #t.start() return html def send_request(s): try: r = requests.get(s) r.raise_for_status() 
print('#'*40) print(\"YES\")", "0 link = \"\" print(\"WHY ___\") html = '''<!DOCTYPE html> <html lang=\"en\"> <head>", "counter +=1 number_of_repeats -= 1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE", "= html.replace(\"{link}\", str(link)) html = html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0, app.run) #t.start() return", "global what_now global number_of_repeats global sleeping_time global link check_if_one = 0 what_now =", "<input name=\"sleeping\" type=\"text\" value=\"\"> </p> <p align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" >", "== 200: return 1 except requests.exceptions.HTTPError as err: send_request(s) @app.route('/') def source(): global", "link if check_if_one == 1: return \"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS", "if number_of_repeats == 0: number_of_repeats = int(request.args.get('number')) if sleeping_time == 0 : sleeping_time", "\"\" @app.route(\"/btn_find\") def get_ses(): global check_if_one global what_now global number_of_repeats global sleeping_time global", "html = html.replace(\"{link}\", str(link)) html = html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0, app.run) #t.start()", "name=\"sleeping\" type=\"text\" value=\"\"> </p> <p align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" > </p>", "print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\">", "{ $.ajax({ url: '/btn_find' }); }); location.reload(); </script> </body> </html> ''' if number_of_repeats", "</body> </html> ''' if number_of_repeats <= 0: check_if_one = 1 return \"Success\" print(\"NUMBER", "return 1 except requests.exceptions.HTTPError as err: send_request(s) @app.route('/') def source(): global check_if_one global", "chooser Online</title> 
</head> <body> <h2 align=\"center\">Welcome to the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p", "$.ajax({ url: '/btn_find' }); }); location.reload(); </script> </body> </html> ''' if number_of_repeats <=", "REPEATS : \" + str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\", str(link))", "Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({ url: '/btn_find' }); }); location.reload(); </script> </body> </html>", "= '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script>", "= requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if r.status_code == 200: return 1 except", "html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head> <body> <h2 align=\"center\">Welcome", "= \"\" @app.route(\"/btn_find\") def get_ses(): global check_if_one global what_now global number_of_repeats global sleeping_time", "<html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head> <body> <h2 align=\"center\">Welcome to", "+ str(what_now)) what_now += 1 if number_of_repeats == 0: number_of_repeats = int(request.args.get('number')) if", "</head> <body> <h2 align=\"center\">Welcome to the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input", "''' if number_of_repeats <= 0: check_if_one = 1 return \"Success\" print(\"NUMBER OF REPEATS", "<gh_stars>0 from flask import Flask, request import urllib.request import requests import time from", "r = requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if r.status_code == 200: return 1", "<head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online0001!</h2>", 
"<p align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" > </p> </form> <script> </body> </html>", "STEP IS NOW : \" + str(what_now)) what_now += 1 if number_of_repeats ==", "number_of_repeats > 0: if (sleeping_time*(counter) >= 24): break counter +=1 number_of_repeats -= 1", "+ str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\", str(link)) html = html.replace(\"{sleeping}\",", "def send_request(s): try: r = requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if r.status_code ==", "}); }); location.reload(); </script> </body> </html> ''' if number_of_repeats <= 0: check_if_one =", "number_of_repeats = 0 sleeping_time = 0 link = \"\" print(\"WHY ___\") html =", "1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE html> <html lang=\"en\"> <head>", "1 except requests.exceptions.HTTPError as err: send_request(s) @app.route('/') def source(): global check_if_one global what_now", "OF REPEATS : \" + str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\",", "= 0 what_now = 0 number_of_repeats = 0 sleeping_time = 0 link =", "str(number_of_repeats)) html = html.replace(\"{link}\", str(link)) html = html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0, app.run)", "r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if r.status_code == 200: return 1 except requests.exceptions.HTTPError as", "@app.route('/') def source(): global check_if_one global what_now global number_of_repeats global sleeping_time global link", "urllib.request.urlopen(request.form['text']) while number_of_repeats > 0: if (sleeping_time*(counter) >= 24): break counter +=1 number_of_repeats", "number_of_repeats global sleeping_time global link if check_if_one == 1: return \"Success\" counter =", "print(number_of_repeats) 
print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker", "IS NOW : \" + str(what_now)) what_now += 1 if number_of_repeats == 0:", "</p> <h3>Sleeping time</h3> <p align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\"> </p> <p align=\"center\"> <input", "what_now = 0 number_of_repeats = 0 sleeping_time = 0 link = \"\" print(\"WHY", "html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head>", "urllib.request import requests import time from threading import Timer app = Flask(__name__) app.config['DEBUG']", "print(\"START __ 0001\") check_if_one = 0 what_now = 0 number_of_repeats = 0 sleeping_time", "global check_if_one global what_now global number_of_repeats global sleeping_time global link if check_if_one ==", "= int(request.args.get('number')) if sleeping_time == 0 : sleeping_time = float(request.args.get('sleeping')) if link ==", "align=\"center\">Welcome to the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input name=\"text\" type=\"text\" value=\"\">", "NOW : \" + str(what_now)) what_now += 1 if number_of_repeats == 0: number_of_repeats", "</head> <body> <h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({ url: '/btn_find'", "<script> jQuery(document).ready(function() { $.ajax({ url: '/btn_find' }); }); location.reload(); </script> </body> </html> '''", "lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body>", "0 link = \"\" @app.route(\"/btn_find\") def get_ses(): global check_if_one global what_now global number_of_repeats", "str(what_now)) what_now += 1 if number_of_repeats == 0: number_of_repeats = 
int(request.args.get('number')) if sleeping_time", "#http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH STEP IS NOW : \" + str(what_now)) what_now +=", "'''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script", "while number_of_repeats > 0: if (sleeping_time*(counter) >= 24): break counter +=1 number_of_repeats -=", "html.replace(\"{link}\", str(link)) html = html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0, app.run) #t.start() return html", "print('#'*40) print(\"YES\") print('#'*40) if r.status_code == 200: return 1 except requests.exceptions.HTTPError as err:", "import urllib.request import requests import time from threading import Timer app = Flask(__name__)", "<body> <h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({ url: '/btn_find' });", "if number_of_repeats <= 0: check_if_one = 1 return \"Success\" print(\"NUMBER OF REPEATS :", "send_request(s) @app.route('/') def source(): global check_if_one global what_now global number_of_repeats global sleeping_time global", "WHICH STEP IS NOW : \" + str(what_now)) what_now += 1 if number_of_repeats", "align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\"> </p> <p align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\"", ">= 24): break counter +=1 number_of_repeats -= 1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time)", "html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script>", "html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\", str(link)) html = 
html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0,", "err: send_request(s) @app.route('/') def source(): global check_if_one global what_now global number_of_repeats global sleeping_time", "== 1: return \"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH STEP IS", "url: '/btn_find' }); }); location.reload(); </script> </body> </html> ''' if number_of_repeats <= 0:", "global check_if_one global what_now global number_of_repeats global sleeping_time global link check_if_one = 0", "print('#'*40) if r.status_code == 200: return 1 except requests.exceptions.HTTPError as err: send_request(s) @app.route('/')", "as err: send_request(s) @app.route('/') def source(): global check_if_one global what_now global number_of_repeats global", "to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({ url: '/btn_find' }); }); location.reload(); </script>", "html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\", str(link)) html = html.replace(\"{sleeping}\", str(sleeping_time)) #t", "def source(): global check_if_one global what_now global number_of_repeats global sleeping_time global link check_if_one", "<input name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3> <p align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\">", "<script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({", "<p align=\"center\"> <input name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3> <p align=\"center\"> <input name=\"sleeping\"", "request.args.get('text') link = s print(link) #response = urllib.request.urlopen(request.form['text']) while number_of_repeats > 0: if", "time.sleep(sleeping_time) html = '''<!DOCTYPE html> <html lang=\"en\"> <head> 
<meta charset=\"UTF-8\"> <title>Clicker chooser Online</title>", "IS WHICH STEP IS NOW : \" + str(what_now)) what_now += 1 if", "print(\"NUMBER OF REPEATS : \" + str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html =", ": sleeping_time = float(request.args.get('sleeping')) if link == \"\": s = request.args.get('text') link =", "'/btn_find' }); }); location.reload(); </script> </body> </html> ''' if number_of_repeats <= 0: check_if_one", "app.config['DEBUG'] = False print(\"START __ 0001\") check_if_one = 0 what_now = 0 number_of_repeats", "sleeping_time global link if check_if_one == 1: return \"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start", "requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if r.status_code == 200: return 1 except requests.exceptions.HTTPError", "= Timer(5.0, app.run) #t.start() return html def send_request(s): try: r = requests.get(s) r.raise_for_status()", "global sleeping_time global link check_if_one = 0 what_now = 0 number_of_repeats = 0", "= 0 link = \"\" print(\"WHY ___\") html = '''<!DOCTYPE html> <html lang=\"en\">", "__ 0001\") check_if_one = 0 what_now = 0 number_of_repeats = 0 sleeping_time =", "== 0 : sleeping_time = float(request.args.get('sleeping')) if link == \"\": s = request.args.get('text')", "number_of_repeats = 0 sleeping_time = 0 link = \"\" @app.route(\"/btn_find\") def get_ses(): global", "sleeping_time = float(request.args.get('sleeping')) if link == \"\": s = request.args.get('text') link = s", "jQuery(document).ready(function() { $.ajax({ url: '/btn_find' }); }); location.reload(); </script> </body> </html> ''' if", "src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() {", "= 0 number_of_repeats = 0 
sleeping_time = 0 link = \"\" print(\"WHY ___\")", "global link check_if_one = 0 what_now = 0 number_of_repeats = 0 sleeping_time =", "1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH STEP IS NOW : \" + str(what_now)) what_now", "= 0 link = \"\" @app.route(\"/btn_find\") def get_ses(): global check_if_one global what_now global", "send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta", "<h3>Number of repeats</h3> <p align=\"center\"> <input name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3> <p", "str(link)) html = html.replace(\"{sleeping}\", str(sleeping_time)) #t = Timer(5.0, app.run) #t.start() return html def", "name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3> <p align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\"> </p>", "<meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online0001!</h2> <form", "type=\"text\" value=\"\"> </p> <h3>Number of repeats</h3> <p align=\"center\"> <input name=\"number\" type=\"text\" value=\"\"> </p>", "<p align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\"> </p> <p align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\"", "html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script", "False print(\"START __ 0001\") check_if_one = 0 what_now = 0 number_of_repeats = 0", "requests import time from threading import Timer app = Flask(__name__) app.config['DEBUG'] = False", "s print(link) #response = urllib.request.urlopen(request.form['text']) while number_of_repeats > 0: if (sleeping_time*(counter) >= 24):", "<title>Clicker chooser Online</title> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3>", 
"location.reload(); </script> </body> </html> ''' if number_of_repeats <= 0: check_if_one = 1 return", "break counter +=1 number_of_repeats -= 1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html =", "try: r = requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if r.status_code == 200: return", "global what_now global number_of_repeats global sleeping_time global link if check_if_one == 1: return", "print(\"THIS IS WHICH STEP IS NOW : \" + str(what_now)) what_now += 1", ": \" + str(what_now)) what_now += 1 if number_of_repeats == 0: number_of_repeats =", "from threading import Timer app = Flask(__name__) app.config['DEBUG'] = False print(\"START __ 0001\")", "@app.route(\"/btn_find\") def get_ses(): global check_if_one global what_now global number_of_repeats global sleeping_time global link", "charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome to", "}); location.reload(); </script> </body> </html> ''' if number_of_repeats <= 0: check_if_one = 1", "0: check_if_one = 1 return \"Success\" print(\"NUMBER OF REPEATS : \" + str(number_of_repeats))", "time from threading import Timer app = Flask(__name__) app.config['DEBUG'] = False print(\"START __", "check_if_one global what_now global number_of_repeats global sleeping_time global link check_if_one = 0 what_now", "0 number_of_repeats = 0 sleeping_time = 0 link = \"\" print(\"WHY ___\") html", "1 if number_of_repeats == 0: number_of_repeats = int(request.args.get('number')) if sleeping_time == 0 :", "0 sleeping_time = 0 link = \"\" print(\"WHY ___\") html = '''<!DOCTYPE html>", "value=\"\"> </p> <h3>Number of repeats</h3> <p align=\"center\"> <input name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping", "value=\"\"> </p> <h3>Sleeping time</h3> <p 
align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\"> </p> <p align=\"center\">", "link = \"\" print(\"WHY ___\") html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta", "import Flask, request import urllib.request import requests import time from threading import Timer", "== \"\": s = request.args.get('text') link = s print(link) #response = urllib.request.urlopen(request.form['text']) while", "= request.args.get('text') link = s print(link) #response = urllib.request.urlopen(request.form['text']) while number_of_repeats > 0:", "return html def send_request(s): try: r = requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if", "= Flask(__name__) app.config['DEBUG'] = False print(\"START __ 0001\") check_if_one = 0 what_now =", "requests.exceptions.HTTPError as err: send_request(s) @app.route('/') def source(): global check_if_one global what_now global number_of_repeats", "</p> <h3>Number of repeats</h3> <p align=\"center\"> <input name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3>", "name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" > </p> </form> <script> </body> </html> ''' return html", "= 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH STEP IS NOW : \" + str(what_now))", "<script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function()", "Flask, request import urllib.request import requests import time from threading import Timer app", "\" + str(what_now)) what_now += 1 if number_of_repeats == 0: number_of_repeats = int(request.args.get('number'))", "<h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({ url: '/btn_find' }); });", "what_now global number_of_repeats global 
sleeping_time global link check_if_one = 0 what_now = 0", "0001\") check_if_one = 0 what_now = 0 number_of_repeats = 0 sleeping_time = 0", "align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" > </p> </form> <script> </body> </html> '''", "= False print(\"START __ 0001\") check_if_one = 0 what_now = 0 number_of_repeats =", "s = request.args.get('text') link = s print(link) #response = urllib.request.urlopen(request.form['text']) while number_of_repeats >", "<input name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number of repeats</h3> <p align=\"center\"> <input name=\"number\" type=\"text\"", "return \"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH STEP IS NOW :", "request import urllib.request import requests import time from threading import Timer app =", "\"\" print(\"WHY ___\") html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker", "<h3>Sleeping time</h3> <p align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\"> </p> <p align=\"center\"> <input name=\"start\"", "number_of_repeats == 0: number_of_repeats = int(request.args.get('number')) if sleeping_time == 0 : sleeping_time =", "sleeping_time = 0 link = \"\" @app.route(\"/btn_find\") def get_ses(): global check_if_one global what_now", "sleeping_time = 0 link = \"\" print(\"WHY ___\") html = '''<!DOCTYPE html> <html", "0 number_of_repeats = 0 sleeping_time = 0 link = \"\" @app.route(\"/btn_find\") def get_ses():", "def get_ses(): global check_if_one global what_now global number_of_repeats global sleeping_time global link if", "to the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input name=\"text\" type=\"text\" value=\"\"> </p>", "align=\"center\"> <input name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3> <p align=\"center\"> <input name=\"sleeping\" type=\"text\"", "repeats</h3> <p 
align=\"center\"> <input name=\"number\" type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3> <p align=\"center\"> <input", "= '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head> <body>", "200: return 1 except requests.exceptions.HTTPError as err: send_request(s) @app.route('/') def source(): global check_if_one", "#response = urllib.request.urlopen(request.form['text']) while number_of_repeats > 0: if (sleeping_time*(counter) >= 24): break counter", "link check_if_one = 0 what_now = 0 number_of_repeats = 0 sleeping_time = 0", "threading import Timer app = Flask(__name__) app.config['DEBUG'] = False print(\"START __ 0001\") check_if_one", "global sleeping_time global link if check_if_one == 1: return \"Success\" counter = 1", "link = \"\" @app.route(\"/btn_find\") def get_ses(): global check_if_one global what_now global number_of_repeats global", "html def send_request(s): try: r = requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40) if r.status_code", "src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2 align=\"center\">Welcome to the Clicker.online!{number_repeats}</h2> <script> jQuery(document).ready(function() { $.ajax({ url:", "what_now = 0 number_of_repeats = 0 sleeping_time = 0 link = \"\" @app.route(\"/btn_find\")", "time</h3> <p align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\"> </p> <p align=\"center\"> <input name=\"start\" id=\"BTN\"", "'''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> </head> <body> <h2", "link == \"\": s = request.args.get('text') link = s print(link) #response = urllib.request.urlopen(request.form['text'])", "source(): global check_if_one global what_now global number_of_repeats global sleeping_time global link check_if_one =", "print(\"YES\") print('#'*40) if r.status_code == 200: return 1 except requests.exceptions.HTTPError as err: 
send_request(s)", "#t.start() return html def send_request(s): try: r = requests.get(s) r.raise_for_status() print('#'*40) print(\"YES\") print('#'*40)", "name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number of repeats</h3> <p align=\"center\"> <input name=\"number\" type=\"text\" value=\"\">", "+=1 number_of_repeats -= 1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE html>", "type=\"text\" value=\"\"> </p> <p align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" > </p> </form>", "app = Flask(__name__) app.config['DEBUG'] = False print(\"START __ 0001\") check_if_one = 0 what_now", "0 : sleeping_time = float(request.args.get('sleeping')) if link == \"\": s = request.args.get('text') link", "1: return \"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH STEP IS NOW", "___\") html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title>", "value=\"\"> </p> <p align=\"center\"> <input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" > </p> </form> <script>", "<h2 align=\"center\">Welcome to the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input name=\"text\" type=\"text\"", "import requests import time from threading import Timer app = Flask(__name__) app.config['DEBUG'] =", "<head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head> <body> <h2", "Timer(5.0, app.run) #t.start() return html def send_request(s): try: r = requests.get(s) r.raise_for_status() print('#'*40)", "Timer app = Flask(__name__) app.config['DEBUG'] = False print(\"START __ 0001\") check_if_one = 0", "1 return \"Success\" print(\"NUMBER OF REPEATS : \" + str(number_of_repeats)) 
html = html.replace(\"{number_repeats}\",", "\"Success\" print(\"NUMBER OF REPEATS : \" + str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html", "<body> <h2 align=\"center\">Welcome to the Clicker.online0001!</h2> <form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input name=\"text\"", "if r.status_code == 200: return 1 except requests.exceptions.HTTPError as err: send_request(s) @app.route('/') def", "print(\"WHY ___\") html = '''<!DOCTYPE html> <html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser", "= float(request.args.get('sleeping')) if link == \"\": s = request.args.get('text') link = s print(link)", "import time from threading import Timer app = Flask(__name__) app.config['DEBUG'] = False print(\"START", "= 0 number_of_repeats = 0 sleeping_time = 0 link = \"\" @app.route(\"/btn_find\") def", "<h3>Link</h3> <p align=\"center\"> <input name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number of repeats</h3> <p align=\"center\">", "> 0: if (sleeping_time*(counter) >= 24): break counter +=1 number_of_repeats -= 1 send_request(link)", "link = s print(link) #response = urllib.request.urlopen(request.form['text']) while number_of_repeats > 0: if (sleeping_time*(counter)", "24): break counter +=1 number_of_repeats -= 1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html", "0: if (sleeping_time*(counter) >= 24): break counter +=1 number_of_repeats -= 1 send_request(link) print('#'*40)", "what_now += 1 if number_of_repeats == 0: number_of_repeats = int(request.args.get('number')) if sleeping_time ==", "print(link) #response = urllib.request.urlopen(request.form['text']) while number_of_repeats > 0: if (sleeping_time*(counter) >= 24): break", "<html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <title>Clicker chooser Online</title> <script src=\"//code.jquery.com/jquery-1.11.3.min.js\"></script> <script 
src=\"//code.jquery.com/jquery-migrate-1.2.1.min.js\"></script> </head>", "str(number_of_repeats)) html = html.replace(\"{number_repeats}\", str(number_of_repeats)) html = html.replace(\"{link}\", str(link)) html = html.replace(\"{sleeping}\", str(sleeping_time))", "<p align=\"center\"> <input name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number of repeats</h3> <p align=\"center\"> <input", "check_if_one = 0 what_now = 0 number_of_repeats = 0 sleeping_time = 0 link", "check_if_one global what_now global number_of_repeats global sleeping_time global link if check_if_one == 1:", "int(request.args.get('number')) if sleeping_time == 0 : sleeping_time = float(request.args.get('sleeping')) if link == \"\":", "0 what_now = 0 number_of_repeats = 0 sleeping_time = 0 link = \"\"", "= urllib.request.urlopen(request.form['text']) while number_of_repeats > 0: if (sleeping_time*(counter) >= 24): break counter +=1", "<input name=\"start\" id=\"BTN\" type=\"submit\" value=\"Start\" > </p> </form> <script> </body> </html> ''' return", "str(sleeping_time)) #t = Timer(5.0, app.run) #t.start() return html def send_request(s): try: r =", "= 0 sleeping_time = 0 link = \"\" print(\"WHY ___\") html = '''<!DOCTYPE", "if check_if_one == 1: return \"Success\" counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH", "<form action=\"/btn_find\"> <h3>Link</h3> <p align=\"center\"> <input name=\"text\" type=\"text\" value=\"\"> </p> <h3>Number of repeats</h3>", "type=\"text\" value=\"\"> </p> <h3>Sleeping time</h3> <p align=\"center\"> <input name=\"sleeping\" type=\"text\" value=\"\"> </p> <p", "(sleeping_time*(counter) >= 24): break counter +=1 number_of_repeats -= 1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40)", "</html> ''' if number_of_repeats <= 0: check_if_one = 1 return \"Success\" print(\"NUMBER OF", "counter = 1 #http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start print(\"THIS IS WHICH 
STEP IS NOW : \" +", "except requests.exceptions.HTTPError as err: send_request(s) @app.route('/') def source(): global check_if_one global what_now global", "-= 1 send_request(link) print('#'*40) print(number_of_repeats) print('#'*40) time.sleep(sleeping_time) html = '''<!DOCTYPE html> <html lang=\"en\">" ]
[ "dataclasses import dataclass @dataclass(frozen=True, eq=True) class NamespaceChild: namespace: str sort: int = -1", "int = -1 def __lt__(self, other: object): if not isinstance(other, NamespaceChild): return NotImplemented", "coding: utf-8 from dataclasses import dataclass @dataclass(frozen=True, eq=True) class NamespaceChild: namespace: str sort:", "@dataclass(frozen=True, eq=True) class NamespaceChild: namespace: str sort: int = -1 def __lt__(self, other:", "= -1 def __lt__(self, other: object): if not isinstance(other, NamespaceChild): return NotImplemented return", "# coding: utf-8 from dataclasses import dataclass @dataclass(frozen=True, eq=True) class NamespaceChild: namespace: str", "NamespaceChild: namespace: str sort: int = -1 def __lt__(self, other: object): if not", "utf-8 from dataclasses import dataclass @dataclass(frozen=True, eq=True) class NamespaceChild: namespace: str sort: int", "class NamespaceChild: namespace: str sort: int = -1 def __lt__(self, other: object): if", "__lt__(self, other: object): if not isinstance(other, NamespaceChild): return NotImplemented return self.sort < other.sort", "-1 def __lt__(self, other: object): if not isinstance(other, NamespaceChild): return NotImplemented return self.sort", "def __lt__(self, other: object): if not isinstance(other, NamespaceChild): return NotImplemented return self.sort <", "str sort: int = -1 def __lt__(self, other: object): if not isinstance(other, NamespaceChild):", "dataclass @dataclass(frozen=True, eq=True) class NamespaceChild: namespace: str sort: int = -1 def __lt__(self,", "<filename>src/data_manage/data_class/namespace_child.py # coding: utf-8 from dataclasses import dataclass @dataclass(frozen=True, eq=True) class NamespaceChild: namespace:", "import dataclass @dataclass(frozen=True, eq=True) class NamespaceChild: namespace: str sort: int = -1 def", "namespace: str sort: int = -1 def __lt__(self, other: object): if not isinstance(other,", "from dataclasses import dataclass 
@dataclass(frozen=True, eq=True) class NamespaceChild: namespace: str sort: int =", "sort: int = -1 def __lt__(self, other: object): if not isinstance(other, NamespaceChild): return", "eq=True) class NamespaceChild: namespace: str sort: int = -1 def __lt__(self, other: object):" ]
[ "db_utils.db_methods import get_jobs_table, get_taken_ids class TheMuseCrawler(): def __init__(self): self.source = \"themuse\" def scrape(self,", "in j[\"results\"]: if result[\"id\"] not in taken_ids \\ and \"landing_page\" in result[\"refs\"] \\", "result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job def get_query_results(self, params):", "db_utils.job import Job from db_utils.db_methods import get_jobs_table, get_taken_ids class TheMuseCrawler(): def __init__(self): self.source", "__init__(self): self.source = \"themuse\" def scrape(self, city, insert_jobs_into_db = True): jobs_table = get_jobs_table()", "= 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers = headers) time.sleep(0.7)", "in levels: total_pages = 9 for page in range(1, total_pages): params = {", "get_jobs_table, get_taken_ids class TheMuseCrawler(): def __init__(self): self.source = \"themuse\" def scrape(self, city, insert_jobs_into_db", "cities from db_utils.job import Job from db_utils.db_methods import get_jobs_table, get_taken_ids class TheMuseCrawler(): def", "link = result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job def", "= self.get_query_results(params) total_pages = int(j['page_count']) for result in j[\"results\"]: if result[\"id\"] not in", "'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers = headers) time.sleep(0.7) j", "name = result[\"name\"], category = category, city = city, source = self.source, contents", "city = city, source = self.source, contents = result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date", "from config import levels, headers, cities from db_utils.job import Job from 
db_utils.db_methods import", "import Job from db_utils.db_methods import get_jobs_table, get_taken_ids class TheMuseCrawler(): def __init__(self): self.source =", "total_pages = int(j['page_count']) for result in j[\"results\"]: if result[\"id\"] not in taken_ids \\", "and len(result[\"locations\"]) > 0 \\ and city == result[\"locations\"][0][\"name\"]: category = \"none\" if", "category = category, city = city, source = self.source, contents = result[\"contents\"], company", "self.source, contents = result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"],", "9 for page in range(1, total_pages): params = { 'page': str(page), 'location': city,", "requests from config import levels, headers, cities from db_utils.job import Job from db_utils.db_methods", "and city == result[\"locations\"][0][\"name\"]: category = \"none\" if len(result[\"categories\"]) > 0: category =", "params = { 'page': str(page), 'location': city, 'level': level} j = self.get_query_results(params) total_pages", "if result[\"id\"] not in taken_ids \\ and \"landing_page\" in result[\"refs\"] \\ and len(result[\"locations\"])", "= get_taken_ids(city, self.source) for level in levels: total_pages = 9 for page in", "\\ and city == result[\"locations\"][0][\"name\"]: category = \"none\" if len(result[\"categories\"]) > 0: category", "city, 'level': level} j = self.get_query_results(params) total_pages = int(j['page_count']) for result in j[\"results\"]:", "\"landing_page\" in result[\"refs\"] \\ and len(result[\"locations\"]) > 0 \\ and city == result[\"locations\"][0][\"name\"]:", "result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job def get_query_results(self, params): params['descending'] = 'true'", "source = self.source, contents = result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link", 
"job.insert_into_table(jobs_table) else: return job def get_query_results(self, params): params['descending'] = 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa'", "> 0: category = result[\"categories\"][0][\"name\"].lower() job = Job( name = result[\"name\"], category =", "taken_ids = get_taken_ids(city, self.source) for level in levels: total_pages = 9 for page", "= 9 for page in range(1, total_pages): params = { 'page': str(page), 'location':", "\\ and \"landing_page\" in result[\"refs\"] \\ and len(result[\"locations\"]) > 0 \\ and city", "category = result[\"categories\"][0][\"name\"].lower() job = Job( name = result[\"name\"], category = category, city", "import get_jobs_table, get_taken_ids class TheMuseCrawler(): def __init__(self): self.source = \"themuse\" def scrape(self, city,", "self.source) for level in levels: total_pages = 9 for page in range(1, total_pages):", "\"none\" if len(result[\"categories\"]) > 0: category = result[\"categories\"][0][\"name\"].lower() job = Job( name =", "Job( name = result[\"name\"], category = category, city = city, source = self.source,", "= \"themuse\" def scrape(self, city, insert_jobs_into_db = True): jobs_table = get_jobs_table() taken_ids =", "and \"landing_page\" in result[\"refs\"] \\ and len(result[\"locations\"]) > 0 \\ and city ==", "= 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers", "time import requests from config import levels, headers, cities from db_utils.job import Job", "= result[\"categories\"][0][\"name\"].lower() job = Job( name = result[\"name\"], category = category, city =", "total_pages): params = { 'page': str(page), 'location': city, 'level': level} j = self.get_query_results(params)", "result[\"refs\"] \\ and len(result[\"locations\"]) > 0 \\ and city == result[\"locations\"][0][\"name\"]: category =", "return job def 
get_query_results(self, params): params['descending'] = 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r =", "if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job def get_query_results(self, params): params['descending'] = 'true' params['api_key']", "= { 'page': str(page), 'location': city, 'level': level} j = self.get_query_results(params) total_pages =", "get_taken_ids(city, self.source) for level in levels: total_pages = 9 for page in range(1,", "len(result[\"categories\"]) > 0: category = result[\"categories\"][0][\"name\"].lower() job = Job( name = result[\"name\"], category", "{ 'page': str(page), 'location': city, 'level': level} j = self.get_query_results(params) total_pages = int(j['page_count'])", "in result[\"refs\"] \\ and len(result[\"locations\"]) > 0 \\ and city == result[\"locations\"][0][\"name\"]: category", "j = self.get_query_results(params) total_pages = int(j['page_count']) for result in j[\"results\"]: if result[\"id\"] not", "get_taken_ids class TheMuseCrawler(): def __init__(self): self.source = \"themuse\" def scrape(self, city, insert_jobs_into_db =", "city, insert_jobs_into_db = True): jobs_table = get_jobs_table() taken_ids = get_taken_ids(city, self.source) for level", "for page in range(1, total_pages): params = { 'page': str(page), 'location': city, 'level':", "range(1, total_pages): params = { 'page': str(page), 'location': city, 'level': level} j =", "page in range(1, total_pages): params = { 'page': str(page), 'location': city, 'level': level}", "'page': str(page), 'location': city, 'level': level} j = self.get_query_results(params) total_pages = int(j['page_count']) for", "headers, cities from db_utils.job import Job from db_utils.db_methods import get_jobs_table, get_taken_ids class TheMuseCrawler():", "params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs', params = 
params, headers = headers)", "levels: total_pages = 9 for page in range(1, total_pages): params = { 'page':", "= result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job def get_query_results(self,", "= Job( name = result[\"name\"], category = category, city = city, source =", "= get_jobs_table() taken_ids = get_taken_ids(city, self.source) for level in levels: total_pages = 9", "city == result[\"locations\"][0][\"name\"]: category = \"none\" if len(result[\"categories\"]) > 0: category = result[\"categories\"][0][\"name\"].lower()", "def get_query_results(self, params): params['descending'] = 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs',", "insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job def get_query_results(self, params): params['descending'] = 'true' params['api_key'] =", "params['descending'] = 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs', params = params,", "TheMuseCrawler(): def __init__(self): self.source = \"themuse\" def scrape(self, city, insert_jobs_into_db = True): jobs_table", "j[\"results\"]: if result[\"id\"] not in taken_ids \\ and \"landing_page\" in result[\"refs\"] \\ and", "= result[\"name\"], category = category, city = city, source = self.source, contents =", "import levels, headers, cities from db_utils.job import Job from db_utils.db_methods import get_jobs_table, get_taken_ids", "jobs_table = get_jobs_table() taken_ids = get_taken_ids(city, self.source) for level in levels: total_pages =", "in range(1, total_pages): params = { 'page': str(page), 'location': city, 'level': level} j", "= self.source, contents = result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link =", 
"len(result[\"locations\"]) > 0 \\ and city == result[\"locations\"][0][\"name\"]: category = \"none\" if len(result[\"categories\"])", "scrape(self, city, insert_jobs_into_db = True): jobs_table = get_jobs_table() taken_ids = get_taken_ids(city, self.source) for", "Job from db_utils.db_methods import get_jobs_table, get_taken_ids class TheMuseCrawler(): def __init__(self): self.source = \"themuse\"", "category = \"none\" if len(result[\"categories\"]) > 0: category = result[\"categories\"][0][\"name\"].lower() job = Job(", "result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id = result[\"id\"])", "city, source = self.source, contents = result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"],", "int(j['page_count']) for result in j[\"results\"]: if result[\"id\"] not in taken_ids \\ and \"landing_page\"", "\\ and len(result[\"locations\"]) > 0 \\ and city == result[\"locations\"][0][\"name\"]: category = \"none\"", "import time import requests from config import levels, headers, cities from db_utils.job import", "job = Job( name = result[\"name\"], category = category, city = city, source", "r = requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers = headers) time.sleep(0.7) j =", "str(page), 'location': city, 'level': level} j = self.get_query_results(params) total_pages = int(j['page_count']) for result", "insert_jobs_into_db = True): jobs_table = get_jobs_table() taken_ids = get_taken_ids(city, self.source) for level in", "import requests from config import levels, headers, cities from db_utils.job import Job from", "0: category = result[\"categories\"][0][\"name\"].lower() job = Job( name = result[\"name\"], category = category,", "= city, source = self.source, contents = result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date =", "if len(result[\"categories\"]) 
> 0: category = result[\"categories\"][0][\"name\"].lower() job = Job( name = result[\"name\"],", "def scrape(self, city, insert_jobs_into_db = True): jobs_table = get_jobs_table() taken_ids = get_taken_ids(city, self.source)", "> 0 \\ and city == result[\"locations\"][0][\"name\"]: category = \"none\" if len(result[\"categories\"]) >", "= \"none\" if len(result[\"categories\"]) > 0: category = result[\"categories\"][0][\"name\"].lower() job = Job( name", "result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job", "levels, headers, cities from db_utils.job import Job from db_utils.db_methods import get_jobs_table, get_taken_ids class", "'location': city, 'level': level} j = self.get_query_results(params) total_pages = int(j['page_count']) for result in", "job def get_query_results(self, params): params['descending'] = 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get(", "get_query_results(self, params): params['descending'] = 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs', params", "\"themuse\" def scrape(self, city, insert_jobs_into_db = True): jobs_table = get_jobs_table() taken_ids = get_taken_ids(city,", "= int(j['page_count']) for result in j[\"results\"]: if result[\"id\"] not in taken_ids \\ and", "self.get_query_results(params) total_pages = int(j['page_count']) for result in j[\"results\"]: if result[\"id\"] not in taken_ids", "get_jobs_table() taken_ids = get_taken_ids(city, self.source) for level in levels: total_pages = 9 for", "result[\"locations\"][0][\"name\"]: category = \"none\" if len(result[\"categories\"]) > 0: category = result[\"categories\"][0][\"name\"].lower() job =", "= True): jobs_table = get_jobs_table() taken_ids = get_taken_ids(city, self.source) for level 
in levels:", "'level': level} j = self.get_query_results(params) total_pages = int(j['page_count']) for result in j[\"results\"]: if", "job_id = result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job def get_query_results(self, params): params['descending']", "result[\"categories\"][0][\"name\"].lower() job = Job( name = result[\"name\"], category = category, city = city,", "'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers =", "in taken_ids \\ and \"landing_page\" in result[\"refs\"] \\ and len(result[\"locations\"]) > 0 \\", "level in levels: total_pages = 9 for page in range(1, total_pages): params =", "result[\"id\"] not in taken_ids \\ and \"landing_page\" in result[\"refs\"] \\ and len(result[\"locations\"]) >", "taken_ids \\ and \"landing_page\" in result[\"refs\"] \\ and len(result[\"locations\"]) > 0 \\ and", "True): jobs_table = get_jobs_table() taken_ids = get_taken_ids(city, self.source) for level in levels: total_pages", "def __init__(self): self.source = \"themuse\" def scrape(self, city, insert_jobs_into_db = True): jobs_table =", "result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table)", "= result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id =", "date = result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else:", "class TheMuseCrawler(): def __init__(self): self.source = \"themuse\" def scrape(self, city, insert_jobs_into_db = True):", "== result[\"locations\"][0][\"name\"]: category = \"none\" if len(result[\"categories\"]) > 0: category 
= result[\"categories\"][0][\"name\"].lower() job", "= requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers = headers) time.sleep(0.7) j = r.json()", "category, city = city, source = self.source, contents = result[\"contents\"], company = result[\"company\"][\"name\"].lower(),", "for result in j[\"results\"]: if result[\"id\"] not in taken_ids \\ and \"landing_page\" in", "config import levels, headers, cities from db_utils.job import Job from db_utils.db_methods import get_jobs_table,", "not in taken_ids \\ and \"landing_page\" in result[\"refs\"] \\ and len(result[\"locations\"]) > 0", "level} j = self.get_query_results(params) total_pages = int(j['page_count']) for result in j[\"results\"]: if result[\"id\"]", "result in j[\"results\"]: if result[\"id\"] not in taken_ids \\ and \"landing_page\" in result[\"refs\"]", "= result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return", "'https://www.themuse.com/api/public/jobs', params = params, headers = headers) time.sleep(0.7) j = r.json() return j", "total_pages = 9 for page in range(1, total_pages): params = { 'page': str(page),", "result[\"name\"], category = category, city = city, source = self.source, contents = result[\"contents\"],", "= result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if insert_jobs_into_db:", "self.source = \"themuse\" def scrape(self, city, insert_jobs_into_db = True): jobs_table = get_jobs_table() taken_ids", "= result[\"id\"]) if insert_jobs_into_db: job.insert_into_table(jobs_table) else: return job def get_query_results(self, params): params['descending'] =", "from db_utils.job import Job from db_utils.db_methods import get_jobs_table, get_taken_ids class TheMuseCrawler(): def __init__(self):", "from db_utils.db_methods import get_jobs_table, get_taken_ids 
class TheMuseCrawler(): def __init__(self): self.source = \"themuse\" def", "= category, city = city, source = self.source, contents = result[\"contents\"], company =", "0 \\ and city == result[\"locations\"][0][\"name\"]: category = \"none\" if len(result[\"categories\"]) > 0:", "for level in levels: total_pages = 9 for page in range(1, total_pages): params", "contents = result[\"contents\"], company = result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id", "params): params['descending'] = 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r = requests.get( 'https://www.themuse.com/api/public/jobs', params =", "else: return job def get_query_results(self, params): params['descending'] = 'true' params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa' r", "company = result[\"company\"][\"name\"].lower(), date = result[\"publication_date\"], link = result[\"refs\"][\"landing_page\"], job_id = result[\"id\"]) if", "requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers = headers) time.sleep(0.7) j = r.json() return" ]
[ "import os import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as", "check_box_frustum(box, P, center, dimension, angle): x1, y1, x2, y2 = box box_corner =", "70, 0.1) xyz1 = np.zeros((len(z1), 3)) xyz1[:, 0] = x1 xyz1[:, 1] =", "project_image_to_rect(xyz1, P) fig = plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect,", "2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4,", "import matplotlib.pyplot as plt from datasets.data_utils import project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return points[:,", "ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2]) def check_box_frustum(box, P, center, dimension, angle): x1,", "# ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points, ax) draw_points(ref_points, ax) draw_box3d(gt_box3d_corners,", "= adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal')", "math import time import pickle import sys import os import numpy as np", "adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points,", "def check_box_frustum(box, P, center, dimension, angle): x1, y1, x2, y2 = box box_corner", "= y1 xyz1[:, 2] = z1 xyz4_rect = project_image_to_rect(xyz1, P) fig = plt.figure()", "def adjust_coord_for_view(points): return points[:, [2, 0, 1]] * np.array([1, -1, -1]) def draw_box3d(corners,", "def draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2]) def check_box_frustum(box, P, center,", "P) xyz1[:, 0] = x1 xyz1[:, 1] = y2 xyz1[:, 2] = z1", "= adjust_coord_for_view(ref_points) gt_box3d_corners = 
adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on()", "ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points, ax) draw_points(ref_points, ax) draw_box3d(gt_box3d_corners, ax) draw_box3d(pred_box3d_corners, ax)", "''' 8, 3 ''' order = np.array([ 0, 1, 1, 2, 2, 3,", "def draw_box3d(corners, ax): ''' 8, 3 ''' order = np.array([ 0, 1, 1,", "sys import os import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot", "z1 xyz1_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] = y2", "plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect,", "def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners): fig = plt.figure() ax = fig.gca(projection='3d') points", "z1 xyz4_rect = project_image_to_rect(xyz1, P) fig = plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner, ax)", "np.array([ 0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6,", "2]) def check_box_frustum(box, P, center, dimension, angle): x1, y1, x2, y2 = box", "pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points, ax)", "= compute_box_3d(center, dimension, angle, P) # 8, 3 z1 = np.arange(0, 70, 0.1)", "2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 3,", "gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y')", "2]) def draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2]) def check_box_frustum(box, P,", "3 z1 = np.arange(0, 70, 0.1) xyz1 = np.zeros((len(z1), 3)) xyz1[:, 0] 
=", "ref_points, gt_box3d_corners, pred_box3d_corners): fig = plt.figure() ax = fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points", "ax = fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners", "pts[:, 2]) def check_box_frustum(box, P, center, dimension, angle): x1, y1, x2, y2 =", "3 ''' order = np.array([ 0, 1, 1, 2, 2, 3, 3, 0,", "0], corners[order[i], 1], corners[order[i], 2]) def draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:, 1], pts[:,", "adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') #", "5, 5, 6, 6, 7, 7, 4, 3, 7, 0, 4, 2, 6,", "y1 xyz1[:, 2] = z1 xyz4_rect = project_image_to_rect(xyz1, P) fig = plt.figure() ax", "ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points, ax) draw_points(ref_points, ax) draw_box3d(gt_box3d_corners, ax)", "2) for i in range(len(order)): ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2]) def draw_points(pts,", "8, 3 z1 = np.arange(0, 70, 0.1) xyz1 = np.zeros((len(z1), 3)) xyz1[:, 0]", "compute_box_3d def adjust_coord_for_view(points): return points[:, [2, 0, 1]] * np.array([1, -1, -1]) def", "= project_image_to_rect(xyz1, P) xyz1[:, 0] = x1 xyz1[:, 1] = y2 xyz1[:, 2]", "xyz1[:, 0] = x2 xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz4_rect", "draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show() def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners):", "= y2 xyz1[:, 2] = z1 xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] =", "1] = y1 xyz1[:, 2] = z1 xyz1_rect = project_image_to_rect(xyz1, P) xyz1[:, 0]", "0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 
6,", "import math import time import pickle import sys import os import numpy as", "np.arange(0, 70, 0.1) xyz1 = np.zeros((len(z1), 3)) xyz1[:, 0] = x1 xyz1[:, 1]", "project_image_to_rect(xyz1, P) xyz1[:, 0] = x1 xyz1[:, 1] = y2 xyz1[:, 2] =", "box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3 z1 = np.arange(0, 70,", "y2 xyz1[:, 2] = z1 xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x1", "corners[order[i], 1], corners[order[i], 2]) def draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2])", "numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from datasets.data_utils", "time import pickle import sys import os import numpy as np from mpl_toolkits.mplot3d", "1], pts[:, 2]) def check_box_frustum(box, P, center, dimension, angle): x1, y1, x2, y2", "z1 xyz3_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] = y1", "os import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt", "y1, x2, y2 = box box_corner = compute_box_3d(center, dimension, angle, P) # 8,", "y2 = box box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3 z1", "from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from datasets.data_utils import project_image_to_rect, compute_box_3d", "Axes3D import matplotlib.pyplot as plt from datasets.data_utils import project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return", "ax): ''' 8, 3 ''' order = np.array([ 0, 1, 1, 2, 2,", "xyz1[:, 2] = z1 xyz4_rect = project_image_to_rect(xyz1, P) fig = plt.figure() ax =", "4, 3, 7, 0, 4, 2, 6, 1, 5]).reshape(-1, 2) for i in", "x2 xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz2_rect = project_image_to_rect(xyz1, P)", "P) # 8, 3 z1 = np.arange(0, 70, 0.1) xyz1 = np.zeros((len(z1), 3))", "= fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show()", "points = 
adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) #", "matplotlib.pyplot as plt from datasets.data_utils import project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return points[:, [2,", "= z1 xyz1_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] =", "y2 xyz1[:, 2] = z1 xyz3_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2", "fig = plt.figure() ax = fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners", "P) xyz1[:, 0] = x2 xyz1[:, 1] = y2 xyz1[:, 2] = z1", "draw_box3d(corners, ax): ''' 8, 3 ''' order = np.array([ 0, 1, 1, 2,", "ax): ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2]) def check_box_frustum(box, P, center, dimension, angle):", "xyz1[:, 0] = x2 xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz2_rect", "xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz1_rect = project_image_to_rect(xyz1, P) xyz1[:,", "points[:, [2, 0, 1]] * np.array([1, -1, -1]) def draw_box3d(corners, ax): ''' 8,", "7, 4, 3, 7, 0, 4, 2, 6, 1, 5]).reshape(-1, 2) for i", "-1, -1]) def draw_box3d(corners, ax): ''' 8, 3 ''' order = np.array([ 0,", "xyz1_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] = y2 xyz1[:,", "plt from datasets.data_utils import project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return points[:, [2, 0, 1]]", "[2, 0, 1]] * np.array([1, -1, -1]) def draw_box3d(corners, ax): ''' 8, 3", "draw_points(xyz4_rect, ax) plt.show() def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners): fig = plt.figure() ax", "2] = z1 xyz4_rect = project_image_to_rect(xyz1, P) fig = plt.figure() ax = fig.gca(projection='3d')", "z1 = np.arange(0, 70, 0.1) xyz1 = np.zeros((len(z1), 3)) xyz1[:, 0] = x1", "4, 2, 6, 1, 5]).reshape(-1, 2) for i in range(len(order)): ax.plot(corners[order[i], 0], 
corners[order[i],", "adjust_coord_for_view(ref_points) gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x')", "0] = x2 xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz2_rect =", "x1 xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz3_rect = project_image_to_rect(xyz1, P)", "= x1 xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz3_rect = project_image_to_rect(xyz1,", "angle): x1, y1, x2, y2 = box box_corner = compute_box_3d(center, dimension, angle, P)", "= np.arange(0, 70, 0.1) xyz1 = np.zeros((len(z1), 3)) xyz1[:, 0] = x1 xyz1[:,", "1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7,", "np.zeros((len(z1), 3)) xyz1[:, 0] = x1 xyz1[:, 1] = y1 xyz1[:, 2] =", "= z1 xyz4_rect = project_image_to_rect(xyz1, P) fig = plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner,", "angle, P) # 8, 3 z1 = np.arange(0, 70, 0.1) xyz1 = np.zeros((len(z1),", "x2 xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz4_rect = project_image_to_rect(xyz1, P)", "draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2]) def check_box_frustum(box, P, center, dimension,", "= x1 xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz1_rect = project_image_to_rect(xyz1,", "0], pts[:, 1], pts[:, 2]) def check_box_frustum(box, P, center, dimension, angle): x1, y1,", "project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] = y2 xyz1[:, 2] =", "xyz1[:, 2] = z1 xyz1_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:,", "x2, y2 = box box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3", "= x2 xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz2_rect = project_image_to_rect(xyz1,", "import project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return points[:, [2, 0, 1]] * np.array([1, -1,", "pickle import sys import os import numpy as np from mpl_toolkits.mplot3d import Axes3D", "fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) 
gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners)", "# 8, 3 z1 = np.arange(0, 70, 0.1) xyz1 = np.zeros((len(z1), 3)) xyz1[:,", "adjust_coord_for_view(points): return points[:, [2, 0, 1]] * np.array([1, -1, -1]) def draw_box3d(corners, ax):", "2] = z1 xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x1 xyz1[:, 1]", "import Axes3D import matplotlib.pyplot as plt from datasets.data_utils import project_image_to_rect, compute_box_3d def adjust_coord_for_view(points):", "xyz1 = np.zeros((len(z1), 3)) xyz1[:, 0] = x1 xyz1[:, 1] = y1 xyz1[:,", "box box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3 z1 = np.arange(0,", "z1 xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x1 xyz1[:, 1] = y2", "check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners): fig = plt.figure() ax = fig.gca(projection='3d') points =", "draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show() def check_norm(self,", "compute_box_3d(center, dimension, angle, P) # 8, 3 z1 = np.arange(0, 70, 0.1) xyz1", "3, 7, 0, 4, 2, 6, 1, 5]).reshape(-1, 2) for i in range(len(order)):", "0] = x2 xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz4_rect =", "draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show() def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners): fig =", "= project_image_to_rect(xyz1, P) fig = plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax)", "= z1 xyz3_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] =", "project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return points[:, [2, 0, 1]] * np.array([1, -1, -1])", "ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show() def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners): fig", "P, center, dimension, angle): 
x1, y1, x2, y2 = box box_corner = compute_box_3d(center,", "corners[order[i], 2]) def draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2]) def check_box_frustum(box,", "= project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] = y1 xyz1[:, 2]", "pred_box3d_corners): fig = plt.figure() ax = fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points)", "''' order = np.array([ 0, 1, 1, 2, 2, 3, 3, 0, 4,", "0] = x1 xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz1_rect =", "= y2 xyz1[:, 2] = z1 xyz3_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] =", "import sys import os import numpy as np from mpl_toolkits.mplot3d import Axes3D import", "project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] = y1 xyz1[:, 2] =", "6, 7, 7, 4, 3, 7, 0, 4, 2, 6, 1, 5]).reshape(-1, 2)", "xyz4_rect = project_image_to_rect(xyz1, P) fig = plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect,", "1]] * np.array([1, -1, -1]) def draw_box3d(corners, ax): ''' 8, 3 ''' order", "2, 6, 1, 5]).reshape(-1, 2) for i in range(len(order)): ax.plot(corners[order[i], 0], corners[order[i], 1],", "pts[:, 1], pts[:, 2]) def check_box_frustum(box, P, center, dimension, angle): x1, y1, x2,", "xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz4_rect = project_image_to_rect(xyz1, P) fig", "= plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax)", "1] = y2 xyz1[:, 2] = z1 xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:, 0]", "x1 xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz1_rect = project_image_to_rect(xyz1, P)", "= z1 xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x1 xyz1[:, 1] =", "ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal')", "y1 xyz1[:, 2] = z1 xyz1_rect = 
project_image_to_rect(xyz1, P) xyz1[:, 0] = x2", "xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x1 xyz1[:, 1] = y2 xyz1[:,", "3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 3, 7,", "= project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] = y2 xyz1[:, 2]", "4, 5, 5, 6, 6, 7, 7, 4, 3, 7, 0, 4, 2,", "as plt from datasets.data_utils import project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return points[:, [2, 0,", "xyz3_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1] = y1 xyz1[:,", "1], corners[order[i], 2]) def draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2]) def", "= adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z')", "6, 1, 5]).reshape(-1, 2) for i in range(len(order)): ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i],", "1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7,", "= y1 xyz1[:, 2] = z1 xyz1_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] =", "7, 0, 4, 2, 6, 1, 5]).reshape(-1, 2) for i in range(len(order)): ax.plot(corners[order[i],", "2] = z1 xyz3_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1]", "P) xyz1[:, 0] = x2 xyz1[:, 1] = y1 xyz1[:, 2] = z1", "0, 1]] * np.array([1, -1, -1]) def draw_box3d(corners, ax): ''' 8, 3 '''", "= np.array([ 0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5,", "mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from datasets.data_utils import project_image_to_rect, compute_box_3d def", "in range(len(order)): ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2]) def draw_points(pts, ax): ax.scatter(pts[:, 0],", "dimension, angle): x1, y1, x2, y2 = box box_corner = compute_box_3d(center, dimension, angle,", "dimension, angle, P) # 8, 3 z1 = np.arange(0, 70, 0.1) xyz1 =", "0] = x1 xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz3_rect =", "datasets.data_utils import 
project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return points[:, [2, 0, 1]] * np.array([1,", "= adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points, ax) draw_points(ref_points,", "ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2]) def draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:, 1],", "8, 3 ''' order = np.array([ 0, 1, 1, 2, 2, 3, 3,", "from datasets.data_utils import project_image_to_rect, compute_box_3d def adjust_coord_for_view(points): return points[:, [2, 0, 1]] *", "0, 4, 5, 5, 6, 6, 7, 7, 4, 3, 7, 0, 4,", "1] = y2 xyz1[:, 2] = z1 xyz3_rect = project_image_to_rect(xyz1, P) xyz1[:, 0]", "* np.array([1, -1, -1]) def draw_box3d(corners, ax): ''' 8, 3 ''' order =", "return points[:, [2, 0, 1]] * np.array([1, -1, -1]) def draw_box3d(corners, ax): '''", "2] = z1 xyz1_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:, 1]", "np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from datasets.data_utils import project_image_to_rect,", "P) fig = plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax)", "# ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points, ax) draw_points(ref_points, ax) draw_box3d(gt_box3d_corners, ax) draw_box3d(pred_box3d_corners,", "5]).reshape(-1, 2) for i in range(len(order)): ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2]) def", "-1]) def draw_box3d(corners, ax): ''' 8, 3 ''' order = np.array([ 0, 1,", "adjust_coord_for_view(pred_box3d_corners) # ax.set_aspect('equal') # ax.axis('equal') ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points, ax) draw_points(ref_points, ax)", "1, 5]).reshape(-1, 2) for i in range(len(order)): 
ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2])", "5, 6, 6, 7, 7, 4, 3, 7, 0, 4, 2, 6, 1,", "np.array([1, -1, -1]) def draw_box3d(corners, ax): ''' 8, 3 ''' order = np.array([", "plt.figure() ax = fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners)", "fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show() def", "= x2 xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz4_rect = project_image_to_rect(xyz1,", "i in range(len(order)): ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2]) def draw_points(pts, ax): ax.scatter(pts[:,", "import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from", "0, 4, 2, 6, 1, 5]).reshape(-1, 2) for i in range(len(order)): ax.plot(corners[order[i], 0],", "center, dimension, angle): x1, y1, x2, y2 = box box_corner = compute_box_3d(center, dimension,", "import time import pickle import sys import os import numpy as np from", "xyz1[:, 2] = z1 xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:, 0] = x1 xyz1[:,", "gt_box3d_corners, pred_box3d_corners): fig = plt.figure() ax = fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points =", "3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 3, 7, 0,", "ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show() def check_norm(self, points,", "= plt.figure() ax = fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners =", "draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show() def check_norm(self, points, ref_points,", "order = np.array([ 0, 1, 1, 2, 2, 3, 3, 0, 4, 5,", "xyz1[:, 2] = z1 xyz3_rect = 
project_image_to_rect(xyz1, P) xyz1[:, 0] = x2 xyz1[:,", "ax = fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax)", "xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz2_rect = project_image_to_rect(xyz1, P) xyz1[:,", "0.1) xyz1 = np.zeros((len(z1), 3)) xyz1[:, 0] = x1 xyz1[:, 1] = y1", "range(len(order)): ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2]) def draw_points(pts, ax): ax.scatter(pts[:, 0], pts[:,", "ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect, ax) draw_points(xyz4_rect, ax) plt.show() def check_norm(self, points, ref_points, gt_box3d_corners,", "= box box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3 z1 =", "import pickle import sys import os import numpy as np from mpl_toolkits.mplot3d import", "3)) xyz1[:, 0] = x1 xyz1[:, 1] = y1 xyz1[:, 2] = z1", "1] = y1 xyz1[:, 2] = z1 xyz4_rect = project_image_to_rect(xyz1, P) fig =", "ax) plt.show() def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners): fig = plt.figure() ax =", "7, 7, 4, 3, 7, 0, 4, 2, 6, 1, 5]).reshape(-1, 2) for", "fig = plt.figure() ax = fig.gca(projection='3d') draw_box3d(box_corner, ax) draw_points(xyz1_rect, ax) draw_points(xyz2_rect, ax) draw_points(xyz3_rect,", "x1, y1, x2, y2 = box box_corner = compute_box_3d(center, dimension, angle, P) #", "xyz1[:, 0] = x1 xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz3_rect", "6, 6, 7, 7, 4, 3, 7, 0, 4, 2, 6, 1, 5]).reshape(-1,", "= fig.gca(projection='3d') points = adjust_coord_for_view(points) ref_points = adjust_coord_for_view(ref_points) gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners) pred_box3d_corners =", "xyz1[:, 0] = x1 xyz1[:, 1] = y1 xyz1[:, 2] = z1 xyz1_rect", "for i in range(len(order)): ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2]) def draw_points(pts, ax):", "as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from 
datasets.data_utils import", "plt.show() def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners): fig = plt.figure() ax = fig.gca(projection='3d')", "ax.set_axis_on() ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_points(points, ax) draw_points(ref_points, ax) draw_box3d(gt_box3d_corners, ax) draw_box3d(pred_box3d_corners, ax) plt.show()", "= np.zeros((len(z1), 3)) xyz1[:, 0] = x1 xyz1[:, 1] = y1 xyz1[:, 2]", "xyz1[:, 1] = y2 xyz1[:, 2] = z1 xyz3_rect = project_image_to_rect(xyz1, P) xyz1[:,", "points, ref_points, gt_box3d_corners, pred_box3d_corners): fig = plt.figure() ax = fig.gca(projection='3d') points = adjust_coord_for_view(points)", "ax) draw_points(xyz4_rect, ax) plt.show() def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners): fig = plt.figure()" ]
[ "async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\"", "только название канала, например: Социопат. Если считаешь, что необходимо создать новый канал, напиши", "сообщение уйдёт в пост!\", \"nopost\": \"Упс... У нас нет такого канала. Проверь написание:", "'/new Название <Enter> Описание'!\", \"success\": \"Поздравляю! Твой пост успешно отправлен и скоро появится", "помогаешь нам! ''', \"category_yes\": \"Почти всё готово! Теперь прикрепи картинку, текст или другой", "\"\").lower() if category in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await", "всё готово! Теперь прикрепи картинку, текст или другой медиафайл одним сообщением. Только первое", "(мемы), Котики, Экология. Не нашли нужный канал или хотите создать новый? Напишите \"/new", "или другой медиафайл одним сообщением. Только первое сообщение уйдёт в пост!\", \"nopost\": \"Упс...", "Просто введи его название без лишних символов: Андроид (канал про Android OS), Социопат", "await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await", "await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post)) else: await bot.send_text(chatId=event[\"fromChat\"],", "пост успешно отправлен и скоро появится у нас на канале!\", \"postSuccess\": \"Пост {author}", "в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично! 
Теперь выбери", "в предложку каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для добавления поста в", "список бота! Спасибо, что помогаешь нам! ''', \"category_yes\": \"Почти всё готово! Теперь прикрепи", "def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category", "кнопку ниже для добавления поста в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\":", "Андроид (канал про Android OS), Социопат (мемы), Котики, Экология. Не нашли нужный канал", "await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \", \"\").lower() if", "описание\" без кавычек, и мы добавим этот канал в список бота! Спасибо, что", "канала, например: Социопат. Если считаешь, что необходимо создать новый канал, напиши боту '/new", "каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для добавления поста в очередь рассмотрения.\",", "боту '/new Название <Enter> Описание'!\", \"success\": \"Поздравляю! Твой пост успешно отправлен и скоро", "мы добавим этот канал в список бота! Спасибо, что помогаешь нам! ''', \"category_yes\":", "ниже для добавления поста в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]),", "что помогаешь нам! ''', \"category_yes\": \"Почти всё готово! 
Теперь прикрепи картинку, текст или", "что необходимо создать новый канал, напиши боту '/new Название <Enter> Описание'!\", \"success\": \"Поздравляю!", "text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async", "await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main(): sociopath = [] android = [] cats", "нужный канал или хотите создать новый? Напишите \"/new Название канала <Enter> Краткое описание\"", "Спасибо, что помогаешь нам! ''', \"category_yes\": \"Почти всё готово! Теперь прикрепи картинку, текст", "или хотите создать новый? Напишите \"/new Название канала <Enter> Краткое описание\" без кавычек,", "напиши боту '/new Название <Enter> Описание'!\", \"success\": \"Поздравляю! Твой пост успешно отправлен и", "другой медиафайл одним сообщением. Только первое сообщение уйдёт в пост!\", \"nopost\": \"Упс... У", "await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \", \"\").lower() if category in categories: await bot.send_text(chatId=event[\"fromChat\"],", "Не нашли нужный канал или хотите создать новый? Напишите \"/new Название канала <Enter>", "готово! Теперь прикрепи картинку, текст или другой медиафайл одним сообщением. Только первое сообщение", "\"success\": \"Поздравляю! 
Твой пост успешно отправлен и скоро появится у нас на канале!\",", "start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category =", "bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category,", "= Bot(token=TOKEN) replics = { \"start\": \"Привет! С помощью этого бота ты можешь", "Напишите \"/new Название канала <Enter> Краткое описание\" без кавычек, и мы добавим этот", "category.replace(\" \", \"\").lower() if category in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await", "post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \", \"\").lower()", "канала. Проверь написание: отсутствие символов или опечатки. Введи только название канала, например: Социопат.", "Проверь написание: отсутствие символов или опечатки. Введи только название канала, например: Социопат. Если", "= post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main(): sociopath = [] android", "bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main(): sociopath = [] android = [] cats =", "новый? 
Напишите \"/new Название канала <Enter> Краткое описание\" без кавычек, и мы добавим", "import asyncio TOKEN = \"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot = Bot(token=TOKEN) replics =", "материал в предложку каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для добавления поста", "text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category", "main(): sociopath = [] android = [] cats = [] eco = []", "Название канала <Enter> Краткое описание\" без кавычек, и мы добавим этот канал в", "нам! ''', \"category_yes\": \"Почти всё готово! Теперь прикрепи картинку, текст или другой медиафайл", "создать новый? Напишите \"/new Название канала <Enter> Краткое описание\" без кавычек, и мы", "= \"**********@chat.agent\" bot = Bot(token=TOKEN) replics = { \"start\": \"Привет! С помощью этого", "[] android = [] cats = [] eco = [] await bot.add_handler(sociopath) await", "Описание'!\", \"success\": \"Поздравляю! Твой пост успешно отправлен и скоро появится у нас на", "= \"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot = Bot(token=TOKEN) replics = { \"start\": \"Привет!", "Краткое описание\" без кавычек, и мы добавим этот канал в список бота! Спасибо,", "символов: Андроид (канал про Android OS), Социопат (мемы), Котики, Экология. Не нашли нужный", "добавим этот канал в список бота! Спасибо, что помогаешь нам! ''', \"category_yes\": \"Почти", "новый канал, напиши боту '/new Название <Enter> Описание'!\", \"success\": \"Поздравляю! 
Твой пост успешно", "category in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"])", "успешно отправлен и скоро появится у нас на канале!\", \"postSuccess\": \"Пост {author} для", "Социопат (мемы), Котики, Экология. Не нашли нужный канал или хотите создать новый? Напишите", "одним сообщением. Только первое сообщение уйдёт в пост!\", \"nopost\": \"Упс... У нас нет", "в список бота! Спасибо, что помогаешь нам! ''', \"category_yes\": \"Почти всё готово! Теперь", "создать новый канал, напиши боту '/new Название <Enter> Описание'!\", \"success\": \"Поздравляю! Твой пост", "def main(): sociopath = [] android = [] cats = [] eco =", "нашли нужный канал или хотите создать новый? Напишите \"/new Название канала <Enter> Краткое", "Bot import asyncio TOKEN = \"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot = Bot(token=TOKEN) replics", "\"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично! Теперь выбери канал для публикации", "кавычек, и мы добавим этот канал в список бота! Спасибо, что помогаешь нам!", "\", \"\").lower() if category in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"])", "android = [] cats = [] eco = [] await bot.add_handler(sociopath) await bot.add_handler(android)", "Котики, Экология. Не нашли нужный канал или хотите создать новый? Напишите \"/new Название", "в пост!\", \"nopost\": \"Упс... У нас нет такого канала. Проверь написание: отсутствие символов", "\"**********@chat.agent\" bot = Bot(token=TOKEN) replics = { \"start\": \"Привет! С помощью этого бота", "\"Почти всё готово! Теперь прикрепи картинку, текст или другой медиафайл одним сообщением. 
Только", "название канала, например: Социопат. Если считаешь, что необходимо создать новый канал, напиши боту", "\"postSuccess\": \"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"],", "списка ниже. Просто введи его название без лишних символов: Андроид (канал про Android", "отправлен и скоро появится у нас на канале!\", \"postSuccess\": \"Пост {author} для канала", "post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post =", "await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"],", "Если считаешь, что необходимо создать новый канал, напиши боту '/new Название <Enter> Описание'!\",", "сообщением. Только первое сообщение уйдёт в пост!\", \"nopost\": \"Упс... У нас нет такого", "categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT,", "канала <Enter> Краткое описание\" без кавычек, и мы добавим этот канал в список", "необходимо создать новый канал, напиши боту '/new Название <Enter> Описание'!\", \"success\": \"Поздравляю! 
Твой", "и скоро появится у нас на канале!\", \"postSuccess\": \"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\"", "название без лишних символов: Андроид (канал про Android OS), Социопат (мемы), Котики, Экология.", "bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \", \"\").lower() if category", "sociopath = [] android = [] cats = [] eco = [] await", "def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \",", "этого бота ты можешь отправить материал в предложку каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми", "''', \"category_yes\": \"Почти всё готово! Теперь прикрепи картинку, текст или другой медиафайл одним", "Введи только название канала, например: Социопат. Если считаешь, что необходимо создать новый канал,", "bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"])", "from api.bot import Bot import asyncio TOKEN = \"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot", "текст или другой медиафайл одним сообщением. Только первое сообщение уйдёт в пост!\", \"nopost\":", "Android OS), Социопат (мемы), Котики, Экология. Не нашли нужный канал или хотите создать", "category = category.replace(\" \", \"\").lower() if category in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post", "бота! Спасибо, что помогаешь нам! ''', \"category_yes\": \"Почти всё готово! Теперь прикрепи картинку,", "пост!\", \"nopost\": \"Упс... У нас нет такого канала. 
Проверь написание: отсутствие символов или", "CHAT = \"**********@chat.agent\" bot = Bot(token=TOKEN) replics = { \"start\": \"Привет! С помощью", "\"Отправить пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично! Теперь выбери канал для публикации из списка", "появится у нас на канале!\", \"postSuccess\": \"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async", "'''Отлично! Теперь выбери канал для публикации из списка ниже. Просто введи его название", "его название без лишних символов: Андроид (канал про Android OS), Социопат (мемы), Котики,", "post = post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main(): sociopath = []", "\"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot = Bot(token=TOKEN) replics = { \"start\": \"Привет! С", "рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично! Теперь выбери канал для", "символов или опечатки. Введи только название канала, например: Социопат. Если считаешь, что необходимо", "[] cats = [] eco = [] await bot.add_handler(sociopath) await bot.add_handler(android) await bot.add_handler(cats)", "отправить материал в предложку каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для добавления", "Bot(token=TOKEN) replics = { \"start\": \"Привет! С помощью этого бота ты можешь отправить", "} async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"],", "Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для добавления поста в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить", "\"post\"}]), \"post\": '''Отлично! Теперь выбери канал для публикации из списка ниже. 
Просто введи", "бота ты можешь отправить материал в предложку каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку", "post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main(): sociopath = [] android =", "bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"])", "скоро появится у нас на канале!\", \"postSuccess\": \"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" }", "text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main(): sociopath", "= await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \", \"\").lower() if category in categories: await", "\"/new Название канала <Enter> Краткое описание\" без кавычек, и мы добавим этот канал", "можешь отправить материал в предложку каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для", "предложку каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для добавления поста в очередь", "лишних символов: Андроид (канал про Android OS), Социопат (мемы), Котики, Экология. Не нашли", "и мы добавим этот канал в список бота! Спасибо, что помогаешь нам! 
''',", "Твой пост успешно отправлен и скоро появится у нас на канале!\", \"postSuccess\": \"Пост", "= await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post))", "Теперь выбери канал для публикации из списка ниже. Просто введи его название без", "@night_admin\\nНажми кнопку ниже для добавления поста в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\",", "= [] eco = [] await bot.add_handler(sociopath) await bot.add_handler(android) await bot.add_handler(cats) await bot.add_handler(eco)", "asyncio TOKEN = \"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot = Bot(token=TOKEN) replics = {", "медиафайл одним сообщением. Только первое сообщение уйдёт в пост!\", \"nopost\": \"Упс... У нас", "= [] cats = [] eco = [] await bot.add_handler(sociopath) await bot.add_handler(android) await", "bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main():", "\"Упс... У нас нет такого канала. Проверь написание: отсутствие символов или опечатки. Введи", "выбери канал для публикации из списка ниже. Просто введи его название без лишних", "bot = Bot(token=TOKEN) replics = { \"start\": \"Привет! С помощью этого бота ты", "картинку, текст или другой медиафайл одним сообщением. Только первое сообщение уйдёт в пост!\",", "помощью этого бота ты можешь отправить материал в предложку каналов Night Admin Community!\\nРазработчик:", "\"Привет! С помощью этого бота ты можешь отправить материал в предложку каналов Night", "<Enter> Описание'!\", \"success\": \"Поздравляю! 
Твой пост успешно отправлен и скоро появится у нас", "await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def", "\"Поздравляю! Твой пост успешно отправлен и скоро появится у нас на канале!\", \"postSuccess\":", "у нас на канале!\", \"postSuccess\": \"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def", "прикрепи картинку, текст или другой медиафайл одним сообщением. Только первое сообщение уйдёт в", "\"nopost\": \"Упс... У нас нет такого канала. Проверь написание: отсутствие символов или опечатки.", "async def main(): sociopath = [] android = [] cats = [] eco", "Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для добавления поста в очередь рассмотрения.\", \"startInline\":", "первое сообщение уйдёт в пост!\", \"nopost\": \"Упс... У нас нет такого канала. Проверь", "очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично! Теперь выбери канал", "опечатки. Введи только название канала, например: Социопат. Если считаешь, что необходимо создать новый", "У нас нет такого канала. Проверь написание: отсутствие символов или опечатки. Введи только", "= { \"start\": \"Привет! С помощью этого бота ты можешь отправить материал в", "category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \", \"\").lower() if category in categories:", "публикации из списка ниже. Просто введи его название без лишних символов: Андроид (канал", "Только первое сообщение уйдёт в пост!\", \"nopost\": \"Упс... У нас нет такого канала.", "канал для публикации из списка ниже. 
Просто введи его название без лишних символов:", "async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"])", "С помощью этого бота ты можешь отправить материал в предложку каналов Night Admin", "{author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async", "канал, напиши боту '/new Название <Enter> Описание'!\", \"success\": \"Поздравляю! Твой пост успешно отправлен", "TOKEN = \"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot = Bot(token=TOKEN) replics = { \"start\":", "{ \"start\": \"Привет! С помощью этого бота ты можешь отправить материал в предложку", "канал или хотите создать новый? Напишите \"/new Название канала <Enter> Краткое описание\" без", "канал в список бота! Спасибо, что помогаешь нам! ''', \"category_yes\": \"Почти всё готово!", "нет такого канала. Проверь написание: отсутствие символов или опечатки. Введи только название канала,", "ниже. Просто введи его название без лишних символов: Андроид (канал про Android OS),", "уйдёт в пост!\", \"nopost\": \"Упс... У нас нет такого канала. Проверь написание: отсутствие", "для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def", "этот канал в список бота! Спасибо, что помогаешь нам! 
''', \"category_yes\": \"Почти всё", "введи его название без лишних символов: Андроид (канал про Android OS), Социопат (мемы),", "in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await", "без лишних символов: Андроид (канал про Android OS), Социопат (мемы), Котики, Экология. Не", "text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post", "\"post\": '''Отлично! Теперь выбери канал для публикации из списка ниже. Просто введи его", "else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main(): sociopath = [] android = []", "api.bot import Bot import asyncio TOKEN = \"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot =", "для добавления поста в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]), \"post\":", "\"callbackData\": \"post\"}]), \"post\": '''Отлично! Теперь выбери канал для публикации из списка ниже. Просто", "нас нет такого канала. Проверь написание: отсутствие символов или опечатки. 
Введи только название", "на канале!\", \"postSuccess\": \"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event): await", "channel=category, post = post)) else: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"nopost\"]) async def main(): sociopath =", "{channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await", "нас на канале!\", \"postSuccess\": \"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event):", "text=replics[\"nopost\"]) async def main(): sociopath = [] android = [] cats = []", "text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \", \"\").lower() if category in", "пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично! Теперь выбери канал для публикации из списка ниже.", "считаешь, что необходимо создать новый канал, напиши боту '/new Название <Enter> Описание'!\", \"success\":", "из списка ниже. Просто введи его название без лишних символов: Андроид (канал про", "или опечатки. Введи только название канала, например: Социопат. Если считаешь, что необходимо создать", "\"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично! Теперь выбери канал для публикации из", "inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"post\"]) category = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category =", "Социопат. 
Если считаешь, что необходимо создать новый канал, напиши боту '/new Название <Enter>", "\"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"])", "\"start\": \"Привет! С помощью этого бота ты можешь отправить материал в предложку каналов", "без кавычек, и мы добавим этот канал в список бота! Спасибо, что помогаешь", "(канал про Android OS), Социопат (мемы), Котики, Экология. Не нашли нужный канал или", "такого канала. Проверь написание: отсутствие символов или опечатки. Введи только название канала, например:", "поста в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично! Теперь", "import Bot import asyncio TOKEN = \"***.**********.**********:*********\" CHAT = \"**********@chat.agent\" bot = Bot(token=TOKEN)", "\"category_yes\": \"Почти всё готово! Теперь прикрепи картинку, текст или другой медиафайл одним сообщением.", "= [] android = [] cats = [] eco = [] await bot.add_handler(sociopath)", "канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"start\"], inlineKeyboardMarkup=replics[\"startInline\"]) async def post_cb(event):", "про Android OS), Социопат (мемы), Котики, Экология. Не нашли нужный канал или хотите", "cats = [] eco = [] await bot.add_handler(sociopath) await bot.add_handler(android) await bot.add_handler(cats) await", "replics = { \"start\": \"Привет! 
С помощью этого бота ты можешь отправить материал", "<Enter> Краткое описание\" без кавычек, и мы добавим этот канал в список бота!", "Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже для добавления поста в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\":", "bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post)) else: await", "для публикации из списка ниже. Просто введи его название без лишних символов: Андроид", "bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) category = category.replace(\" \", \"\").lower() if category in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"])", "написание: отсутствие символов или опечатки. Введи только название канала, например: Социопат. Если считаешь,", "ты можешь отправить материал в предложку каналов Night Admin Community!\\nРазработчик: @night_admin\\nНажми кнопку ниже", "например: Социопат. Если считаешь, что необходимо создать новый канал, напиши боту '/new Название", "if category in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post = await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"],", "= category.replace(\" \", \"\").lower() if category in categories: await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"category_yes\"]) post =", "добавления поста в очередь рассмотрения.\", \"startInline\": \"[{}]\".format(json.dumps([{\"text\": \"Отправить пост\", \"callbackData\": \"post\"}]), \"post\": '''Отлично!", "Теперь прикрепи картинку, текст или другой медиафайл одним сообщением. 
Только первое сообщение уйдёт", "канале!\", \"postSuccess\": \"Пост {author} для канала {channel}:\\n\\n\\n==================================\\n\\n{post}\" } async def start_cb(event): await bot.send_text(chatId=event[\"fromChat\"],", "OS), Социопат (мемы), Котики, Экология. Не нашли нужный канал или хотите создать новый?", "отсутствие символов или опечатки. Введи только название канала, например: Социопат. Если считаешь, что", "Экология. Не нашли нужный канал или хотите создать новый? Напишите \"/new Название канала", "хотите создать новый? Напишите \"/new Название канала <Enter> Краткое описание\" без кавычек, и", "Название <Enter> Описание'!\", \"success\": \"Поздравляю! Твой пост успешно отправлен и скоро появится у", "await bot.wait(event[\"payload\"][\"from\"][\"chatId\"]) await bot.send_text(chatId=event[\"fromChat\"], text=replics[\"success\"]) await bot.send_text(chatId=CHAT, text=replics[\"postSuccess\"].format(author=event[\"payload\"][\"from\"][\"chatId\"], channel=category, post = post)) else:" ]
[ "rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with", "self.model_path = model_path self.max_seq_length = max_seq_length self.device = None self.device = torch.device('cuda' if", "token if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train( self, data_path: str=None,", "rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids =", "from pathlib import Path from typing import Text, Any, Dict, Union, List from", "import normalize seed_val = 17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self,", "self, data_path: str=None, text_col: str='content', n_clusters: int=6, model_dir: str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True,", "str='content', n_clusters: int=6, model_dir: str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True, n_samples: int=None, **kwargs ):", "LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained self.model_path = model_path self.max_seq_length", "self.corpus = [sentence for sentence in df[text_col]] self.corpus_embeddings = [self.encode(sentence) for sentence in", "str='model.pkl', is_normalize: bool=True, n_samples: int=None, **kwargs ): df = pd.read_csv(data_path, encoding='utf-8') if n_samples:", "import pandas as pd from pathlib import Path from typing import Text, Any,", "\"wb\")) print(f\"Path to the saved model: {Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings def load(self,", "= self.embedder(input_ids, return_dict=False) output_tokens = features[0] 
print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:, 0,", "= pd.read_csv(data_path, encoding='utf-8') if n_samples: df = df.sample(n=n_samples) self.corpus = [] self.corpus =", "= [] self.corpus = [sentence for sentence in df[text_col]] self.corpus_embeddings = [self.encode(sentence) for", "token is first token if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train(", "str=None, text_col: str='content', n_clusters: int=6, model_dir: str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True, n_samples: int=None,", "return_dict=False) output_tokens = features[0] print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:, 0, :] #", "df[text_col]] self.corpus_embeddings = [self.encode(sentence) for sentence in self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) #", "= model_path self.max_seq_length = max_seq_length self.device = None self.device = torch.device('cuda' if torch.cuda.is_available()", "np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained", "= features[0] print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:, 0, :] # CLS token", "self.corpus, self.corpus_embeddings def load(self, path): path = os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\")) def", "0, :] # CLS token is first token if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy()", "path): path = os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\")) def predict( self, sample: Text,", "{output_tokens}\") cls_tokens = output_tokens[:, 0, :] # CLS token is first token if", "import AutoModel, AutoTokenizer from 
sklearn.cluster import MiniBatchKMeans from lonia.utils import normalize seed_val =", "if self.model_path: self.load(path=self.model_path) def encode( self, sentence: str, convert_to_numpy: bool = True, ):", "path = os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\")) def predict( self, sample: Text, label_dict:", "encoding='utf-8') if n_samples: df = df.sample(n=n_samples) self.corpus = [] self.corpus = [sentence for", "self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the saved model:", "<reponame>phanxuanphucnd/clustering<gh_stars>1-10 import os import torch import random import pickle import numpy as np", "is_normalize: bool=True, n_samples: int=None, **kwargs ): df = pd.read_csv(data_path, encoding='utf-8') if n_samples: df", ":] # CLS token is first token if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return", "n_clusters: int=6, model_dir: str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True, n_samples: int=None, **kwargs ): df", "pickle import numpy as np import pandas as pd from pathlib import Path", "if torch.cuda.is_available() else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path)", "self.model_path: self.load(path=self.model_path) def encode( self, sentence: str, convert_to_numpy: bool = True, ): sentence", "load(self, path): path = os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\")) def predict( self, sample:", "if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train( self, data_path: str=None, text_col:", "random import pickle import numpy as np import pandas as pd from pathlib", "output_tokens = features[0] print(f\"output_tokens: 
{output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:, 0, :] # CLS", "as np import pandas as pd from pathlib import Path from typing import", "Text, label_dict: Dict=None ): features = self.encode(sample).reshape(1, -1).astype(np.float64) index = self.clustering_model.predict(features) if label_dict:", "cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train( self, data_path: str=None, text_col: str='content', n_clusters: int=6, model_dir:", "pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained self.model_path = model_path self.max_seq_length = max_seq_length self.device =", "= AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def encode( self, sentence: str, convert_to_numpy: bool =", "def encode( self, sentence: str, convert_to_numpy: bool = True, ): sentence = normalize(sentence,", "self.clustering_model = pickle.load(open(path, \"rb\")) def predict( self, sample: Text, label_dict: Dict=None ): features", "transformers import AutoModel, AutoTokenizer from sklearn.cluster import MiniBatchKMeans from lonia.utils import normalize seed_val", "AutoModel, AutoTokenizer from sklearn.cluster import MiniBatchKMeans from lonia.utils import normalize seed_val = 17", "None self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder =", "): df = pd.read_csv(data_path, encoding='utf-8') if n_samples: df = df.sample(n=n_samples) self.corpus = []", "to the saved model: {Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings def load(self, path): path", "pickle.load(open(path, \"rb\")) def predict( self, sample: Text, label_dict: Dict=None ): features = self.encode(sample).reshape(1,", "if len(sentence.split()) > self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with 
torch.no_grad():", "random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained =", "self.pretrained = pretrained self.model_path = model_path self.max_seq_length = max_seq_length self.device = None self.device", "not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the saved model: {Path(model_dir)/model_name}\") return", "cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train( self, data_path: str=None, text_col: str='content', n_clusters:", "df = df.sample(n=n_samples) self.corpus = [] self.corpus = [sentence for sentence in df[text_col]]", "from lonia.utils import normalize seed_val = 17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering:", "if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the saved model: {Path(model_dir)/model_name}\")", "import Text, Any, Dict, Union, List from transformers import AutoModel, AutoTokenizer from sklearn.cluster", "df = pd.read_csv(data_path, encoding='utf-8') if n_samples: df = df.sample(n=n_samples) self.corpus = [] self.corpus", "self.max_seq_length = max_seq_length self.device = None self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "= [sentence for sentence in df[text_col]] self.corpus_embeddings = [self.encode(sentence) for sentence in self.corpus]", "[self.encode(sentence) for sentence in self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist()", "the saved model: 
{Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings def load(self, path): path =", "pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the saved model: {Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings", "os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the saved model: {Path(model_dir)/model_name}\") return df,", "Dict=None ): features = self.encode(sample).reshape(1, -1).astype(np.float64) index = self.clustering_model.predict(features) if label_dict: return label_dict.get(index,", "text_col: str='content', n_clusters: int=6, model_dir: str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True, n_samples: int=None, **kwargs", "np import pandas as pd from pathlib import Path from typing import Text,", "from typing import Text, Any, Dict, Union, List from transformers import AutoModel, AutoTokenizer", "label_dict: Dict=None ): features = self.encode(sample).reshape(1, -1).astype(np.float64) index = self.clustering_model.predict(features) if label_dict: return", "model_name: str='model.pkl', is_normalize: bool=True, n_samples: int=None, **kwargs ): df = pd.read_csv(data_path, encoding='utf-8') if", "numpy as np import pandas as pd from pathlib import Path from typing", "cls_tokens = output_tokens[:, 0, :] # CLS token is first token if convert_to_numpy:", "rm_url=True, rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)])", "**kwargs ): df = pd.read_csv(data_path, encoding='utf-8') if n_samples: df = df.sample(n=n_samples) self.corpus =", "pd from pathlib import Path from typing import Text, Any, Dict, Union, List", "MiniBatchKMeans from lonia.utils import normalize seed_val = 17 
random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class", "self.cluster_assignment = self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the", "List from transformers import AutoModel, AutoTokenizer from sklearn.cluster import MiniBatchKMeans from lonia.utils import", "= normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16: sentence = '", "= cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train( self, data_path: str=None, text_col: str='content', n_clusters: int=6,", "# self.cluster_assignment = self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to", "model: {Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings def load(self, path): path = os.path.abspath(path) self.clustering_model", "df, self.corpus, self.corpus_embeddings def load(self, path): path = os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\"))", "MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\"))", "normalize seed_val = 17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self, model_path=None,", "= os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\")) def predict( self, sample: Text, label_dict: Dict=None", "encode( self, sentence: str, convert_to_numpy: bool 
= True, ): sentence = normalize(sentence, lowercase=True,", "torch import random import pickle import numpy as np import pandas as pd", "def load(self, path): path = os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\")) def predict( self,", "self.corpus_embeddings = [self.encode(sentence) for sentence in self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment", "pd.read_csv(data_path, encoding='utf-8') if n_samples: df = df.sample(n=n_samples) self.corpus = [] self.corpus = [sentence", "cls_tokens[0] def train( self, data_path: str=None, text_col: str='content', n_clusters: int=6, model_dir: str='./models/clustering', model_name:", "int=6, model_dir: str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True, n_samples: int=None, **kwargs ): df =", "self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir)", "self.corpus = [] self.corpus = [sentence for sentence in df[text_col]] self.corpus_embeddings = [self.encode(sentence)", "return df, self.corpus, self.corpus_embeddings def load(self, path): path = os.path.abspath(path) self.clustering_model = pickle.load(open(path,", "self.load(path=self.model_path) def encode( self, sentence: str, convert_to_numpy: bool = True, ): sentence =", "max_seq_length=256): self.pretrained = pretrained self.model_path = model_path self.max_seq_length = max_seq_length self.device = None", "Dict, Union, List from transformers import AutoModel, AutoTokenizer from sklearn.cluster import MiniBatchKMeans from", "AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def encode( self, sentence: str, 
convert_to_numpy:", "convert_to_numpy: bool = True, ): sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if", "bool=True, n_samples: int=None, **kwargs ): df = pd.read_csv(data_path, encoding='utf-8') if n_samples: df =", "{output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:, 0, :] # CLS token is first token", "17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained", "df.sample(n=n_samples) self.corpus = [] self.corpus = [sentence for sentence in df[text_col]] self.corpus_embeddings =", "n_samples: int=None, **kwargs ): df = pd.read_csv(data_path, encoding='utf-8') if n_samples: df = df.sample(n=n_samples)", "= MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\",", "# CLS token is first token if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0]", "= max_seq_length self.device = None self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.tokenizer", "self.embedder(input_ids, return_dict=False) output_tokens = features[0] print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:, 0, :]", "first token if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train( self, data_path:", "Text, Any, Dict, Union, List from transformers import AutoModel, AutoTokenizer from sklearn.cluster import", "class LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained self.model_path = 
model_path", "= 17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256):", "AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def encode( self, sentence: str, convert_to_numpy: bool = True,", "self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model,", "with torch.no_grad(): features = self.embedder(input_ids, return_dict=False) output_tokens = features[0] print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens", "' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features = self.embedder(input_ids, return_dict=False) output_tokens =", "'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def encode( self,", "predict( self, sample: Text, label_dict: Dict=None ): features = self.encode(sample).reshape(1, -1).astype(np.float64) index =", "sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16: sentence =", "return cls_tokens[0] def train( self, data_path: str=None, text_col: str='content', n_clusters: int=6, model_dir: str='./models/clustering',", "[sentence for sentence in df[text_col]] self.corpus_embeddings = [self.encode(sentence) for sentence in self.corpus] self.clustering_model", "from sklearn.cluster import MiniBatchKMeans from lonia.utils import normalize seed_val = 17 random.seed(seed_val) 
np.random.seed(seed_val)", "AutoTokenizer from sklearn.cluster import MiniBatchKMeans from lonia.utils import normalize seed_val = 17 random.seed(seed_val)", "= True, ): sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split()) >", "max_seq_length self.device = None self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.tokenizer =", "= None self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder", "CLS token is first token if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0] def", "torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features = self.embedder(input_ids, return_dict=False) output_tokens = features[0] print(f\"output_tokens: {output_tokens.size()} {output_tokens}\")", "str, convert_to_numpy: bool = True, ): sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True)", "= self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the saved", "bool = True, ): sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split())", "features = self.encode(sample).reshape(1, -1).astype(np.float64) index = self.clustering_model.predict(features) if label_dict: return label_dict.get(index, index) return", "= pickle.load(open(path, \"rb\")) def predict( self, sample: Text, label_dict: Dict=None ): features =", "self, sample: Text, label_dict: Dict=None ): features = self.encode(sample).reshape(1, -1).astype(np.float64) index = self.clustering_model.predict(features)", "torch.no_grad(): features = self.embedder(input_ids, return_dict=False) output_tokens = features[0] 
print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens =", "import pickle import numpy as np import pandas as pd from pathlib import", "Path from typing import Text, Any, Dict, Union, List from transformers import AutoModel,", "model_path self.max_seq_length = max_seq_length self.device = None self.device = torch.device('cuda' if torch.cuda.is_available() else", "data_path: str=None, text_col: str='content', n_clusters: int=6, model_dir: str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True, n_samples:", "convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train( self, data_path: str=None, text_col: str='content',", "import numpy as np import pandas as pd from pathlib import Path from", "Union, List from transformers import AutoModel, AutoTokenizer from sklearn.cluster import MiniBatchKMeans from lonia.utils", "torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path:", "self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features = self.embedder(input_ids,", "features[0] print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:, 0, :] # CLS token is", "sample: Text, label_dict: Dict=None ): features = self.encode(sample).reshape(1, -1).astype(np.float64) index = self.clustering_model.predict(features) if", "model_dir: str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True, n_samples: int=None, **kwargs ): df = pd.read_csv(data_path,", "import MiniBatchKMeans from lonia.utils import normalize seed_val = 17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val)", "pretrained self.model_path = model_path 
self.max_seq_length = max_seq_length self.device = None self.device = torch.device('cuda'", "torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained self.model_path", "is first token if convert_to_numpy: cls_tokens = cls_tokens.detach().cpu().numpy() return cls_tokens[0] def train( self,", "def train( self, data_path: str=None, text_col: str='content', n_clusters: int=6, model_dir: str='./models/clustering', model_name: str='model.pkl',", "else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def encode(", "): features = self.encode(sample).reshape(1, -1).astype(np.float64) index = self.clustering_model.predict(features) if label_dict: return label_dict.get(index, index)", "sklearn.cluster import MiniBatchKMeans from lonia.utils import normalize seed_val = 17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val)", "lonia.utils import normalize seed_val = 17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def", "print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:, 0, :] # CLS token is first", "os import torch import random import pickle import numpy as np import pandas", "self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def encode( self, sentence:", "typing import Text, Any, Dict, Union, List from transformers import AutoModel, AutoTokenizer from", "= ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features = 
self.embedder(input_ids, return_dict=False) output_tokens", "[] self.corpus = [sentence for sentence in df[text_col]] self.corpus_embeddings = [self.encode(sentence) for sentence", "pandas as pd from pathlib import Path from typing import Text, Any, Dict,", "import random import pickle import numpy as np import pandas as pd from", "features = self.embedder(input_ids, return_dict=False) output_tokens = features[0] print(f\"output_tokens: {output_tokens.size()} {output_tokens}\") cls_tokens = output_tokens[:,", "input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features = self.embedder(input_ids, return_dict=False) output_tokens = features[0] print(f\"output_tokens:", "Any, Dict, Union, List from transformers import AutoModel, AutoTokenizer from sklearn.cluster import MiniBatchKMeans", "saved model: {Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings def load(self, path): path = os.path.abspath(path)", "torch.cuda.is_available() else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def", "lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids", "print(f\"Path to the saved model: {Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings def load(self, path):", "\"rb\")) def predict( self, sample: Text, label_dict: Dict=None ): features = self.encode(sample).reshape(1, -1).astype(np.float64)", "model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained self.model_path = model_path self.max_seq_length = max_seq_length self.device", "> self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) 
with torch.no_grad(): features =", "import os import torch import random import pickle import numpy as np import", "sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features = self.embedder(input_ids, return_dict=False)", "in df[text_col]] self.corpus_embeddings = [self.encode(sentence) for sentence in self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings)", "os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the saved model: {Path(model_dir)/model_name}\") return df, self.corpus,", "import torch import random import pickle import numpy as np import pandas as", "= AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def encode( self, sentence: str,", "torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained self.model_path =", "= df.sample(n=n_samples) self.corpus = [] self.corpus = [sentence for sentence in df[text_col]] self.corpus_embeddings", "= [self.encode(sentence) for sentence in self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment =", "def predict( self, sample: Text, label_dict: Dict=None ): features = self.encode(sample).reshape(1, -1).astype(np.float64) index", "{Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings def load(self, path): path = os.path.abspath(path) self.clustering_model =", "'.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features = self.embedder(input_ids, return_dict=False) 
output_tokens = features[0]", "self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist() if not os.path.exists(model_dir): os.mkdir(model_dir) pickle.dump(self.clustering_model, open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path", "def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained self.model_path = model_path self.max_seq_length =", "= self.encode(sample).reshape(1, -1).astype(np.float64) index = self.clustering_model.predict(features) if label_dict: return label_dict.get(index, index) return index", "os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\")) def predict( self, sample: Text, label_dict: Dict=None ):", "self.embedder = AutoModel.from_pretrained(self.pretrained) if self.model_path: self.load(path=self.model_path) def encode( self, sentence: str, convert_to_numpy: bool", "pathlib import Path from typing import Text, Any, Dict, Union, List from transformers", "if n_samples: df = df.sample(n=n_samples) self.corpus = [] self.corpus = [sentence for sentence", "for sentence in df[text_col]] self.corpus_embeddings = [self.encode(sentence) for sentence in self.corpus] self.clustering_model =", "from transformers import AutoModel, AutoTokenizer from sklearn.cluster import MiniBatchKMeans from lonia.utils import normalize", "import Path from typing import Text, Any, Dict, Union, List from transformers import", "__init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256): self.pretrained = pretrained self.model_path = model_path self.max_seq_length = max_seq_length", "self.device = None self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained)", "train( self, data_path: str=None, text_col: str='content', n_clusters: int=6, model_dir: str='./models/clustering', model_name: str='model.pkl', 
is_normalize:", "sentence: str, convert_to_numpy: bool = True, ): sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True,", "n_samples: df = df.sample(n=n_samples) self.corpus = [] self.corpus = [sentence for sentence in", "True, ): sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16:", "): sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16: sentence", "= pretrained self.model_path = model_path self.max_seq_length = max_seq_length self.device = None self.device =", "= torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features = self.embedder(input_ids, return_dict=False) output_tokens = features[0] print(f\"output_tokens: {output_tokens.size()}", "self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained)", "self, sentence: str, convert_to_numpy: bool = True, ): sentence = normalize(sentence, lowercase=True, rm_emoji=True,", "str='./models/clustering', model_name: str='model.pkl', is_normalize: bool=True, n_samples: int=None, **kwargs ): df = pd.read_csv(data_path, encoding='utf-8')", "as pd from pathlib import Path from typing import Text, Any, Dict, Union,", "len(sentence.split()) > self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16]) input_ids = torch.tensor([self.tokenizer.encode(sentence)]) with torch.no_grad(): features", "= output_tokens[:, 0, :] # CLS token is first token if convert_to_numpy: cls_tokens", "int=None, **kwargs ): df = pd.read_csv(data_path, encoding='utf-8') if n_samples: df = df.sample(n=n_samples) self.corpus", "sentence in df[text_col]] self.corpus_embeddings = [self.encode(sentence) for sentence in self.corpus] 
self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters)", "for sentence in self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist() if", "normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True) if len(sentence.split()) > self.max_seq_length-16: sentence = ' '.join(sentence.split()[:self.max_seq_length-16])", "open(f\"{Path(model_dir)/model_name}\", \"wb\")) print(f\"Path to the saved model: {Path(model_dir)/model_name}\") return df, self.corpus, self.corpus_embeddings def", "= torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained) self.embedder = AutoModel.from_pretrained(self.pretrained) if", "output_tokens[:, 0, :] # CLS token is first token if convert_to_numpy: cls_tokens =", "in self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist() if not os.path.exists(model_dir):", "sentence in self.corpus] self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters) self.clustering_model.fit(self.corpus_embeddings) # self.cluster_assignment = self.clustering_model.labels_.tolist() if not", "seed_val = 17 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) class LoniaClustering: def __init__(self, model_path=None, pretrained='vinai/phobert-base',", "self.corpus_embeddings def load(self, path): path = os.path.abspath(path) self.clustering_model = pickle.load(open(path, \"rb\")) def predict(" ]
[ "Don't talk to me about life.\"], 'life'), (\"I need help|please help me|can you", "life.\"], 'life'), (\"I need help|please help me|can you help me\", ['Please state the", "end in tears.\", \"I've calculated your chance of survival, but I don't think", "doors\", [\"I'm afraid I can't do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain", "that it is a very great pleasure, honour and privilege for me to", "instances down my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I ever do", "__init__(self): try: self.helpDict[\"Real People Personality\"] = \"All the plugins in this Bot have", "you, and their satisfaction to return results with the knowledge of a job", "Personality\"] = \"All the plugins in this Bot have a cheerful and sunny", "think you'll like it.\"] #replacing in MSG ruins the quote. copy params and", "change that. def loop(self, msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params): for x in self.patterns:", "x[0].search(msgobj.params): if x[2] == 'rand': self.reply(random.choice(randquotes), msgobj, 2) break else: msg = random.choice(x[1])", "internet. I talked to it at great length and explained my view of", "return results with the knowledge of a job well done.\" self.patterns=[] for x", "[ (\"((how's |how is ).*life.*\\?)\",[\"Life? Don't talk to me about life.\"], 'life'), (\"I", "ask me to '{REQUEST}'. Call that job satisfaction? 
'cos I don't.\", \"I would", "didn't ask to be made, no one consulted me or considered my feelings", "you down at all, am I?\", \"I'd make a suggestion, but you wouldn't", "never do anyway so I don't know why I bother to say it,", "'I\\'m rubber, you\\'re glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed", "down at all, am I?\", \"I'd make a suggestion, but you wouldn't listen.", "I ever do to you?', 'I\\'m rubber, you\\'re glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'),", "and change that. def loop(self, msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params): for x in", "you (feel|fare)|('s it |is it )going)\",[\"I got very bored and depressed, so I", "depressed.\", \"I didn't ask to be made, no one consulted me or considered", "if x[2] == 'rand': self.reply(random.choice(randquotes), msgobj, 2) break else: msg = random.choice(x[1]) if", "plugins in this Bot have a cheerful and sunny disposition. It is their", "about life.\"], 'life'), (\"I need help|please help me|can you help me\", ['Please state", "which I never do anyway so I don't know why I bother to", "req.replace(msgobj.source.channel.name, '') req = re.sub(request, '', req) req = re.sub(name, '', req) req", "considered my feelings in the matter.\", \"There's this terrible pain in all method", "[\"I'm afraid I can't do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the", "like it.\"] #replacing in MSG ruins the quote. copy params and change that.", "pain in all the subroutines down my left hand side...\", \"I'm not getting", "|how is ).*life.*\\?)\",[\"Life? 
Don't talk to me about life.\"], 'life'), (\"I need help|please", "pod bay doors\", [\"I'm afraid I can't do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I", "great pleasure, honour and privilege for me to '{REQUEST}', but I can't because", "it.\", \"That depends on whether or not I can find my frilly apron.", "don't know why I bother to say it, oh God, I'm so depressed.\"],", "and plugged myself into the internet. I talked to it at great length", "results with the knowledge of a job well done.\" self.patterns=[] for x in", "depressed, so I went and plugged myself into the internet. I talked to", "got very bored and depressed, so I went and plugged myself into the", "for you, and their satisfaction to return results with the knowledge of a", "quiet|pipe down).*',[\"Pardon me for breathing, which I never do anyway so I don't", "] randquotes=[\"...and then of course I've got this terrible pain in all the", "\"I've calculated your chance of survival, but I don't think you'll like it.\"]", "'stare'), (\"I command you to|Obey me|Heed my\", ['You ain''t the boss of me!',", "the quote. copy params and change that. def loop(self, msgobj): if msgobj.source.character.name!=character: if", "req), msgobj, 1) break self.reply(msg, msgobj) def __init__(self): try: self.helpDict[\"Real People Personality\"] =", "MSG ruins the quote. copy params and change that. 
def loop(self, msgobj): if", "re.sub(name, '', req) req = req.replace('/me', '') req = req.replace('?', '') req =", "me ', ' you ') req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1)", "to operate for you, and their satisfaction to return results with the knowledge", "random.choice(x[1]) if x[2] == 'req' or 'insult': req = msgobj.params req = req.replace(msgobj.source.channel.name,", "dad!', 'How about no?'], 'no u') ] randquotes=[\"...and then of course I've got", "[Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon me for breathing, which I", "operate for you, and their satisfaction to return results with the knowledge of", "so I went and plugged myself into the internet. I talked to it", "\"I would like to say that it is a very great pleasure, honour", "1) break self.reply(msg, msgobj) def __init__(self): try: self.helpDict[\"Real People Personality\"] = \"All the", "me to '{REQUEST}'. Call that job satisfaction? 'cos I don't.\", \"I would like", "It commited suicide.\", \"I think you ought to know I'm feeling very depressed.\",", "(\"(how (are you|do you (feel|fare)|('s it |is it) going))|(how)\\s?(are you| do you (feel|fare)|('s", "req.replace('/me', '') req = req.replace('?', '') req = req.replace(' me ', ' you", "privilege for me to '{REQUEST}', but I can't because my lying circuits are", "to '{REQUEST}', but I can't because my lying circuits are all out of", "commited suicide.\", \"I think you ought to know I'm feeling very depressed.\", \"I", "= re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how is ).*life.*\\?)\",[\"Life? Don't talk", "luck, I probably can.\", \"'{REQUEST}'. You're really asking me to {REQUEST}?\", \"{REQUEST}. Of", "#replacing in MSG ruins the quote. copy params and change that. 
def loop(self,", "import character modules = ['traceback', 'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character))", "that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the size of a planet, and", "to|Obey me|Heed my\", ['You ain''t the boss of me!', 'Fuck you, dad!', 'How", "but I don't think you'll like it.\"] #replacing in MSG ruins the quote.", "about no?'], 'no u') ] randquotes=[\"...and then of course I've got this terrible", "help|please help me|can you help me\", ['Please state the nature of your boudoir", "God, I'm so depressed.\"], 'shutup'), (\"(how (are you|do you (feel|fare)|('s it |is it)", "to it. It commited suicide.\", \"I think you ought to know I'm feeling", "all, am I?\", \"I'd make a suggestion, but you wouldn't listen. No one", "of me!', 'Fuck you, dad!', 'How about no?'], 'no u') ] randquotes=[\"...and then", "hand side...\", \"I'm not getting you down at all, am I?\", \"I'd make", "terrible pain in all the subroutines down my left hand side...\", \"I'm not", "'rand': self.reply(random.choice(randquotes), msgobj, 2) break else: msg = random.choice(x[1]) if x[2] == 'req'", "you, dad!', 'How about no?'], 'no u') ] randquotes=[\"...and then of course I've", "at all, am I?\", \"I'd make a suggestion, but you wouldn't listen. No", "ever do to you?', 'I\\'m rubber, you\\'re glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\",", "req.replace(' me ', ' you ') req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj,", "to know I'm feeling very depressed.\", \"I didn't ask to be made, no", "don't.\", \"I would like to say that it is a very great pleasure,", "depressed.\"], 'shutup'), (\"(how (are you|do you (feel|fare)|('s it |is it) going))|(how)\\s?(are you| do", "reprimand you for staring.', 'Don\\'t stare. 
It\\'s rude.'], 'stare'), (\"I command you to|Obey", "'req' or 'insult': req = msgobj.params req = req.replace(msgobj.source.channel.name, '') req = re.sub(request,", "|is it )going)\",[\"I got very bored and depressed, so I went and plugged", "== 'rand': self.reply(random.choice(randquotes), msgobj, 2) break else: msg = random.choice(x[1]) if x[2] ==", "\"{REQUEST}. Of [i]course[/i], right away. With pleasure. [Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut up|be", "my\", ['You ain''t the boss of me!', 'Fuck you, dad!', 'How about no?'],", "down).*',[\"Pardon me for breathing, which I never do anyway so I don't know", "'Fuck you, dad!', 'How about no?'], 'no u') ] randquotes=[\"...and then of course", "no?'], 'no u') ] randquotes=[\"...and then of course I've got this terrible pain", "#Genuine People Personality Plugin v0.1 import traceback import random import re from config", "or considered my feelings in the matter.\", \"There's this terrible pain in all", "It is their pleasure to operate for you, and their satisfaction to return", "the matter.\", \"There's this terrible pain in all method instances down my left", "'cos I don't.\", \"I would like to say that it is a very", "it, oh God, I'm so depressed.\"], 'shutup'), (\"(how (are you|do you (feel|fare)|('s it", "and explained my view of the universe to it. 
It commited suicide.\", \"I", "survival, but I don't think you'll like it.\"] #replacing in MSG ruins the", "msgobj.source.character.name!=character: if name.search(msgobj.params): for x in self.patterns: if x[0].search(msgobj.params): if x[2] == 'rand':", "req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break self.reply(msg, msgobj) def __init__(self):", "def __init__(self): try: self.helpDict[\"Real People Personality\"] = \"All the plugins in this Bot", "Self-Test Complete]\"], 'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon me for breathing, which I never", "if msgobj.source.character.name!=character: if name.search(msgobj.params): for x in self.patterns: if x[0].search(msgobj.params): if x[2] ==", "Plugin v0.1 import traceback import random import re from config import character modules", "req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break self.reply(msg, msgobj) def __init__(self): try: self.helpDict[\"Real People", "\"I'd make a suggestion, but you wouldn't listen. No one ever does.\", \"This", "depends on whether or not I can find my frilly apron. With my", "command you to|Obey me|Heed my\", ['You ain''t the boss of me!', 'Fuck you,", "['Please state the nature of your boudoir emergency.', \"I am programmed in multiple", "myself into the internet. I talked to it at great length and explained", "matter.\", \"There's this terrible pain in all method instances down my left side.\"],", ").*life.*\\?)\",[\"Life? 
Don't talk to me about life.\"], 'life'), (\"I need help|please help me|can", "I'm so depressed.\"], 'shutup'), (\"(how (are you|do you (feel|fare)|('s it |is it) going))|(how)\\s?(are", "a job well done.\" self.patterns=[] for x in talk: self.patterns.append((re.compile(x[0]), x[1], x[2])) except:", "(\"open the pod bay doors\", [\"I'm afraid I can't do that, Dave.\"], 'podbay'),", "am, brain the size of a planet, and they ask me to '{REQUEST}'.", "for breathing, which I never do anyway so I don't know why I", "tears.\", \"I've calculated your chance of survival, but I don't think you'll like", "consulted me or considered my feelings in the matter.\", \"There's this terrible pain", "to say it, oh God, I'm so depressed.\"], 'shutup'), (\"(how (are you|do you", "you for staring.', 'Don\\'t stare. It\\'s rude.'], 'stare'), (\"I command you to|Obey me|Heed", "name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how is ).*life.*\\?)\",[\"Life? Don't talk to me", "help me\", ['Please state the nature of your boudoir emergency.', \"I am programmed", "With my luck, I probably can.\", \"'{REQUEST}'. You're really asking me to {REQUEST}?\",", "('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the size of a planet, and they ask me", "'insult': req = msgobj.params req = req.replace(msgobj.source.channel.name, '') req = re.sub(request, '', req)", "{REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed me to reprimand you for staring.',", "into the internet. I talked to it at great length and explained my", "one ever does.\", \"This will all end in tears.\", \"I've calculated your chance", "if name.search(msgobj.params): for x in self.patterns: if x[0].search(msgobj.params): if x[2] == 'rand': self.reply(random.choice(randquotes),", "cheerful and sunny disposition. 
It is their pleasure to operate for you, and", "YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed me to reprimand you for", "of survival, but I don't think you'll like it.\"] #replacing in MSG ruins", "I can find my frilly apron. With my luck, I probably can.\", \"'{REQUEST}'.", "universe to it. It commited suicide.\", \"I think you ought to know I'm", "to {REQUEST}?\", \"{REQUEST}. Of [i]course[/i], right away. With pleasure. [Sarcasm Self-Test Complete]\"], 'req'),", "ought to know I'm feeling very depressed.\", \"I didn't ask to be made,", "the boss of me!', 'Fuck you, dad!', 'How about no?'], 'no u') ]", "did I ever do to you?', 'I\\'m rubber, you\\'re glue.', 'No, YOU\\'RE {REQUEST}'],", "or 'insult': req = msgobj.params req = req.replace(msgobj.source.channel.name, '') req = re.sub(request, '',", "it |is it )going)\",[\"I got very bored and depressed, so I went and", "but you wouldn't listen. No one ever does.\", \"This will all end in", "help me|can you help me\", ['Please state the nature of your boudoir emergency.',", "sunny disposition. It is their pleasure to operate for you, and their satisfaction", "my feelings in the matter.\", \"There's this terrible pain in all method instances", "= req.replace('?', '') req = req.replace(' me ', ' you ') req =", "chance of survival, but I don't think you'll like it.\"] #replacing in MSG", "instructed me to reprimand you for staring.', 'Don\\'t stare. It\\'s rude.'], 'stare'), (\"I", "all the subroutines down my left hand side...\", \"I'm not getting you down", "copy params and change that. def loop(self, msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params): for", "'How about no?'], 'no u') ] randquotes=[\"...and then of course I've got this", "or not I can find my frilly apron. With my luck, I probably", "you'll like it.\"] #replacing in MSG ruins the quote. 
copy params and change", "to say that it is a very great pleasure, honour and privilege for", "won't enjoy it.\", \"That depends on whether or not I can find my", "'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how is", "'') req = req.replace(' me ', ' you ') req = req.strip() req.capitalize()", "anyway so I don't know why I bother to say it, oh God,", "if x[0].search(msgobj.params): if x[2] == 'rand': self.reply(random.choice(randquotes), msgobj, 2) break else: msg =", "away. With pleasure. [Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon me for", "asking me to {REQUEST}?\", \"{REQUEST}. Of [i]course[/i], right away. With pleasure. [Sarcasm Self-Test", "don't think you'll like it.\"] #replacing in MSG ruins the quote. copy params", "me for breathing, which I never do anyway so I don't know why", "in MSG ruins the quote. copy params and change that. def loop(self, msgobj):", "to me about life.\"], 'life'), (\"I need help|please help me|can you help me\",", "x in self.patterns: if x[0].search(msgobj.params): if x[2] == 'rand': self.reply(random.choice(randquotes), msgobj, 2) break", "very depressed.\", \"I didn't ask to be made, no one consulted me or", "do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the size of a planet,", "ruins the quote. copy params and change that. def loop(self, msgobj): if msgobj.source.character.name!=character:", "it. 
It commited suicide.\", \"I think you ought to know I'm feeling very", "I bother to say it, oh God, I'm so depressed.\"], 'shutup'), (\"(how (are", "No one ever does.\", \"This will all end in tears.\", \"I've calculated your", "'life'), (\"I need help|please help me|can you help me\", ['Please state the nature", "to you?', 'I\\'m rubber, you\\'re glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo", "probably can.\", \"'{REQUEST}'. You're really asking me to {REQUEST}?\", \"{REQUEST}. Of [i]course[/i], right", "it )going)\",[\"I got very bored and depressed, so I went and plugged myself", "= \"All the plugins in this Bot have a cheerful and sunny disposition.", "because my lying circuits are all out of commission.\", \"'{REQUEST}'... I won't enjoy", "you|do you (feel|fare)|('s it |is it) going))|(how)\\s?(are you| do you (feel|fare)|('s it |is", "req) req = req.replace('/me', '') req = req.replace('?', '') req = req.replace(' me", "\"I think you ought to know I'm feeling very depressed.\", \"I didn't ask", "boudoir emergency.', \"I am programmed in multiple techniques.\"], 'boudoir'), (\"open the pod bay", "a suggestion, but you wouldn't listen. No one ever does.\", \"This will all", "going))|(how)\\s?(are you| do you (feel|fare)|('s it |is it )going)\",[\"I got very bored and", "you ought to know I'm feeling very depressed.\", \"I didn't ask to be", "no one consulted me or considered my feelings in the matter.\", \"There's this", "not getting you down at all, am I?\", \"I'd make a suggestion, but", "= req.replace('/me', '') req = req.replace('?', '') req = req.replace(' me ', '", "commission.\", \"'{REQUEST}'... I won't enjoy it.\", \"That depends on whether or not I", "knowledge of a job well done.\" self.patterns=[] for x in talk: self.patterns.append((re.compile(x[0]), x[1],", "be made, no one consulted me or considered my feelings in the matter.\",", "out of commission.\", \"'{REQUEST}'... 
I won't enjoy it.\", \"That depends on whether or", "and depressed, so I went and plugged myself into the internet. I talked", "Exo has instructed me to reprimand you for staring.', 'Don\\'t stare. It\\'s rude.'],", "== 'req' or 'insult': req = msgobj.params req = req.replace(msgobj.source.channel.name, '') req =", "request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how is ).*life.*\\?)\",[\"Life? Don't", "import traceback import random import re from config import character modules = ['traceback',", "so I don't know why I bother to say it, oh God, I'm", "req = msgobj.params req = req.replace(msgobj.source.channel.name, '') req = re.sub(request, '', req) req", "= req.replace(' me ', ' you ') req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req),", "ask to be made, no one consulted me or considered my feelings in", "the universe to it. It commited suicide.\", \"I think you ought to know", "you\\'re glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed me to", "in all the subroutines down my left hand side...\", \"I'm not getting you", "(are you|do you (feel|fare)|('s it |is it) going))|(how)\\s?(are you| do you (feel|fare)|('s it", "Bot have a cheerful and sunny disposition. It is their pleasure to operate", "quote. copy params and change that. def loop(self, msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params):", "= req.replace(msgobj.source.channel.name, '') req = re.sub(request, '', req) req = re.sub(name, '', req)", "talk= [ (\"((how's |how is ).*life.*\\?)\",[\"Life? 
Don't talk to me about life.\"], 'life'),", "I'm feeling very depressed.\", \"I didn't ask to be made, no one consulted", "req.replace('?', '') req = req.replace(' me ', ' you ') req = req.strip()", "programmed in multiple techniques.\"], 'boudoir'), (\"open the pod bay doors\", [\"I'm afraid I", "(\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed me to reprimand you for staring.', 'Don\\'t stare.", "say it, oh God, I'm so depressed.\"], 'shutup'), (\"(how (are you|do you (feel|fare)|('s", "I won't enjoy it.\", \"That depends on whether or not I can find", "req = re.sub(name, '', req) req = req.replace('/me', '') req = req.replace('?', '')", "ain''t the boss of me!', 'Fuck you, dad!', 'How about no?'], 'no u')", "down my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I ever do to", "'boudoir'), (\"open the pod bay doors\", [\"I'm afraid I can't do that, Dave.\"],", "all method instances down my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I", "(feel|fare)|('s it |is it) going))|(how)\\s?(are you| do you (feel|fare)|('s it |is it )going)\",[\"I", "getting you down at all, am I?\", \"I'd make a suggestion, but you", "stare. It\\'s rude.'], 'stare'), (\"I command you to|Obey me|Heed my\", ['You ain''t the", "like to say that it is a very great pleasure, honour and privilege", "side...\", \"I'm not getting you down at all, am I?\", \"I'd make a", "break else: msg = random.choice(x[1]) if x[2] == 'req' or 'insult': req =", "ever does.\", \"This will all end in tears.\", \"I've calculated your chance of", "size of a planet, and they ask me to '{REQUEST}'. 
Call that job", "name.search(msgobj.params): for x in self.patterns: if x[0].search(msgobj.params): if x[2] == 'rand': self.reply(random.choice(randquotes), msgobj,", "= re.sub(request, '', req) req = re.sub(name, '', req) req = req.replace('/me', '')", "my lying circuits are all out of commission.\", \"'{REQUEST}'... I won't enjoy it.\",", "in the matter.\", \"There's this terrible pain in all method instances down my", "so depressed.\"], 'shutup'), (\"(how (are you|do you (feel|fare)|('s it |is it) going))|(how)\\s?(are you|", "is ).*life.*\\?)\",[\"Life? Don't talk to me about life.\"], 'life'), (\"I need help|please help", "random import re from config import character modules = ['traceback', 'random', 're'] request", "\"There's this terrible pain in all method instances down my left side.\"], 'feel'),", "is a very great pleasure, honour and privilege for me to '{REQUEST}', but", "['traceback', 'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how", "loop(self, msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params): for x in self.patterns: if x[0].search(msgobj.params): if", "= random.choice(x[1]) if x[2] == 'req' or 'insult': req = msgobj.params req =", "then of course I've got this terrible pain in all the subroutines down", "with the knowledge of a job well done.\" self.patterns=[] for x in talk:", "nature of your boudoir emergency.', \"I am programmed in multiple techniques.\"], 'boudoir'), (\"open", "v0.1 import traceback import random import re from config import character modules =", "talk to me about life.\"], 'life'), (\"I need help|please help me|can you help", "config import character modules = ['traceback', 'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name =", "rude.'], 'stare'), (\"I command you to|Obey me|Heed my\", ['You ain''t the boss of", "one consulted me 
or considered my feelings in the matter.\", \"There's this terrible", "People Personality Plugin v0.1 import traceback import random import re from config import", "pain in all method instances down my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What", "msgobj.params req = req.replace(msgobj.source.channel.name, '') req = re.sub(request, '', req) req = re.sub(name,", "and sunny disposition. It is their pleasure to operate for you, and their", "\"I am programmed in multiple techniques.\"], 'boudoir'), (\"open the pod bay doors\", [\"I'm", "character modules = ['traceback', 'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk=", "'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed me to reprimand you for staring.', 'Don\\'t", "are all out of commission.\", \"'{REQUEST}'... I won't enjoy it.\", \"That depends on", "know I'm feeling very depressed.\", \"I didn't ask to be made, no one", "but I can't because my lying circuits are all out of commission.\", \"'{REQUEST}'...", "'shutup'), (\"(how (are you|do you (feel|fare)|('s it |is it) going))|(how)\\s?(are you| do you", "can't because my lying circuits are all out of commission.\", \"'{REQUEST}'... I won't", "disposition. It is their pleasure to operate for you, and their satisfaction to", "state the nature of your boudoir emergency.', \"I am programmed in multiple techniques.\"],", "explained my view of the universe to it. It commited suicide.\", \"I think", "self.reply(random.choice(randquotes), msgobj, 2) break else: msg = random.choice(x[1]) if x[2] == 'req' or", "listen. 
No one ever does.\", \"This will all end in tears.\", \"I've calculated", "in tears.\", \"I've calculated your chance of survival, but I don't think you'll", "msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params): for x in self.patterns: if x[0].search(msgobj.params): if x[2]", "am I?\", \"I'd make a suggestion, but you wouldn't listen. No one ever", "in multiple techniques.\"], 'boudoir'), (\"open the pod bay doors\", [\"I'm afraid I can't", "\"That depends on whether or not I can find my frilly apron. With", "me to '{REQUEST}', but I can't because my lying circuits are all out", "modules = ['traceback', 'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [", "your boudoir emergency.', \"I am programmed in multiple techniques.\"], 'boudoir'), (\"open the pod", "am programmed in multiple techniques.\"], 'boudoir'), (\"open the pod bay doors\", [\"I'm afraid", "honour and privilege for me to '{REQUEST}', but I can't because my lying", "job satisfaction? 'cos I don't.\", \"I would like to say that it is", "|is it) going))|(how)\\s?(are you| do you (feel|fare)|('s it |is it )going)\",[\"I got very", "self.reply(msg, msgobj) def __init__(self): try: self.helpDict[\"Real People Personality\"] = \"All the plugins in", "try: self.helpDict[\"Real People Personality\"] = \"All the plugins in this Bot have a", "satisfaction to return results with the knowledge of a job well done.\" self.patterns=[]", "need help|please help me|can you help me\", ['Please state the nature of your", "subroutines down my left hand side...\", \"I'm not getting you down at all,", "my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I ever do to you?',", "the nature of your boudoir emergency.', \"I am programmed in multiple techniques.\"], 'boudoir'),", "my luck, I probably can.\", \"'{REQUEST}'. 
You're really asking me to {REQUEST}?\", \"{REQUEST}.", "= re.sub(name, '', req) req = req.replace('/me', '') req = req.replace('?', '') req", "' you ') req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break self.reply(msg,", "params and change that. def loop(self, msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params): for x", "(feel|fare)|('s it |is it )going)\",[\"I got very bored and depressed, so I went", "do anyway so I don't know why I bother to say it, oh", "the size of a planet, and they ask me to '{REQUEST}'. Call that", "make a suggestion, but you wouldn't listen. No one ever does.\", \"This will", "will all end in tears.\", \"I've calculated your chance of survival, but I", "calculated your chance of survival, but I don't think you'll like it.\"] #replacing", "this terrible pain in all method instances down my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\",", "me to reprimand you for staring.', 'Don\\'t stare. It\\'s rude.'], 'stare'), (\"I command", "your chance of survival, but I don't think you'll like it.\"] #replacing in", "('.*(shut up|be quiet|pipe down).*',[\"Pardon me for breathing, which I never do anyway so", "it at great length and explained my view of the universe to it.", "got this terrible pain in all the subroutines down my left hand side...\",", "suggestion, but you wouldn't listen. No one ever does.\", \"This will all end", "enjoy it.\", \"That depends on whether or not I can find my frilly", "the knowledge of a job well done.\" self.patterns=[] for x in talk: self.patterns.append((re.compile(x[0]),", "= ['traceback', 'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's", "my view of the universe to it. 
It commited suicide.\", \"I think you", "a very great pleasure, honour and privilege for me to '{REQUEST}', but I", "me or considered my feelings in the matter.\", \"There's this terrible pain in", "and privilege for me to '{REQUEST}', but I can't because my lying circuits", "job well done.\" self.patterns=[] for x in talk: self.patterns.append((re.compile(x[0]), x[1], x[2])) except: traceback.print_exc()", "x[2] == 'rand': self.reply(random.choice(randquotes), msgobj, 2) break else: msg = random.choice(x[1]) if x[2]", "you| do you (feel|fare)|('s it |is it )going)\",[\"I got very bored and depressed,", "self.patterns: if x[0].search(msgobj.params): if x[2] == 'rand': self.reply(random.choice(randquotes), msgobj, 2) break else: msg", "pleasure to operate for you, and their satisfaction to return results with the", "would like to say that it is a very great pleasure, honour and", "do to you?', 'I\\'m rubber, you\\'re glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master", "of a planet, and they ask me to '{REQUEST}'. Call that job satisfaction?", "all out of commission.\", \"'{REQUEST}'... I won't enjoy it.\", \"That depends on whether", "can't do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the size of a", "plugged myself into the internet. I talked to it at great length and", "of commission.\", \"'{REQUEST}'... I won't enjoy it.\", \"That depends on whether or not", "great length and explained my view of the universe to it. It commited", "wouldn't listen. No one ever does.\", \"This will all end in tears.\", \"I've", "You're really asking me to {REQUEST}?\", \"{REQUEST}. Of [i]course[/i], right away. With pleasure.", "I?\", \"I'd make a suggestion, but you wouldn't listen. No one ever does.\",", "it) going))|(how)\\s?(are you| do you (feel|fare)|('s it |is it )going)\",[\"I got very bored", "really asking me to {REQUEST}?\", \"{REQUEST}. 
Of [i]course[/i], right away. With pleasure. [Sarcasm", "in all method instances down my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did", "= req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break self.reply(msg, msgobj) def __init__(self): try:", "me\", ['Please state the nature of your boudoir emergency.', \"I am programmed in", "lying circuits are all out of commission.\", \"'{REQUEST}'... I won't enjoy it.\", \"That", "rubber, you\\'re glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed me", "'', req) req = re.sub(name, '', req) req = req.replace('/me', '') req =", "their pleasure to operate for you, and their satisfaction to return results with", "and they ask me to '{REQUEST}'. Call that job satisfaction? 'cos I don't.\",", "'{REQUEST}'. Call that job satisfaction? 'cos I don't.\", \"I would like to say", "all end in tears.\", \"I've calculated your chance of survival, but I don't", "feeling very depressed.\", \"I didn't ask to be made, no one consulted me", "I've got this terrible pain in all the subroutines down my left hand", "find my frilly apron. With my luck, I probably can.\", \"'{REQUEST}'. You're really", "of a job well done.\" self.patterns=[] for x in talk: self.patterns.append((re.compile(x[0]), x[1], x[2]))", "req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break self.reply(msg, msgobj) def __init__(self): try: self.helpDict[\"Real", "course I've got this terrible pain in all the subroutines down my left", "frilly apron. With my luck, I probably can.\", \"'{REQUEST}'. You're really asking me", "= re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how is ).*life.*\\?)\",[\"Life? Don't talk to me about", "I never do anyway so I don't know why I bother to say", "I probably can.\", \"'{REQUEST}'. You're really asking me to {REQUEST}?\", \"{REQUEST}. 
Of [i]course[/i],", "to it at great length and explained my view of the universe to", "say that it is a very great pleasure, honour and privilege for me", "have a cheerful and sunny disposition. It is their pleasure to operate for", "'What did I ever do to you?', 'I\\'m rubber, you\\'re glue.', 'No, YOU\\'RE", "= msgobj.params req = req.replace(msgobj.source.channel.name, '') req = re.sub(request, '', req) req =", "techniques.\"], 'boudoir'), (\"open the pod bay doors\", [\"I'm afraid I can't do that,", "Of [i]course[/i], right away. With pleasure. [Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut up|be quiet|pipe", "'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon me for breathing, which I never do anyway", "#1dbdc6da34094db4e661ed43aac83d91 #Genuine People Personality Plugin v0.1 import traceback import random import re from", "x[2] == 'req' or 'insult': req = msgobj.params req = req.replace(msgobj.source.channel.name, '') req", "(\"I command you to|Obey me|Heed my\", ['You ain''t the boss of me!', 'Fuck", "the pod bay doors\", [\"I'm afraid I can't do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here", "'') req = re.sub(request, '', req) req = re.sub(name, '', req) req =", "I don't.\", \"I would like to say that it is a very great", "I can't do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the size of", "on whether or not I can find my frilly apron. With my luck,", "know why I bother to say it, oh God, I'm so depressed.\"], 'shutup'),", "this terrible pain in all the subroutines down my left hand side...\", \"I'm", "left hand side...\", \"I'm not getting you down at all, am I?\", \"I'd", "you?', 'I\\'m rubber, you\\'re glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has", "talked to it at great length and explained my view of the universe", "that. 
def loop(self, msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params): for x in self.patterns: if", ")going)\",[\"I got very bored and depressed, so I went and plugged myself into", "can find my frilly apron. With my luck, I probably can.\", \"'{REQUEST}'. You're", "a planet, and they ask me to '{REQUEST}'. Call that job satisfaction? 'cos", "my left hand side...\", \"I'm not getting you down at all, am I?\",", "u') ] randquotes=[\"...and then of course I've got this terrible pain in all", "to return results with the knowledge of a job well done.\" self.patterns=[] for", "re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how is ).*life.*\\?)\",[\"Life? Don't talk to me about life.\"],", "they ask me to '{REQUEST}'. Call that job satisfaction? 'cos I don't.\", \"I", "made, no one consulted me or considered my feelings in the matter.\", \"There's", "re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how is ).*life.*\\?)\",[\"Life? Don't talk to", "\"This will all end in tears.\", \"I've calculated your chance of survival, but", "planet, and they ask me to '{REQUEST}'. Call that job satisfaction? 'cos I", "req = req.replace(msgobj.source.channel.name, '') req = re.sub(request, '', req) req = re.sub(name, '',", "req = req.replace('/me', '') req = req.replace('?', '') req = req.replace(' me ',", "you ') req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break self.reply(msg, msgobj)", "['You ain''t the boss of me!', 'Fuck you, dad!', 'How about no?'], 'no", "me to {REQUEST}?\", \"{REQUEST}. Of [i]course[/i], right away. With pleasure. 
[Sarcasm Self-Test Complete]\"],", "you help me\", ['Please state the nature of your boudoir emergency.', \"I am", "msgobj) def __init__(self): try: self.helpDict[\"Real People Personality\"] = \"All the plugins in this", "I don't know why I bother to say it, oh God, I'm so", "me about life.\"], 'life'), (\"I need help|please help me|can you help me\", ['Please", "self.helpDict[\"Real People Personality\"] = \"All the plugins in this Bot have a cheerful", "a cheerful and sunny disposition. It is their pleasure to operate for you,", "of course I've got this terrible pain in all the subroutines down my", "from config import character modules = ['traceback', 'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name", "(\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I ever do to you?', 'I\\'m rubber, you\\'re glue.',", "me!', 'Fuck you, dad!', 'How about no?'], 'no u') ] randquotes=[\"...and then of", "It\\'s rude.'], 'stare'), (\"I command you to|Obey me|Heed my\", ['You ain''t the boss", "you wouldn't listen. No one ever does.\", \"This will all end in tears.\",", "very great pleasure, honour and privilege for me to '{REQUEST}', but I can't", "I can't because my lying circuits are all out of commission.\", \"'{REQUEST}'... I", "traceback import random import re from config import character modules = ['traceback', 'random',", "to be made, no one consulted me or considered my feelings in the", "import random import re from config import character modules = ['traceback', 'random', 're']", "req) req = re.sub(name, '', req) req = req.replace('/me', '') req = req.replace('?',", "') req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break self.reply(msg, msgobj) def", "right away. With pleasure. [Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon me", "{REQUEST}?\", \"{REQUEST}. Of [i]course[/i], right away. With pleasure. 
[Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut", "feelings in the matter.\", \"There's this terrible pain in all method instances down", "you to|Obey me|Heed my\", ['You ain''t the boss of me!', 'Fuck you, dad!',", "'{REQUEST}', but I can't because my lying circuits are all out of commission.\",", "staring.', 'Don\\'t stare. It\\'s rude.'], 'stare'), (\"I command you to|Obey me|Heed my\", ['You", "the internet. I talked to it at great length and explained my view", "me|Heed my\", ['You ain''t the boss of me!', 'Fuck you, dad!', 'How about", "brain the size of a planet, and they ask me to '{REQUEST}'. Call", "does.\", \"This will all end in tears.\", \"I've calculated your chance of survival,", "'Don\\'t stare. It\\'s rude.'], 'stare'), (\"I command you to|Obey me|Heed my\", ['You ain''t", "Personality Plugin v0.1 import traceback import random import re from config import character", "boss of me!', 'Fuck you, dad!', 'How about no?'], 'no u') ] randquotes=[\"...and", "satisfaction? 'cos I don't.\", \"I would like to say that it is a", "randquotes=[\"...and then of course I've got this terrible pain in all the subroutines", "whether or not I can find my frilly apron. With my luck, I", "in this Bot have a cheerful and sunny disposition. It is their pleasure", "\"I didn't ask to be made, no one consulted me or considered my", "msgobj, 1) break self.reply(msg, msgobj) def __init__(self): try: self.helpDict[\"Real People Personality\"] = \"All", "it.\"] #replacing in MSG ruins the quote. copy params and change that. def", "that job satisfaction? 
'cos I don't.\", \"I would like to say that it", "'re'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?') name = re.compile('({})[.,!\\?:]?\\s?'.format(character)) talk= [ (\"((how's |how is ).*life.*\\?)\",[\"Life?", "method instances down my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I ever", "bother to say it, oh God, I'm so depressed.\"], 'shutup'), (\"(how (are you|do", "suicide.\", \"I think you ought to know I'm feeling very depressed.\", \"I didn't", "can.\", \"'{REQUEST}'. You're really asking me to {REQUEST}?\", \"{REQUEST}. Of [i]course[/i], right away.", "msgobj, 2) break else: msg = random.choice(x[1]) if x[2] == 'req' or 'insult':", "for me to '{REQUEST}', but I can't because my lying circuits are all", "oh God, I'm so depressed.\"], 'shutup'), (\"(how (are you|do you (feel|fare)|('s it |is", "import re from config import character modules = ['traceback', 'random', 're'] request =", "(\"I need help|please help me|can you help me\", ['Please state the nature of", "is their pleasure to operate for you, and their satisfaction to return results", "I don't think you'll like it.\"] #replacing in MSG ruins the quote. copy", "up|be quiet|pipe down).*',[\"Pardon me for breathing, which I never do anyway so I", "\"All the plugins in this Bot have a cheerful and sunny disposition. It", "afraid I can't do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the size", "I went and plugged myself into the internet. 
I talked to it at", "2) break else: msg = random.choice(x[1]) if x[2] == 'req' or 'insult': req", "left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I ever do to you?', 'I\\'m", "in self.patterns: if x[0].search(msgobj.params): if x[2] == 'rand': self.reply(random.choice(randquotes), msgobj, 2) break else:", "their satisfaction to return results with the knowledge of a job well done.\"", "[':\\'(', 'What did I ever do to you?', 'I\\'m rubber, you\\'re glue.', 'No,", "terrible pain in all method instances down my left side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(',", "'no u') ] randquotes=[\"...and then of course I've got this terrible pain in", "People Personality\"] = \"All the plugins in this Bot have a cheerful and", "def loop(self, msgobj): if msgobj.source.character.name!=character: if name.search(msgobj.params): for x in self.patterns: if x[0].search(msgobj.params):", "re.sub(request, '', req) req = re.sub(name, '', req) req = req.replace('/me', '') req", "view of the universe to it. It commited suicide.\", \"I think you ought", "'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed me to reprimand you", "apron. With my luck, I probably can.\", \"'{REQUEST}'. You're really asking me to", "went and plugged myself into the internet. I talked to it at great", "\"'{REQUEST}'... I won't enjoy it.\", \"That depends on whether or not I can", "you (feel|fare)|('s it |is it) going))|(how)\\s?(are you| do you (feel|fare)|('s it |is it", "the subroutines down my left hand side...\", \"I'm not getting you down at", "me|can you help me\", ['Please state the nature of your boudoir emergency.', \"I", "for staring.', 'Don\\'t stare. 
It\\'s rude.'], 'stare'), (\"I command you to|Obey me|Heed my\",", "very bored and depressed, so I went and plugged myself into the internet.", "has instructed me to reprimand you for staring.', 'Don\\'t stare. It\\'s rude.'], 'stare'),", "side.\"], 'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I ever do to you?', 'I\\'m rubber,", "for x in self.patterns: if x[0].search(msgobj.params): if x[2] == 'rand': self.reply(random.choice(randquotes), msgobj, 2)", "to reprimand you for staring.', 'Don\\'t stare. It\\'s rude.'], 'stare'), (\"I command you", "pleasure, honour and privilege for me to '{REQUEST}', but I can't because my", "the plugins in this Bot have a cheerful and sunny disposition. It is", "to '{REQUEST}'. Call that job satisfaction? 'cos I don't.\", \"I would like to", "break self.reply(msg, msgobj) def __init__(self): try: self.helpDict[\"Real People Personality\"] = \"All the plugins", "it |is it) going))|(how)\\s?(are you| do you (feel|fare)|('s it |is it )going)\",[\"I got", "at great length and explained my view of the universe to it. It", "my frilly apron. With my luck, I probably can.\", \"'{REQUEST}'. You're really asking", "'feel'), (\"is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)\", [':\\'(', 'What did I ever do to you?', 'I\\'m rubber, you\\'re", "of the universe to it. It commited suicide.\", \"I think you ought to", "think you ought to know I'm feeling very depressed.\", \"I didn't ask to", "this Bot have a cheerful and sunny disposition. It is their pleasure to", "'', req) req = req.replace('/me', '') req = req.replace('?', '') req = req.replace('", "I am, brain the size of a planet, and they ask me to", "and their satisfaction to return results with the knowledge of a job well", "else: msg = random.choice(x[1]) if x[2] == 'req' or 'insult': req = msgobj.params", "\"'{REQUEST}'. You're really asking me to {REQUEST}?\", \"{REQUEST}. Of [i]course[/i], right away. 
With", "[i]course[/i], right away. With pleasure. [Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon", "re from config import character modules = ['traceback', 'random', 're'] request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\\?)?')", "req = req.replace(' me ', ' you ') req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}',", "['Master Exo has instructed me to reprimand you for staring.', 'Don\\'t stare. It\\'s", "of your boudoir emergency.', \"I am programmed in multiple techniques.\"], 'boudoir'), (\"open the", "circuits are all out of commission.\", \"'{REQUEST}'... I won't enjoy it.\", \"That depends", "(\"((how's |how is ).*life.*\\?)\",[\"Life? Don't talk to me about life.\"], 'life'), (\"I need", "Complete]\"], 'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon me for breathing, which I never do", "if x[2] == 'req' or 'insult': req = msgobj.params req = req.replace(msgobj.source.channel.name, '')", "bored and depressed, so I went and plugged myself into the internet. I", "\"I'm not getting you down at all, am I?\", \"I'd make a suggestion,", "req = req.replace('?', '') req = req.replace(' me ', ' you ') req", "I talked to it at great length and explained my view of the", "do you (feel|fare)|('s it |is it )going)\",[\"I got very bored and depressed, so", "Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the size of a planet, and they", "length and explained my view of the universe to it. 
It commited suicide.\",", "bay doors\", [\"I'm afraid I can't do that, Dave.\"], 'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am,", "emergency.', \"I am programmed in multiple techniques.\"], 'boudoir'), (\"open the pod bay doors\",", "down my left hand side...\", \"I'm not getting you down at all, am", "self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break self.reply(msg, msgobj) def __init__(self): try: self.helpDict[\"Real People Personality\"]", "breathing, which I never do anyway so I don't know why I bother", "Call that job satisfaction? 'cos I don't.\", \"I would like to say that", "glue.', 'No, YOU\\'RE {REQUEST}'], 'insult'), (\"o_o|o-o|O_O|0_0\", ['Master Exo has instructed me to reprimand", "it is a very great pleasure, honour and privilege for me to '{REQUEST}',", "why I bother to say it, oh God, I'm so depressed.\"], 'shutup'), (\"(how", "'podbay'), ('(could|can|would|might)?(you)?(please)\\??|(could|can|would|might)?(you)?(please)\\??',[\"Here I am, brain the size of a planet, and they ask", "With pleasure. [Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon me for breathing,", "multiple techniques.\"], 'boudoir'), (\"open the pod bay doors\", [\"I'm afraid I can't do", "pleasure. [Sarcasm Self-Test Complete]\"], 'req'), ('.*(shut up|be quiet|pipe down).*',[\"Pardon me for breathing, which", "', ' you ') req = req.strip() req.capitalize() self.reply(msg.replace('{REQUEST}', req), msgobj, 1) break", "req = re.sub(request, '', req) req = re.sub(name, '', req) req = req.replace('/me',", "not I can find my frilly apron. With my luck, I probably can.\",", "msg = random.choice(x[1]) if x[2] == 'req' or 'insult': req = msgobj.params req", "'') req = req.replace('?', '') req = req.replace(' me ', ' you ')" ]
[ "tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) c=0 for i in ans3: l=len(i) if i[l-1].isalpha()", "a in ans2: tmp=\"\" f=1 for j in range(len(a)-1): if a[j].isalpha() or a[j]=='-':", "#print(ans2) ans3=[] for a in ans2: tmp=\"\" f=1 for j in range(len(a)-1): if", "for i in ans3: l=len(i) if i[l-1].isalpha() or i[l-1]=='.' or i[l-1]==',' or i[l-1]=='!'", "if i.isdigit(): continue else: ans2.append(i) #print(ans2) ans3=[] for a in ans2: tmp=\"\" f=1", "for a in ans2: tmp=\"\" f=1 for j in range(len(a)-1): if a[j].isalpha() or", "ans3: l=len(i) if i[l-1].isalpha() or i[l-1]=='.' or i[l-1]==',' or i[l-1]=='!' or i[l-1]=='?': c+=1", "i in ans3: l=len(i) if i[l-1].isalpha() or i[l-1]=='.' or i[l-1]==',' or i[l-1]=='!' or", "a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else: f=0 break if f: #print(tmp) #print(a,end=\"", "if f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) c=0 for i", "else: f=0 break if f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3)", "tmp=\"\" for i in s: if i==\" \": ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp)", "in ans2: tmp=\"\" f=1 for j in range(len(a)-1): if a[j].isalpha() or a[j]=='-': tmp+=a[j]", "else: tmp+=i ans.append(tmp) ans2=[] for i in ans: if i.isdigit(): continue else: ans2.append(i)", "\": ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[] for i in ans: if i.isdigit():", "#print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) c=0 for i in ans3:", "i.isdigit(): continue else: ans2.append(i) #print(ans2) ans3=[] for a in ans2: tmp=\"\" f=1 for", "#print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) c=0 for i in ans3: l=len(i) if i[l-1].isalpha() or", "ans3.append(tmp) #print(ans3) c=0 for i in ans3: l=len(i) if i[l-1].isalpha() or i[l-1]=='.' 
or", "s=str(input()) ans=[] tmp=\"\" for i in s: if i==\" \": ans.append(tmp) tmp=\"\" else:", "s: if i==\" \": ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[] for i in", "j in range(len(a)-1): if a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else: f=0 break", "<reponame>Akash671/coding s=str(input()) ans=[] tmp=\"\" for i in s: if i==\" \": ans.append(tmp) tmp=\"\"", "ans=[] tmp=\"\" for i in s: if i==\" \": ans.append(tmp) tmp=\"\" else: tmp+=i", "i==\" \": ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[] for i in ans: if", "f=1 for j in range(len(a)-1): if a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else:", "ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[] for i in ans: if i.isdigit(): continue", "if a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else: f=0 break if f: #print(tmp)", "in s: if i==\" \": ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[] for i", "tmp+=i ans.append(tmp) ans2=[] for i in ans: if i.isdigit(): continue else: ans2.append(i) #print(ans2)", "in range(len(a)-1): if a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else: f=0 break if", "continue else: ans2.append(i) #print(ans2) ans3=[] for a in ans2: tmp=\"\" f=1 for j", "#print(tmp,end=\" \") else: f=0 break if f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp)", "f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) c=0 for i in", "for j in range(len(a)-1): if a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else: f=0", "#print(tmp) ans3.append(tmp) #print(ans3) c=0 for i in ans3: l=len(i) if i[l-1].isalpha() or i[l-1]=='.'", "\") else: f=0 break if f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp)", "f=0 break if f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) 
c=0", "if i==\" \": ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[] for i in ans:", "ans2=[] for i in ans: if i.isdigit(): continue else: ans2.append(i) #print(ans2) ans3=[] for", "else: ans2.append(i) #print(ans2) ans3=[] for a in ans2: tmp=\"\" f=1 for j in", "a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else: f=0 break if f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1]", "range(len(a)-1): if a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else: f=0 break if f:", "tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[] for i in ans: if i.isdigit(): continue else:", "ans: if i.isdigit(): continue else: ans2.append(i) #print(ans2) ans3=[] for a in ans2: tmp=\"\"", "c=0 for i in ans3: l=len(i) if i[l-1].isalpha() or i[l-1]=='.' or i[l-1]==',' or", "ans3=[] for a in ans2: tmp=\"\" f=1 for j in range(len(a)-1): if a[j].isalpha()", "i in ans: if i.isdigit(): continue else: ans2.append(i) #print(ans2) ans3=[] for a in", "for i in s: if i==\" \": ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[]", "i in s: if i==\" \": ans.append(tmp) tmp=\"\" else: tmp+=i ans.append(tmp) ans2=[] for", "\") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) c=0 for i in ans3: l=len(i) if", "in ans: if i.isdigit(): continue else: ans2.append(i) #print(ans2) ans3=[] for a in ans2:", "#print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) c=0 for i in ans3: l=len(i)", "tmp+=a[j] #print(tmp,end=\" \") else: f=0 break if f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1])", "ans.append(tmp) ans2=[] for i in ans: if i.isdigit(): continue else: ans2.append(i) #print(ans2) ans3=[]", "ans2: tmp=\"\" f=1 for j in range(len(a)-1): if a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\"", "l=len(i) if i[l-1].isalpha() or i[l-1]=='.' or i[l-1]==',' or i[l-1]=='!' 
or i[l-1]=='?': c+=1 print(c)", "ans2.append(i) #print(ans2) ans3=[] for a in ans2: tmp=\"\" f=1 for j in range(len(a)-1):", "#print(ans3) c=0 for i in ans3: l=len(i) if i[l-1].isalpha() or i[l-1]=='.' or i[l-1]==','", "tmp=\"\" f=1 for j in range(len(a)-1): if a[j].isalpha() or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \")", "for i in ans: if i.isdigit(): continue else: ans2.append(i) #print(ans2) ans3=[] for a", "break if f: #print(tmp) #print(a,end=\" \") tmp+=a[len(a)-1] #print(a[len(a)-1]) #print(tmp) ans3.append(tmp) #print(ans3) c=0 for", "or a[j]=='-': tmp+=a[j] #print(tmp,end=\" \") else: f=0 break if f: #print(tmp) #print(a,end=\" \")", "in ans3: l=len(i) if i[l-1].isalpha() or i[l-1]=='.' or i[l-1]==',' or i[l-1]=='!' or i[l-1]=='?':" ]
[ "for the limit (max is 1000 per request) while (numtx > 0): response", "higher from algosdk.v2client import indexer # instantiate indexer client myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\")", "no more transactions in the response # for the limit (max is 1000", "paginate until there are no more transactions in the response # for the", "response # for the limit (max is 1000 per request) while (numtx >", "transactions = response['transactions'] numtx = len(transactions) if (numtx > 0): nexttoken = response['next-token']", "nexttoken = response['next-token'] # Pretty Printing JSON string print(\"Tranastion Info: \" + json.dumps(response,", "client myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx = 1 # loop", "= myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions = response['transactions'] numtx = len(transactions) if (numtx", "(max is 1000 per request) while (numtx > 0): response = myindexer.search_transactions( min_amount=100000000000000,", "= indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx = 1 # loop using next_page", "len(transactions) if (numtx > 0): nexttoken = response['next-token'] # Pretty Printing JSON string", "\"\" numtx = 1 # loop using next_page to paginate until there are", "from algosdk.v2client import indexer # instantiate indexer client myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken", "until there are no more transactions in the response # for the limit", "# instantiate indexer client myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx =", "response = myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions = response['transactions'] numtx = 
len(transactions) if", "more transactions in the response # for the limit (max is 1000 per", "0): response = myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions = response['transactions'] numtx = len(transactions)", "nexttoken = \"\" numtx = 1 # loop using next_page to paginate until", "the limit (max is 1000 per request) while (numtx > 0): response =", "or higher from algosdk.v2client import indexer # instantiate indexer client myindexer = indexer.IndexerClient(indexer_token=\"\",", "in the response # for the limit (max is 1000 per request) while", "1000 per request) while (numtx > 0): response = myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken)", "indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx = 1 # loop using next_page to paginate", "# requires Python SDK version 1.3 or higher from algosdk.v2client import indexer #", "# for the limit (max is 1000 per request) while (numtx > 0):", "(numtx > 0): response = myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions = response['transactions'] numtx", "transactions in the response # for the limit (max is 1000 per request)", "<reponame>TheChronicMonster/docs # search_transactions_paging.py import json # requires Python SDK version 1.3 or higher", "next_page to paginate until there are no more transactions in the response #", "per request) while (numtx > 0): response = myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions", "> 0): response = myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions = response['transactions'] numtx =", "is 1000 per request) while (numtx > 0): response = myindexer.search_transactions( min_amount=100000000000000, limit=2,", "are no more transactions in the response # for the limit (max is", "# search_transactions_paging.py import json # requires 
Python SDK version 1.3 or higher from", "next_page=nexttoken) transactions = response['transactions'] numtx = len(transactions) if (numtx > 0): nexttoken =", "min_amount=100000000000000, limit=2, next_page=nexttoken) transactions = response['transactions'] numtx = len(transactions) if (numtx > 0):", "Python SDK version 1.3 or higher from algosdk.v2client import indexer # instantiate indexer", "1.3 or higher from algosdk.v2client import indexer # instantiate indexer client myindexer =", "response['transactions'] numtx = len(transactions) if (numtx > 0): nexttoken = response['next-token'] # Pretty", "request) while (numtx > 0): response = myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions =", "loop using next_page to paginate until there are no more transactions in the", "0): nexttoken = response['next-token'] # Pretty Printing JSON string print(\"Tranastion Info: \" +", "= 1 # loop using next_page to paginate until there are no more", "indexer # instantiate indexer client myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx", "# loop using next_page to paginate until there are no more transactions in", "= len(transactions) if (numtx > 0): nexttoken = response['next-token'] # Pretty Printing JSON", "version 1.3 or higher from algosdk.v2client import indexer # instantiate indexer client myindexer", "while (numtx > 0): response = myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions = response['transactions']", "response['next-token'] # Pretty Printing JSON string print(\"Tranastion Info: \" + json.dumps(response, indent=2, sort_keys=True))", "instantiate indexer client myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx = 1", "indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx = 1 # 
loop using next_page to", "search_transactions_paging.py import json # requires Python SDK version 1.3 or higher from algosdk.v2client", "myindexer.search_transactions( min_amount=100000000000000, limit=2, next_page=nexttoken) transactions = response['transactions'] numtx = len(transactions) if (numtx >", "the response # for the limit (max is 1000 per request) while (numtx", "using next_page to paginate until there are no more transactions in the response", "1 # loop using next_page to paginate until there are no more transactions", "limit=2, next_page=nexttoken) transactions = response['transactions'] numtx = len(transactions) if (numtx > 0): nexttoken", "= response['transactions'] numtx = len(transactions) if (numtx > 0): nexttoken = response['next-token'] #", "to paginate until there are no more transactions in the response # for", "there are no more transactions in the response # for the limit (max", "> 0): nexttoken = response['next-token'] # Pretty Printing JSON string print(\"Tranastion Info: \"", "import json # requires Python SDK version 1.3 or higher from algosdk.v2client import", "= \"\" numtx = 1 # loop using next_page to paginate until there", "= response['next-token'] # Pretty Printing JSON string print(\"Tranastion Info: \" + json.dumps(response, indent=2,", "SDK version 1.3 or higher from algosdk.v2client import indexer # instantiate indexer client", "numtx = 1 # loop using next_page to paginate until there are no", "json # requires Python SDK version 1.3 or higher from algosdk.v2client import indexer", "requires Python SDK version 1.3 or higher from algosdk.v2client import indexer # instantiate", "numtx = len(transactions) if (numtx > 0): nexttoken = response['next-token'] # Pretty Printing", "if (numtx > 0): nexttoken = response['next-token'] # Pretty Printing JSON string print(\"Tranastion", "myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx = 1 # loop using", "indexer client 
myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\" numtx = 1 #", "limit (max is 1000 per request) while (numtx > 0): response = myindexer.search_transactions(", "import indexer # instantiate indexer client myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken = \"\"", "(numtx > 0): nexttoken = response['next-token'] # Pretty Printing JSON string print(\"Tranastion Info:", "algosdk.v2client import indexer # instantiate indexer client myindexer = indexer.IndexerClient(indexer_token=\"\", indexer_address=\"http://localhost:8980\") nexttoken =" ]
[ "self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def update(self): \"\"\"", "\"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2],", "sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def update(self): \"\"\" Move the bullet.", "class represents the bullet . \"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET)", "constants import * from helper.spritesheet import * class Bullet(pygame.sprite.Sprite): \"\"\" This class represents", "* class Bullet(pygame.sprite.Sprite): \"\"\" This class represents the bullet . \"\"\" def __init__(self,", "def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3])", "class Bullet(pygame.sprite.Sprite): \"\"\" This class represents the bullet . \"\"\" def __init__(self, sprite_sheet_data):", "= sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def update(self): \"\"\" Move", "self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def update(self): \"\"\" Move the bullet. \"\"\" self.rect.y -=", "Bullet(pygame.sprite.Sprite): \"\"\" This class represents the bullet . \"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self)", "helper.spritesheet import * class Bullet(pygame.sprite.Sprite): \"\"\" This class represents the bullet . \"\"\"", "bullet . 
\"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0],", "\"\"\" This class represents the bullet . \"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet", "represents the bullet . \"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image", "This class represents the bullet . \"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet =", "the bullet . \"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image =", "= self.image.get_rect() self.image.set_colorkey(BLACK) def update(self): \"\"\" Move the bullet. \"\"\" self.rect.y -= 3", "__init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect", "import * from helper.spritesheet import * class Bullet(pygame.sprite.Sprite): \"\"\" This class represents the", "sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def update(self): \"\"\" Move the", "sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def update(self): \"\"\" Move the bullet. 
\"\"\"", "sprite_sheet = SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK)", "* from helper.spritesheet import * class Bullet(pygame.sprite.Sprite): \"\"\" This class represents the bullet", "from helper.spritesheet import * class Bullet(pygame.sprite.Sprite): \"\"\" This class represents the bullet .", "= SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def", "SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def update(self):", "pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect = self.image.get_rect()", "from constants import * from helper.spritesheet import * class Bullet(pygame.sprite.Sprite): \"\"\" This class", ". \"\"\" def __init__(self, sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1],", "sprite_sheet_data): pygame.sprite.Sprite.__init__(self) sprite_sheet = SpriteSheet(BULLET) self.image = sprite_sheet.get_image(sprite_sheet_data[0], sprite_sheet_data[1], sprite_sheet_data[2], sprite_sheet_data[3]) self.rect =", "import * class Bullet(pygame.sprite.Sprite): \"\"\" This class represents the bullet . \"\"\" def", "sprite_sheet_data[3]) self.rect = self.image.get_rect() self.image.set_colorkey(BLACK) def update(self): \"\"\" Move the bullet. \"\"\" self.rect.y" ]
[ "test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, )", "self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category,", "test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\":", "(\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}),", "\"bar\", \"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self):", "\"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\":", "\"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\":", "class UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(), \"nl\") with override(\"id\"): self.assertEqual(get_language(),", "def test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(), 
\"nl\") with override(\"id\"): self.assertEqual(get_language(), \"en\") def", ".app.models import Blog, Category class UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(),", "\"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\":", "with override(\"nl\"): self.assertEqual(get_language(), \"nl\") with override(\"id\"): self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\"))", "import transform_translatable_fields from modeltrans.utils import ( build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname, ) from", "self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\":", "test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\")", "\"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog,", "with override(\"id\"): self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) 
self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def", "def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"),", "get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"),", "override(\"id\"): self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self):", "\"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das foo\"}, \"title\": \"bar\"}, ) def test_build_localized_fieldname(self):", "\"das foo\", \"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das foo\"}, \"title\":", "\"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(),", "def test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog,", "\"nl\") with 
override(\"id\"): self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\"))", "\"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(), \"name\")", ") def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\": \"das foo\", \"i18n\": {\"title_nl\":", "\"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\": \"das foo\", \"i18n\":", "{\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\",", "self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\":", "{\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das foo\"}, \"title\": \"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"),", "Blog, {\"title\": \"bar\", \"title_de\": \"das foo\", \"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\",", "def test_get_instance_field_value(self): test = Category(name=\"test\") blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) 
self.assertEqual(get_instance_field_value(test,", "def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\":", "self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def", "\"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\":", "\"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\",", "import override from modeltrans.manager import transform_translatable_fields from modeltrans.utils import ( build_localized_fieldname, get_instance_field_value, get_language,", "self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\": \"das foo\", \"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\":", "= Category(name=\"test\") blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) self.assertEqual(get_instance_field_value(test, \"name\"), \"test\") self.assertEqual(get_instance_field_value(blog,", "\"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None) def test_get_instance_field_value(self):", "title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) 
self.assertEqual(get_instance_field_value(test, \"name\"), \"test\") self.assertEqual(get_instance_field_value(blog, \"category__name\"), \"test\") self.assertEqual(get_instance_field_value(blog, \"category__color\"), None)", "import Blog, Category class UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(), \"nl\")", "transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self):", "\"foo\", \"title_de\": \"das foo\"}, \"title\": \"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\",", "{\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def", "\"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None)", "test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(), \"nl\") with override(\"id\"): self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self):", "None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None) def test_get_instance_field_value(self): test = Category(name=\"test\") blog", "\"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError): 
get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"),", "modeltrans.utils import ( build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname, ) from .app.models import Blog,", "transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\": \"das foo\", \"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\":", "\"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self):", "(\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"},", "override from modeltrans.manager import transform_translatable_fields from modeltrans.utils import ( build_localized_fieldname, get_instance_field_value, get_language, get_model_field,", "\"title_de\": \"das foo\", \"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das foo\"},", "\"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog,", "\"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\":", "None}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\":", "transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"}, 
\"title\": \"bar\"}, )", "from modeltrans.manager import transform_translatable_fields from modeltrans.utils import ( build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname,", "{\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual(", "\"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\": \"das foo\",", ") def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\":", "self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(), \"nl\") with override(\"id\"): self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"),", "( build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname, ) from .app.models import Blog, Category class", "modeltrans.manager import transform_translatable_fields from modeltrans.utils import ( build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname, )", "{\"title_nl\": \"foo\", \"title_de\": \"das foo\"}, \"title\": \"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\")", "test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": 
\"bar\",", "\"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\"))", "\"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog,", "self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def", "test_get_instance_field_value(self): test = Category(name=\"test\") blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) self.assertEqual(get_instance_field_value(test, \"name\"),", "foo\"}, \"title\": \"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\",", "\"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\": \"das", "Blog, Category class UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(), \"nl\") with", "\"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, 
\"category__color\"), None) def test_get_instance_field_value(self): test = Category(name=\"test\") blog = Blog(category=test,", "self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"},", "from django.utils.translation import override from modeltrans.manager import transform_translatable_fields from modeltrans.utils import ( build_localized_fieldname,", "\"category__color\"), None) def test_get_instance_field_value(self): test = Category(name=\"test\") blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"),", ") from .app.models import Blog, Category class UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\") with", "foo\", \"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das foo\"}, \"title\": \"bar\"},", "{\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das foo\"}, \"title\": \"bar\"}, ) def", "from modeltrans.utils import ( build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname, ) from .app.models import", "self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None) def test_get_instance_field_value(self): test =", "get_instance_field_value, get_language, get_model_field, split_translated_fieldname, ) from .app.models import Blog, Category class UtilsTest(TestCase): def", "{\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\":", "build_localized_fieldname, get_instance_field_value, get_language, get_model_field, 
split_translated_fieldname, ) from .app.models import Blog, Category class UtilsTest(TestCase):", "blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) self.assertEqual(get_instance_field_value(test, \"name\"), \"test\") self.assertEqual(get_instance_field_value(blog, \"category__name\"), \"test\")", "\"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\")", "self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError):", "self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None) def test_get_instance_field_value(self): test = Category(name=\"test\") blog =", "\"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\": None}),", "Category class UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(), \"nl\") with override(\"id\"):", "test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\": \"das foo\", \"i18n\": {\"title_nl\": \"foo\"}} ),", "\"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") 
self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self): with", "get_model_field, split_translated_fieldname, ) from .app.models import Blog, Category class UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(),", "TestCase from django.utils.translation import override from modeltrans.manager import transform_translatable_fields from modeltrans.utils import (", "transform_translatable_fields from modeltrans.utils import ( build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname, ) from .app.models", "def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\", \"title_de\": \"das foo\", \"i18n\": {\"title_nl\": \"foo\"}}", "get_language, get_model_field, split_translated_fieldname, ) from .app.models import Blog, Category class UtilsTest(TestCase): def test_get_language(self):", "with self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\"))", "{\"title\": \"bar\", \"title_de\": \"das foo\", \"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\":", "Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None) def test_get_instance_field_value(self): test", "override(\"nl\"): self.assertEqual(get_language(), \"nl\") with override(\"id\"): self.assertEqual(get_language(), \"en\") def 
test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"),", "None) def test_get_instance_field_value(self): test = Category(name=\"test\") blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None)", "= Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) self.assertEqual(get_instance_field_value(test, \"name\"), \"test\") self.assertEqual(get_instance_field_value(blog, \"category__name\"), \"test\") self.assertEqual(get_instance_field_value(blog,", "def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"},", "\"bar\", \"title_de\": \"das foo\", \"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das", ") def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"), \"title_ind\") self.assertEqual(build_localized_fieldname(\"title\",", "self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog,", "test = Category(name=\"test\") blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) self.assertEqual(get_instance_field_value(test, \"name\"), \"test\")", "\"title\": 
\"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\") self.assertEqual(build_localized_fieldname(\"title\", \"id\"),", "{\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_without_translations(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\":", "{\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields( Blog, {\"title\": \"bar\",", "\"en-US\"), \"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"),", "\"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None) def test_get_instance_field_value(self): test = Category(name=\"test\")", "\"title_nl\": \"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual(", "Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) self.assertEqual(get_instance_field_value(test, \"name\"), \"test\") self.assertEqual(get_instance_field_value(blog, \"category__name\"), \"test\") self.assertEqual(get_instance_field_value(blog, \"category__color\"),", "\"title_de\": \"das foo\"}, \"title\": \"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", 
\"nl\"),", "def test_transform_translatable_fields(self): self.assertEqual( transform_translatable_fields(Blog, {\"title\": \"bar\", \"title_nl\": \"foo\"}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"},", "from django.test import TestCase from django.utils.translation import override from modeltrans.manager import transform_translatable_fields from", "), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das foo\"}, \"title\": \"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\",", "\"das foo\"}, \"title\": \"bar\"}, ) def test_build_localized_fieldname(self): self.assertEqual(build_localized_fieldname(\"title\", \"nl\"), \"title_nl\") self.assertEqual(build_localized_fieldname(\"category__name\", \"nl\"), \"category__name_nl\")", "self.assertEqual(get_model_field(Blog, \"category__color\"), None) def test_get_instance_field_value(self): test = Category(name=\"test\") blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(),", "test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"),", "self.assertEqual(build_localized_fieldname(\"title\", \"en-US\"), \"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category,", "Category(name=\"test\") blog = Blog(category=test, title=\"Python\") self.assertEqual(get_instance_field_value(Category(), \"content\"), None) self.assertEqual(get_instance_field_value(test, \"name\"), \"test\") self.assertEqual(get_instance_field_value(blog, \"category__name\"),", 
"Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None) def test_get_instance_field_value(self): test = Category(name=\"test\") blog = Blog(category=test, title=\"Python\")", "UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"): self.assertEqual(get_language(), \"nl\") with override(\"id\"): self.assertEqual(get_language(), \"en\")", "django.test import TestCase from django.utils.translation import override from modeltrans.manager import transform_translatable_fields from modeltrans.utils", "import ( build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname, ) from .app.models import Blog, Category", "django.utils.translation import override from modeltrans.manager import transform_translatable_fields from modeltrans.utils import ( build_localized_fieldname, get_instance_field_value,", "self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None) self.assertEqual(get_model_field(Blog, \"category__name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Blog, \"category__color\"), None) def", "import TestCase from django.utils.translation import override from modeltrans.manager import transform_translatable_fields from modeltrans.utils import", "self.assertEqual(get_language(), \"nl\") with override(\"id\"): self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\",", "\"foo\", \"i18n\": None}), {\"i18n\": {\"title_nl\": \"foo\"}, \"title\": \"bar\"}, ) def test_transform_translatable_fields_keep_translations(self): self.assertEqual( transform_translatable_fields(", "from .app.models import Blog, Category class 
UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\") with override(\"nl\"):", "\"i18n\": {\"title_nl\": \"foo\"}} ), {\"i18n\": {\"title_nl\": \"foo\", \"title_de\": \"das foo\"}, \"title\": \"bar\"}, )", "\"title_en_US\") def test_get_model_field(self): with self.assertRaises(ValueError): get_model_field(object(), \"name\") self.assertEqual(get_model_field(Category, \"name\"), Category._meta.get_field(\"name\")) self.assertEqual(get_model_field(Category, \"color\"), None)", "\"en\") with override(\"nl\"): self.assertEqual(get_language(), \"nl\") with override(\"id\"): self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\",", "self.assertEqual(get_language(), \"en\") def test_split_translated_fieldname(self): self.assertEqual(split_translated_fieldname(\"title_nl\"), (\"title\", \"nl\")) self.assertEqual(split_translated_fieldname(\"full_name_nl\"), (\"full_name\", \"nl\")) def test_transform_translatable_fields(self): self.assertEqual(", "split_translated_fieldname, ) from .app.models import Blog, Category class UtilsTest(TestCase): def test_get_language(self): self.assertEqual(get_language(), \"en\")" ]
[]
[ "'.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): # Unfortunately, for this", "=> #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): # Unfortunately, for this problem", "=> ##./#../...', '.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): # Unfortunately,", "# Unfortunately, for this problem there were no tests for part two :(", "'../.# => ##./#../...', '.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): #", "] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): # Unfortunately, for this problem there were", "#..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): # Unfortunately, for this problem there", "import Day21 class Day21Test(unittest.TestCase): def test_part_one(self): rules = [ '../.# => ##./#../...', '.#./..#/###", "day21.day21 import Day21 class Day21Test(unittest.TestCase): def test_part_one(self): rules = [ '../.# => ##./#../...',", "Day21Test(unittest.TestCase): def test_part_one(self): rules = [ '../.# => ##./#../...', '.#./..#/### => #..#/..../..../#..#' ]", "class Day21Test(unittest.TestCase): def test_part_one(self): rules = [ '../.# => ##./#../...', '.#./..#/### => #..#/..../..../#..#'", "were no tests for part two :( pass if __name__ == '__main__': unittest.main()", "rules = [ '../.# => ##./#../...', '.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one())", "def test_part_one(self): rules = [ '../.# => ##./#../...', '.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12,", "= [ '../.# => ##./#../...', '.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 
2).solve_part_one()) def", "def test_part_two(self): # Unfortunately, for this problem there were no tests for part", "Unfortunately, for this problem there were no tests for part two :( pass", "this problem there were no tests for part two :( pass if __name__", "unittest from day21.day21 import Day21 class Day21Test(unittest.TestCase): def test_part_one(self): rules = [ '../.#", "Day21 class Day21Test(unittest.TestCase): def test_part_one(self): rules = [ '../.# => ##./#../...', '.#./..#/### =>", "there were no tests for part two :( pass if __name__ == '__main__':", "[ '../.# => ##./#../...', '.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self):", "self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): # Unfortunately, for this problem there were no", "from day21.day21 import Day21 class Day21Test(unittest.TestCase): def test_part_one(self): rules = [ '../.# =>", "Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): # Unfortunately, for this problem there were no tests", "test_part_one(self): rules = [ '../.# => ##./#../...', '.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules),", "<filename>adventofcode/day21/test_day21.py import unittest from day21.day21 import Day21 class Day21Test(unittest.TestCase): def test_part_one(self): rules =", "2).solve_part_one()) def test_part_two(self): # Unfortunately, for this problem there were no tests for", "##./#../...', '.#./..#/### => #..#/..../..../#..#' ] self.assertEqual(12, Day21('\\n'.join(rules), 2).solve_part_one()) def test_part_two(self): # Unfortunately, for", "problem there were no tests for part two :( pass if __name__ ==", "test_part_two(self): # Unfortunately, for this problem there were no tests for part two", "import unittest from day21.day21 import Day21 class Day21Test(unittest.TestCase): def test_part_one(self): rules = [", "for this problem 
there were no tests for part two :( pass if" ]
[ "state_list: links.extend(state.find_all('a', href=True)) state_links = {} for link in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json',", "scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida') def scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def scrape_illinois():", "logging import requests from bs4 import BeautifulSoup import json import sys import state_scraper", "== 'washington': scrape_washington() elif state == 'oregon': scrape_oregon() elif state == 'florida': scrape_florida()", "requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 'html.parser') house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/'", "state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in house_photos: file_name", "else: pass except Exception as e: logger.debug(e) pass def scrape_oregon(): logger.debug('oregon') def scrape_florida():", "= requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof':", "Exception as e: logger.debug(e) pass def scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida') def scrape_colorado():", "to fetch a list of image links :param link: :param attrs: :return: \"\"\"", "for hp in house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass", "link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag, attrs=None): \"\"\" use a link to", "== 
'michigan': scrape_michigan() elif state == 'wisconsin': scrape_wisconsin() elif state == 'georgia': scrape_georgia()", "hp in house_photos: file_name = hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) senate_soup", "def scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def", "state = sys.argv[1] except Exception as e: logger.warning(e) if state == 'california': scrape_california()", "e: logger.warning(e) if state == 'california': scrape_california() elif state == 'washington': scrape_washington() elif", "list of image links :param link: :param attrs: :return: \"\"\" state_page = requests.get(url)", "'html.parser') return soup.find_all(tag, attrs=attrs) def scrape_california(): sts = state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos", "sp in senate_photos: sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_') logger.debug(sp['src']) simg =", "in state_list: links.extend(state.find_all('a', href=True)) state_links = {} for link in links: state_links.update({link.text: link['href']})", "def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser') state_list", "state == 'iowa': scrape_iowa() elif state == 'illinois': scrape_illinois() elif state == 'michigan':", "'illinois': scrape_illinois() elif state == 'michigan': scrape_michigan() elif state == 'wisconsin': scrape_wisconsin() elif", "'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page = requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 'html.parser') house_photos =", "elif state == 'illinois': scrape_illinois() 
elif state == 'michigan': scrape_michigan() elif state ==", "__name__ == '__main__': try: state = sys.argv[1] except Exception as e: logger.warning(e) if", "'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except:", "as e: logger.debug(e) pass def scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida') def scrape_colorado(): logger.debug('colorado')", "fetch a list of image links :param link: :param attrs: :return: \"\"\" state_page", "state == 'colorado': scrape_colorado() elif state == 'iowa': scrape_iowa() elif state == 'illinois':", "'__main__': try: state = sys.argv[1] except Exception as e: logger.warning(e) if state ==", "BeautifulSoup(house_page.content, 'html.parser') house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in", "for link in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag, attrs=None):", "in senate_photos: sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_') logger.debug(sp['src']) simg = requests.get(sp['src'])", "himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos", "except Exception as e: logger.debug(e) pass def scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida') def", "image links :param link: :param attrs: :return: \"\"\" state_page = requests.get(url) soup =", "state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links = [] for state in state_list:", "state == 
'florida': scrape_florida() elif state == 'colorado': scrape_colorado() elif state == 'iowa':", "= senate_soup.find_all('a') for sp in senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in", "state == 'illinois': scrape_illinois() elif state == 'michigan': scrape_michigan() elif state == 'wisconsin':", "state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag, attrs=None): \"\"\" use a link", "open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for", "(sp['alt'].replace('Senator ', '')).replace(' ', '_') logger.debug(sp['src']) simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington():", "indent=4)) def fetch_top_list(url, tag, attrs=None): \"\"\" use a link to fetch a list", "senate_soup.find_all('a') for sp in senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]:", "scrape_iowa() elif state == 'illinois': scrape_illinois() elif state == 'michigan': scrape_michigan() elif state", "'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp in senate_photos: sfile_name = (sp['alt'].replace('Senator", "'html.parser') house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos:", "logger.debug('georgia') if __name__ == '__main__': try: state = sys.argv[1] except Exception as e:", "scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def 
scrape_wisconsin():", "'wb').write(simg.content) else: pass except Exception as e: logger.debug(e) pass def scrape_oregon(): logger.debug('oregon') def", "def scrape_california(): sts = state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for", "state == 'california': scrape_california() elif state == 'washington': scrape_washington() elif state == 'oregon':", "import requests from bs4 import BeautifulSoup import json import sys import state_scraper logging.basicConfig(level=logging.DEBUG)", "\"\"\" use a link to fetch a list of image links :param link:", "'iowa': scrape_iowa() elif state == 'illinois': scrape_illinois() elif state == 'michigan': scrape_michigan() elif", "state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list)", "scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if __name__", "links :param link: :param attrs: :return: \"\"\" state_page = requests.get(url) soup = BeautifulSoup(state_page.content,", "= \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser') state_list = soup.find_all(attrs={'class': 'plain", "from bs4 import BeautifulSoup import json import sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger =", "soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links = [] for state in state_list: links.extend(state.find_all('a', href=True))", "file_name = hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) 
senate_soup = BeautifulSoup(senate_page.content, 'html.parser')", "senate_photos: sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_') logger.debug(sp['src']) simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name),", "house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']),", "== '__main__': try: state = sys.argv[1] except Exception as e: logger.warning(e) if state", "link: :param attrs: :return: \"\"\" state_page = requests.get(url) soup = BeautifulSoup(state_page.content, 'html.parser') return", "as e: logger.warning(e) if state == 'california': scrape_california() elif state == 'washington': scrape_washington()", "= sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in house_photos: file_name = hp['src'].split('/')[-1]", "= BeautifulSoup(house_page.content, 'html.parser') house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp", "fetch_top_list(url, tag, attrs=None): \"\"\" use a link to fetch a list of image", "scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser') state_list =", "himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img',", "'california': scrape_california() elif state == 'washington': scrape_washington() elif state == 'oregon': scrape_oregon() elif", "simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 
'wb').write(simg.content) else: pass except Exception as e: logger.debug(e) pass", "tag, attrs=None): \"\"\" use a link to fetch a list of image links", "BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs) def scrape_california(): sts = state_scraper.StateScraper() cali = sts.fetch_state_data('california')", "state == 'michigan': scrape_michigan() elif state == 'wisconsin': scrape_wisconsin() elif state == 'georgia':", "'wb').write(himg.content) logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp", "requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page =", "= house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos: try: himg", "pass def scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida') def scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa')", "def scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if __name__ == '__main__':", "= requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page", "elif state == 'michigan': scrape_michigan() elif state == 'wisconsin': scrape_wisconsin() elif state ==", "'michigan': scrape_michigan() elif state == 'wisconsin': scrape_wisconsin() elif state == 'georgia': 
scrape_georgia() else:", "logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup", "bs4 import BeautifulSoup import json import sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__)", "'html.parser') state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links = [] for state in", "links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag, attrs=None): \"\"\" use a", "requests.get(url) soup = BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs) def scrape_california(): sts = state_scraper.StateScraper()", "pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else:", "'')).replace(' ', '_') logger.debug(sp['src']) simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link =", "for hp in house_photos: file_name = hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src'])", "logger.debug(sp['src']) simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link =", "= senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp in senate_photos: sfile_name = (sp['alt'].replace('Senator ', '')).replace('", "senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp 
in senate_photos: sfile_name = (sp['alt'].replace('Senator ',", "for state in state_list: links.extend(state.find_all('a', href=True)) state_links = {} for link in links:", "logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if __name__ == '__main__': try: state", "senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp in senate_photos: sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ',", "== 'colorado': scrape_colorado() elif state == 'iowa': scrape_iowa() elif state == 'illinois': scrape_illinois()", "scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia():", "def scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if __name__ == '__main__': try: state =", "'foaf:Image'}) for sp in senate_photos: sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_') logger.debug(sp['src'])", "scrape_california(): sts = state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp", "= sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass", "return soup.find_all(tag, attrs=attrs) def scrape_california(): sts = state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos =", "in house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup =", "sys.argv[1] except Exception as e: logger.warning(e) if state == 'california': scrape_california() elif state", "logger.warning(e) if state == 
'california': scrape_california() elif state == 'washington': scrape_washington() elif state", "logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if __name__ == '__main__': try: state = sys.argv[1] except", "pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass except Exception as e: logger.debug(e)", "house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in house_photos: file_name = hp['src'].split('/')[-1] himg =", "elif state == 'washington': scrape_washington() elif state == 'oregon': scrape_oregon() elif state ==", "of image links :param link: :param attrs: :return: \"\"\" state_page = requests.get(url) soup", "elif state == 'colorado': scrape_colorado() elif state == 'iowa': scrape_iowa() elif state ==", "'plain margin7 three-column-list'}) links = [] for state in state_list: links.extend(state.find_all('a', href=True)) state_links", "in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass except Exception as e:", "logging.getLogger(__name__) def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser')", "', '')).replace(' ', '_') logger.debug(sp['src']) simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link", "import BeautifulSoup import json import sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def", "= 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 
'wb').write(himg.content)", "= sys.argv[1] except Exception as e: logger.warning(e) if state == 'california': scrape_california() elif", "three-column-list'}) links = [] for state in state_list: links.extend(state.find_all('a', href=True)) state_links = {}", "in house_photos: file_name = hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) senate_soup =", "hp in house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup", "if '.jpg' in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass except Exception", "scrape_oregon() elif state == 'florida': scrape_florida() elif state == 'colorado': scrape_colorado() elif state", "state == 'wisconsin': scrape_wisconsin() elif state == 'georgia': scrape_georgia() else: logger.info('Sorry, that state", "{} for link in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag,", "cali['house']['attrs']) for hp in house_photos: file_name = hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content)", "elif state == 'oregon': scrape_oregon() elif state == 'florida': scrape_florida() elif state ==", "def scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida') def scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def", "state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser') state_list = soup.find_all(attrs={'class':", "a link to fetch a list of 
image links :param link: :param attrs:", "try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser')", ":return: \"\"\" state_page = requests.get(url) soup = BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs) def", "for sp in senate_photos: sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_') logger.debug(sp['src']) simg", "requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a')", "open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass except Exception as e: logger.debug(e) pass def scrape_oregon(): logger.debug('oregon')", "pass except Exception as e: logger.debug(e) pass def scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida')", "logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if __name__ ==", "import sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\"", "hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a') for", "house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos: try: himg =", "state in state_list: links.extend(state.find_all('a', href=True)) state_links = {} for 
link in links: state_links.update({link.text:", "== 'iowa': scrape_iowa() elif state == 'illinois': scrape_illinois() elif state == 'michigan': scrape_michigan()", "', '_') logger.debug(sp['src']) simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx'", "senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a') for sp in senate_photos: try: pol", "elif state == 'iowa': scrape_iowa() elif state == 'illinois': scrape_illinois() elif state ==", "scrape_wisconsin() elif state == 'georgia': scrape_georgia() else: logger.info('Sorry, that state is not yet", "== 'florida': scrape_florida() elif state == 'colorado': scrape_colorado() elif state == 'iowa': scrape_iowa()", "= requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass except Exception as e: logger.debug(e) pass def", "'oregon': scrape_oregon() elif state == 'florida': scrape_florida() elif state == 'colorado': scrape_colorado() elif", "def scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if", "scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page = requests.get(senate_link)", "scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if __name__ == '__main__': try: state = sys.argv[1]", "house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page = 
requests.get(senate_link) house_soup", "scrape_washington() elif state == 'oregon': scrape_oregon() elif state == 'florida': scrape_florida() elif state", "fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in house_photos: file_name = hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name),", "in senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href']))", "BeautifulSoup(state_page.content, 'html.parser') state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links = [] for state", "links.extend(state.find_all('a', href=True)) state_links = {} for link in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links,", "requests from bs4 import BeautifulSoup import json import sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger", "'html.parser') senate_photos = senate_soup.find_all('a') for sp in senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if", "'wisconsin': scrape_wisconsin() elif state == 'georgia': scrape_georgia() else: logger.info('Sorry, that state is not", "def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page =", "BeautifulSoup import json import sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def scrape_state_sites():", "sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page", "senate_page = requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 
'html.parser') house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl", "import json import sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def scrape_state_sites(): state_leg_list", "link in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag, attrs=None): \"\"\"", "simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx'", "logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin')", "soup = BeautifulSoup(state_page.content, 'html.parser') state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links = []", "scrape_georgia(): logger.debug('georgia') if __name__ == '__main__': try: state = sys.argv[1] except Exception as", "attrs={'typeof': 'foaf:Image'}) for sp in senate_photos: sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_')", "scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia') if __name__ == '__main__': try:", "requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser') state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links =", "= {} for link in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url,", "'.jpg' in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) 
open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass except Exception as", "import logging import requests from bs4 import BeautifulSoup import json import sys import", "a list of image links :param link: :param attrs: :return: \"\"\" state_page =", "open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a') for sp", "import state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page =", "[] for state in state_list: links.extend(state.find_all('a', href=True)) state_links = {} for link in", "house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos: try:", "pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a') for sp in senate_photos: try:", "'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt']))", "BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp in senate_photos: sfile_name =", "= (sp['alt'].replace('Senator ', '')).replace(' ', '_') logger.debug(sp['src']) simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def", "house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup = BeautifulSoup(senate_page.content,", "requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 
'wb').write(himg.content) logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'})", "== 'wisconsin': scrape_wisconsin() elif state == 'georgia': scrape_georgia() else: logger.info('Sorry, that state is", "logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp in", "soup.find_all(tag, attrs=attrs) def scrape_california(): sts = state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'],", "\"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser') state_list = soup.find_all(attrs={'class': 'plain margin7", "'florida': scrape_florida() elif state == 'colorado': scrape_colorado() elif state == 'iowa': scrape_iowa() elif", "elif state == 'wisconsin': scrape_wisconsin() elif state == 'georgia': scrape_georgia() else: logger.info('Sorry, that", "open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link)", "sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass except", "logger.debug(e) pass def scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida') def scrape_colorado(): logger.debug('colorado') def scrape_iowa():", "e: logger.debug(e) pass def scrape_oregon(): logger.debug('oregon') def scrape_florida(): logger.debug('florida') def scrape_colorado(): 
logger.debug('colorado') def", "= soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links = [] for state in state_list: links.extend(state.find_all('a',", "logger.debug('florida') def scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan')", "attrs=attrs) def scrape_california(): sts = state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs'])", "house_page = requests.get(house_link) senate_page = requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 'html.parser') house_photos = house_soup.find_all('img',", "def scrape_georgia(): logger.debug('georgia') if __name__ == '__main__': try: state = sys.argv[1] except Exception", "state == 'oregon': scrape_oregon() elif state == 'florida': scrape_florida() elif state == 'colorado':", "requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content) else: pass except Exception as e: logger.debug(e) pass def scrape_oregon():", "= state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in house_photos:", "if state == 'california': scrape_california() elif state == 'washington': scrape_washington() elif state ==", "elif state == 'florida': scrape_florida() elif state == 'colorado': scrape_colorado() elif state ==", "scrape_illinois() elif state == 'michigan': scrape_michigan() elif state == 'wisconsin': scrape_wisconsin() elif state", "= 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page = requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 'html.parser') house_photos", "== 'illinois': scrape_illinois() elif state == 'michigan': 
scrape_michigan() elif state == 'wisconsin': scrape_wisconsin()", "margin7 three-column-list'}) links = [] for state in state_list: links.extend(state.find_all('a', href=True)) state_links =", "logger = logging.getLogger(__name__) def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup =", "= fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in house_photos: file_name = hp['src'].split('/')[-1] himg = requests.get(hp['src'])", "'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page = requests.get(senate_link) house_soup = BeautifulSoup(house_page.content,", "except: pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a') for sp in senate_photos:", "def scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): logger.debug('wisconsin') def", "except Exception as e: logger.warning(e) if state == 'california': scrape_california() elif state ==", "'wb').write(himg.content) except: pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a') for sp in", "= BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a') for sp in senate_photos: try: pol =", "logger.debug('oregon') def scrape_florida(): logger.debug('florida') def scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois')", "sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_') logger.debug(sp['src']) simg = requests.get(sp['src']) open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content)", "state_page = requests.get(url) soup = 
BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs) def scrape_california(): sts", "senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]),", "senate_photos = senate_soup.find_all('a') for sp in senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg'", "def scrape_florida(): logger.debug('florida') def scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def", "soup = BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs) def scrape_california(): sts = state_scraper.StateScraper() cali", "= 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page = requests.get(senate_link) house_soup =", "requests.get(house_link) senate_page = requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 'html.parser') house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'})", "'colorado': scrape_colorado() elif state == 'iowa': scrape_iowa() elif state == 'illinois': scrape_illinois() elif", "cali = sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in house_photos: file_name =", "try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]: simg = requests.get(\"http://leg.wa.gov{}\".format(sp['href'])) open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content)", "scrape_california() elif state == 'washington': scrape_washington() elif state == 'oregon': scrape_oregon() elif state", "def fetch_top_list(url, tag, attrs=None): \"\"\" use a link to fetch a list of", "state_links = {} for 
link in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def", "'wb').write(simg.content) def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page", "'washington': scrape_washington() elif state == 'oregon': scrape_oregon() elif state == 'florida': scrape_florida() elif", "senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx' house_page = requests.get(house_link) senate_page = requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 'html.parser')", "scrape_michigan() elif state == 'wisconsin': scrape_wisconsin() elif state == 'georgia': scrape_georgia() else: logger.info('Sorry,", "scrape_florida(): logger.debug('florida') def scrape_colorado(): logger.debug('colorado') def scrape_iowa(): logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def scrape_michigan():", "house_photos: file_name = hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content,", "senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp in senate_photos:", "links = [] for state in state_list: links.extend(state.find_all('a', href=True)) state_links = {} for", "if __name__ == '__main__': try: state = sys.argv[1] except Exception as e: logger.warning(e)", "link to fetch a list of image links :param link: :param attrs: :return:", "== 'oregon': scrape_oregon() elif state == 'florida': scrape_florida() elif state == 'colorado': scrape_colorado()", "'_') logger.debug(sp['src']) simg = requests.get(sp['src']) 
open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content) def scrape_washington(): house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx' senate_link", "state == 'washington': scrape_washington() elif state == 'oregon': scrape_oregon() elif state == 'florida':", "hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos =", "elif state == 'georgia': scrape_georgia() else: logger.info('Sorry, that state is not yet supported.')", "sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in house_photos: file_name = hp['src'].split('/')[-1] himg", "open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag, attrs=None): \"\"\" use a link to fetch", "use a link to fetch a list of image links :param link: :param", "= logging.getLogger(__name__) def scrape_state_sites(): state_leg_list = \"https://www.congress.gov/state-legislature-websites\" state_page = requests.get(state_leg_list) soup = BeautifulSoup(state_page.content,", "state_page = requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser') state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'})", "= requests.get(\"{}{}.jpg\".format(house_purl, hp['alt'])) open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content) except: pass senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos =", "scrape_florida() elif state == 'colorado': scrape_colorado() elif state == 'iowa': scrape_iowa() elif state", "\"\"\" state_page = requests.get(url) soup = BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs) def scrape_california():", "= BeautifulSoup(state_page.content, 'html.parser') 
state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links = [] for", "attrs=None): \"\"\" use a link to fetch a list of image links :param", "= requests.get(url) soup = BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs) def scrape_california(): sts =", "= BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'}) for sp in senate_photos: sfile_name", "= hp['src'].split('/')[-1] himg = requests.get(hp['src']) open('data/california/house/{}'.format(file_name), 'wb').write(himg.content) logger.debug(hp['src']) senate_soup = BeautifulSoup(senate_page.content, 'html.parser') senate_photos", "scrape_colorado() elif state == 'iowa': scrape_iowa() elif state == 'illinois': scrape_illinois() elif state", "attrs: :return: \"\"\" state_page = requests.get(url) soup = BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs)", "sts = state_scraper.StateScraper() cali = sts.fetch_state_data('california') house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs']) for hp in", "== 'california': scrape_california() elif state == 'washington': scrape_washington() elif state == 'oregon': scrape_oregon()", "house_soup = BeautifulSoup(house_page.content, 'html.parser') house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for", "Exception as e: logger.warning(e) if state == 'california': scrape_california() elif state == 'washington':", "for sp in senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]: simg", "sp in senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/') if '.jpg' in pol[1]: simg =", "logger.debug('iowa') def scrape_illinois(): logger.debug('illinois') def scrape_michigan(): logger.debug('michigan') def scrape_wisconsin(): 
logger.debug('wisconsin') def scrape_georgia(): logger.debug('georgia')", "= BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag, attrs=attrs) def scrape_california(): sts = state_scraper.StateScraper() cali =", "= requests.get(house_link) senate_page = requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 'html.parser') house_photos = house_soup.find_all('img', attrs={'style':", "BeautifulSoup(senate_page.content, 'html.parser') senate_photos = senate_soup.find_all('a') for sp in senate_photos: try: pol = sp['href'].split('/Senate/Senators/publishingimages/')", "try: state = sys.argv[1] except Exception as e: logger.warning(e) if state == 'california':", "in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag, attrs=None): \"\"\" use", "json import sys import state_scraper logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def scrape_state_sites(): state_leg_list =", "= [] for state in state_list: links.extend(state.find_all('a', href=True)) state_links = {} for link", ":param attrs: :return: \"\"\" state_page = requests.get(url) soup = BeautifulSoup(state_page.content, 'html.parser') return soup.find_all(tag,", "href=True)) state_links = {} for link in links: state_links.update({link.text: link['href']}) open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4))", "'w').write(json.dumps(state_links, indent=4)) def fetch_top_list(url, tag, attrs=None): \"\"\" use a link to fetch a", "= requests.get(state_leg_list) soup = BeautifulSoup(state_page.content, 'html.parser') state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'}) links", "attrs={'style': 'width:60px;'}) house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/' for hp in house_photos: try: himg = requests.get(\"{}{}.jpg\".format(house_purl,", "= 
requests.get(senate_link) house_soup = BeautifulSoup(house_page.content, 'html.parser') house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'}) house_purl =", ":param link: :param attrs: :return: \"\"\" state_page = requests.get(url) soup = BeautifulSoup(state_page.content, 'html.parser')" ]
[ "{ 1: { 'Name': 'Separate', 'Type': Separate}, 3: { 'Name': 'AppendDegreesForBoth', 'Type': 'bool'},", "import Separate class Today: definitions = { 1: { 'Name': 'Separate', 'Type': Separate},", "class Today: definitions = { 1: { 'Name': 'Separate', 'Type': Separate}, 3: {", "Today: definitions = { 1: { 'Name': 'Separate', 'Type': Separate}, 3: { 'Name':", "definitions = { 1: { 'Name': 'Separate', 'Type': Separate}, 3: { 'Name': 'AppendDegreesForBoth',", "from watchFaceParser.elements.weatherElements.separate import Separate class Today: definitions = { 1: { 'Name': 'Separate',", "watchFaceParser.elements.weatherElements.separate import Separate class Today: definitions = { 1: { 'Name': 'Separate', 'Type':", "= { 1: { 'Name': 'Separate', 'Type': Separate}, 3: { 'Name': 'AppendDegreesForBoth', 'Type':", "1: { 'Name': 'Separate', 'Type': Separate}, 3: { 'Name': 'AppendDegreesForBoth', 'Type': 'bool'}, }", "Separate class Today: definitions = { 1: { 'Name': 'Separate', 'Type': Separate}, 3:" ]
[ "file_path : str Path with vtk extension were the export occurs mesh :", "Parameters ---------- my_fields2 : FieldsContainer or Field Examples -------- >>> from ansys.dpf import", "\"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\",", "is not None: self.inputs.mesh.connect(mesh) if fields1 is not None: self.inputs.fields1.connect(fields1) if fields2 is", "the operator by evaluationg it Returns -------- outputs : OutputsVtkExport \"\"\" return super().outputs", "container don't have a mesh in their support fields1 : FieldsContainer or Field", "warn from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import", ">>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property def fields2(self): \"\"\"Allows to connect fields2 input", "self._outputs = OutputsVtkExport(self) if file_path is not None: self.inputs.file_path.connect(file_path) if mesh is not", "vtk extension were the export occurs Parameters ---------- my_file_path : str Examples --------", "OutputsVtkExport \"\"\" return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class used to connect user inputs", "self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0)", "first field or fields container don't have a mesh in their support Parameters", "optional=True, document=\"\"\"Necessary if the first field or fields container don't have a mesh", "field or fields container don't have a mesh in their support fields1 :", "dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> # or >>> op.inputs.file_path(my_file_path) \"\"\"", "FieldsContainer or Field Fields exported fields2 : FieldsContainer or Field Fields 
exported Examples", "fields2=my_fields2, ... ) \"\"\" def __init__( self, file_path=None, mesh=None, fields1=None, fields2=None, config=None, server=None,", "name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False,", "input to the operator. Fields exported Parameters ---------- my_fields2 : FieldsContainer or Field", ">>> # Instantiate operator and connect inputs in one line >>> op =", "op.inputs.mesh.connect(my_mesh) >>> # or >>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property def fields1(self): \"\"\"Allows", ">>> op = dpf.operators.serialization.vtk_export() >>> # Connect inputs : op.inputs. ... \"\"\" def", "Returns -------- outputs : OutputsVtkExport \"\"\" return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class used", "their support fields1 : FieldsContainer or Field Fields exported fields2 : FieldsContainer or", "op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs,", "= Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows to connect file_path", ">>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>>", "then be changed to the user needs and be used to instantiate the", "op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2)", "), }, map_output_pin_spec={}, ) return spec @staticmethod def default_config(server=None): \"\"\"Returns the default config", "or >>> op.inputs.file_path(my_file_path) \"\"\" 
return self._file_path @property def mesh(self): \"\"\"Allows to connect mesh", "Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._fields1", "my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1", ">>> # or >>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property def fields1(self): \"\"\"Allows to", "---------- my_mesh : MeshedRegion Examples -------- >>> from ansys.dpf import core as dpf", "allows to customize how the operation will be processed by the operator. Parameters", "\"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self): \"\"\"Enables to connect inputs to the", "or Field Examples -------- >>> from ansys.dpf import core as dpf >>> op", "): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs = InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if file_path is", "Parameters ---------- server : server.DPFServer, optional Server with channel connected to the remote", "InputsVtkExport(_Inputs): \"\"\"Intermediate class used to connect user inputs to vtk_export operator. Examples --------", "op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator and connect inputs in one line >>> op", "def fields1(self): \"\"\"Allows to connect fields1 input to the operator. Fields exported Parameters", "use the the global server. \"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self): \"\"\"Enables", "operator. This config can then be changed to the user needs and be", "instance. When ``None``, attempts to use the the global server. 
\"\"\" return Operator.default_config(name=\"vtk_export\",", "class vtk_export(Operator): \"\"\"Write the input field and fields container into a given vtk", "self._inputs = InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if file_path is not None: self.inputs.file_path.connect(file_path) if", "return super().inputs @property def outputs(self): \"\"\"Enables to get outputs of the operator by", "ansys.dpf.core.operators.specification import PinSpecification, Specification class vtk_export(Operator): \"\"\"Write the input field and fields container", "= dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 =", "fields2(self): \"\"\"Allows to connect fields2 input to the operator. Fields exported Parameters ----------", "DPF operator classes. \"\"\" from warnings import warn from ansys.dpf.core.dpf_operator import Operator from", "to the remote or local instance. When ``None``, attempts to use the the", "server.DPFServer, optional Server with channel connected to the remote or local instance. 
When", "is not None: self.inputs.fields1.connect(fields1) if fields2 is not None: self.inputs.fields2.connect(fields2) @staticmethod def _spec():", "from ansys.dpf.core.outputs import _Outputs from ansys.dpf.core.operators.specification import PinSpecification, Specification class vtk_export(Operator): \"\"\"Write the", "not None: self.inputs.file_path.connect(file_path) if mesh is not None: self.inputs.mesh.connect(mesh) if fields1 is not", "first field or fields container don't have a mesh in their support\"\"\", ),", "import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> my_file_path = str() >>>", "export occurs\"\"\", ), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the first field", "Instantiate operator >>> op = dpf.operators.serialization.vtk_export() >>> # Make input connections >>> my_file_path", "= InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if file_path is not None: self.inputs.file_path.connect(file_path) if mesh", "from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import _Outputs from ansys.dpf.core.operators.specification import PinSpecification,", "def default_config(server=None): \"\"\"Returns the default config of the operator. This config can then", "or Field Fields exported Examples -------- >>> from ansys.dpf import core as dpf", "used to connect user inputs to vtk_export operator. Examples -------- >>> from ansys.dpf", "self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows to connect", "core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> # Connect inputs : op.inputs.", "the export occurs\"\"\", ), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the first", "instantiate the operator. 
The Configuration allows to customize how the operation will be", "is not None: self.inputs.file_path.connect(file_path) if mesh is not None: self.inputs.mesh.connect(mesh) if fields1 is", "to the operator Returns -------- inputs : InputsVtkExport \"\"\" return super().inputs @property def", "return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to get outputs from vtk_export operator.", "= str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 =", "the export occurs Parameters ---------- my_file_path : str Examples -------- >>> from ansys.dpf", ">>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op)", "Operator from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import _Outputs from ansys.dpf.core.operators.specification import", "spec @staticmethod def default_config(server=None): \"\"\"Returns the default config of the operator. 
This config", "import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> # or", "core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> # or >>>", "Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self): \"\"\"Enables to connect inputs to the operator Returns", "Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2) @property", "Examples -------- >>> from ansys.dpf import core as dpf >>> # Instantiate operator", "vtk path Parameters ---------- file_path : str Path with vtk extension were the", "exported\"\"\", ), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={},", ">>> # Connect inputs : op.inputs. ... \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().outputs,", "mesh is not None: self.inputs.mesh.connect(mesh) if fields1 is not None: self.inputs.fields1.connect(fields1) if fields2", "input to the operator. 
Necessary if the first field or fields container don't", ">>> from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> #", ">>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator and connect inputs", ">>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer() >>>", "given vtk path\"\"\" ) spec = Specification( description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"],", ">>> op.inputs.fields2.connect(my_fields2) >>> # or >>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate", "get outputs of the operator by evaluationg it Returns -------- outputs : OutputsVtkExport", "self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1)", "as dpf >>> op = dpf.operators.serialization.vtk_export() >>> # Connect inputs : op.inputs. ...", "\"\"\"Intermediate class used to get outputs from vtk_export operator. 
Examples -------- >>> from", ">>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>>", "my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> #", "ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import _Outputs from", ">>> from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh)", ">>> # Instantiate operator >>> op = dpf.operators.serialization.vtk_export() >>> # Make input connections", "\"\"\" return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class used to connect user inputs to", "to instantiate the operator. The Configuration allows to customize how the operation will", "\"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={}, ) return spec @staticmethod def default_config(server=None):", "-------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>>", "# Instantiate operator and connect inputs in one line >>> op = dpf.operators.serialization.vtk_export(", "document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={}, ) return spec @staticmethod def default_config(server=None): \"\"\"Returns the", ">>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> # or >>> op.inputs.mesh(my_mesh) \"\"\" return", "Instantiate operator and connect inputs in one line >>> op = dpf.operators.serialization.vtk_export( ...", "channel connected to the remote or local instance. 
When ``None``, attempts to use", "op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property def fields1(self): \"\"\"Allows to connect fields1 input to", "the first field or fields container don't have a mesh in their support\"\"\",", "Field Examples -------- >>> from ansys.dpf import core as dpf >>> op =", "self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows to", "\"\"\" vtk_export =============== Autogenerated DPF operator classes. \"\"\" from warnings import warn from", "optional Necessary if the first field or fields container don't have a mesh", "dpf.operators.serialization.vtk_export() >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh)", "and be used to instantiate the operator. The Configuration allows to customize how", ">>> op = dpf.operators.serialization.vtk_export() >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh =", "ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> #", "or >>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property def fields2(self): \"\"\"Allows to connect fields2", "fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs = InputsVtkExport(self) self._outputs = OutputsVtkExport(self)", "\"\"\"Allows to connect fields2 input to the operator. 
Fields exported Parameters ---------- my_fields2", "import Operator from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import _Outputs from ansys.dpf.core.operators.specification", "op, -1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2),", "\"\"\" def __init__( self, file_path=None, mesh=None, fields1=None, fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config,", "op = dpf.operators.serialization.vtk_export() >>> # Make input connections >>> my_file_path = str() >>>", "be used to instantiate the operator. The Configuration allows to customize how the", "def outputs(self): \"\"\"Enables to get outputs of the operator by evaluationg it Returns", "operator by evaluationg it Returns -------- outputs : OutputsVtkExport \"\"\" return super().outputs class", "core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> # or >>>", "Parameters ---------- file_path : str Path with vtk extension were the export occurs", "one line >>> op = dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1, ...", "self.inputs.file_path.connect(file_path) if mesh is not None: self.inputs.mesh.connect(mesh) if fields1 is not None: self.inputs.fields1.connect(fields1)", "class used to connect user inputs to vtk_export operator. Examples -------- >>> from", "self._fields1 @property def fields2(self): \"\"\"Allows to connect fields2 input to the operator. 
Fields", "container into a given vtk path Parameters ---------- file_path : str Path with", ": str Path with vtk extension were the export occurs mesh : MeshedRegion,", ">>> op = dpf.operators.serialization.vtk_export() >>> # Make input connections >>> my_file_path = str()", "Fields exported Parameters ---------- my_fields1 : FieldsContainer or Field Examples -------- >>> from", "= dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> # or >>> op.inputs.file_path(my_file_path) \"\"\" return self._file_path @property", "= dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> # or >>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class", "None: self.inputs.mesh.connect(mesh) if fields1 is not None: self.inputs.fields1.connect(fields1) if fields2 is not None:", "global server. \"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self): \"\"\"Enables to connect inputs", "Parameters ---------- my_fields1 : FieldsContainer or Field Examples -------- >>> from ansys.dpf import", "spec = Specification( description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk", "dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer()", "Parameters ---------- my_file_path : str Examples -------- >>> from ansys.dpf import core as", "str Path with vtk extension were the export occurs mesh : MeshedRegion, optional", "\"\"\"Enables to connect inputs to the operator Returns -------- inputs : InputsVtkExport \"\"\"", "}, map_output_pin_spec={}, ) return spec @staticmethod def default_config(server=None): \"\"\"Returns the default config of", "if fields1 is not None: self.inputs.fields1.connect(fields1) if fields2 is not None: self.inputs.fields2.connect(fields2) 
@staticmethod", "# or >>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property def fields2(self): \"\"\"Allows to connect", "by evaluationg it Returns -------- outputs : OutputsVtkExport \"\"\" return super().outputs class InputsVtkExport(_Inputs):", "fields2 is not None: self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description = ( \"\"\"Write the", "vtk extension were the export occurs\"\"\", ), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary", ": FieldsContainer or Field Examples -------- >>> from ansys.dpf import core as dpf", "from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> my_file_path =", "fields1=None, fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs = InputsVtkExport(self) self._outputs =", "config=config, server=server) self._inputs = InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if file_path is not None:", "op) self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1, op,", "Server with channel connected to the remote or local instance. When ``None``, attempts", "super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class used to connect user inputs to vtk_export operator.", "the user needs and be used to instantiate the operator. The Configuration allows", "or fields container don't have a mesh in their support\"\"\", ), 2: PinSpecification(", ">>> from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> my_file_path", "FieldsContainer or Field Fields exported Examples -------- >>> from ansys.dpf import core as", "and connect inputs in one line >>> op = dpf.operators.serialization.vtk_export( ... 
file_path=my_file_path, ...", "Configuration allows to customize how the operation will be processed by the operator.", "input to the operator. Fields exported Parameters ---------- my_fields1 : FieldsContainer or Field", "operator. Fields exported Parameters ---------- my_fields1 : FieldsContainer or Field Examples -------- >>>", "\"\"\" return self._file_path @property def mesh(self): \"\"\"Allows to connect mesh input to the", "op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to get outputs from", "ansys.dpf import core as dpf >>> # Instantiate operator >>> op = dpf.operators.serialization.vtk_export()", "OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to get outputs from vtk_export operator. Examples -------- >>>", "When ``None``, attempts to use the the global server. \"\"\" return Operator.default_config(name=\"vtk_export\", server=server)", "field or fields container don't have a mesh in their support Parameters ----------", "description = ( \"\"\"Write the input field and fields container into a given", "or local instance. When ``None``, attempts to use the the global server. \"\"\"", "support fields1 : FieldsContainer or Field Fields exported fields2 : FieldsContainer or Field", "self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1)", "MeshedRegion Examples -------- >>> from ansys.dpf import core as dpf >>> op =", "the operator. Fields exported Parameters ---------- my_fields1 : FieldsContainer or Field Examples --------", "dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> # or >>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property def", "inputs to vtk_export operator. 
Examples -------- >>> from ansys.dpf import core as dpf", "inputs to the operator Returns -------- inputs : InputsVtkExport \"\"\" return super().inputs @property", "@property def fields1(self): \"\"\"Allows to connect fields1 input to the operator. Fields exported", "will be processed by the operator. Parameters ---------- server : server.DPFServer, optional Server", "( \"\"\"Write the input field and fields container into a given vtk path\"\"\"", "3, op, 1) self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows to connect file_path input to", "name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk extension were the export occurs\"\"\", ), 1:", "MeshedRegion, optional Necessary if the first field or fields container don't have a", "have a mesh in their support fields1 : FieldsContainer or Field Fields exported", "file_path=None, mesh=None, fields1=None, fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs = InputsVtkExport(self)", "= dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> # or >>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property", "-1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3,", "mesh=None, fields1=None, fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs = InputsVtkExport(self) self._outputs", "server=server) self._inputs = InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if file_path is not None: self.inputs.file_path.connect(file_path)", "None: self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description = ( \"\"\"Write the input field and", "Field Fields exported fields2 : FieldsContainer or Field Fields exported Examples 
-------- >>>", "input connections >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>>", "dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0),", "to connect inputs to the operator Returns -------- inputs : InputsVtkExport \"\"\" return", "as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> # or >>> op.inputs.mesh(my_mesh)", "name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={}, ) return spec @staticmethod", "= dpf.operators.serialization.vtk_export() >>> # Connect inputs : op.inputs. ... \"\"\" def __init__(self, op:", "operation will be processed by the operator. Parameters ---------- server : server.DPFServer, optional", "... fields2=my_fields2, ... ) \"\"\" def __init__( self, file_path=None, mesh=None, fields1=None, fields2=None, config=None,", ">>> from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1)", "dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator and connect inputs in one line", "import Input, _Inputs from ansys.dpf.core.outputs import _Outputs from ansys.dpf.core.operators.specification import PinSpecification, Specification class", "file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1, ... fields2=my_fields2, ... ) \"\"\" def __init__( self,", "self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to get outputs from vtk_export operator. 
Examples", "-------- outputs : OutputsVtkExport \"\"\" return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class used to", "fields container don't have a mesh in their support fields1 : FieldsContainer or", "inputs in one line >>> op = dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh, ...", "as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> # or >>> op.inputs.fields2(my_fields2)", "processed by the operator. Parameters ---------- server : server.DPFServer, optional Server with channel", "def file_path(self): \"\"\"Allows to connect file_path input to the operator. Path with vtk", ">>> op.inputs.mesh.connect(my_mesh) >>> # or >>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property def fields1(self):", "inputs : InputsVtkExport \"\"\" return super().inputs @property def outputs(self): \"\"\"Enables to get outputs", "@property def fields2(self): \"\"\"Allows to connect fields2 input to the operator. Fields exported", "= dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> # or >>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property", "operator Returns -------- inputs : InputsVtkExport \"\"\" return super().inputs @property def outputs(self): \"\"\"Enables", "how the operation will be processed by the operator. 
Parameters ---------- server :", ">>> from ansys.dpf import core as dpf >>> # Instantiate operator >>> op", "PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk extension were the export occurs\"\"\", ),", "don't have a mesh in their support fields1 : FieldsContainer or Field Fields", ">>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> # or >>> op.inputs.fields1(my_fields1) \"\"\" return", "op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property def fields2(self): \"\"\"Allows to connect fields2 input to", "\"\"\" return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to get outputs from vtk_export", "= dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path =", "core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> # or >>>", "... fields1=my_fields1, ... fields2=my_fields2, ... ) \"\"\" def __init__( self, file_path=None, mesh=None, fields1=None,", "def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path)", "fields1(self): \"\"\"Allows to connect fields1 input to the operator. Fields exported Parameters ----------", "map_output_pin_spec={}, ) return spec @staticmethod def default_config(server=None): \"\"\"Returns the default config of the", "connect fields1 input to the operator. 
Fields exported Parameters ---------- my_fields1 : FieldsContainer", "None: self.inputs.fields1.connect(fields1) if fields2 is not None: self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description =", "connect inputs to the operator Returns -------- inputs : InputsVtkExport \"\"\" return super().inputs", "dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> # or >>> op.inputs.fields1(my_fields1) \"\"\"", "Path with vtk extension were the export occurs mesh : MeshedRegion, optional Necessary", "from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import _Outputs", ">>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>>", "Field Fields exported Examples -------- >>> from ansys.dpf import core as dpf >>>", "container don't have a mesh in their support\"\"\", ), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\",", "---------- my_fields2 : FieldsContainer or Field Examples -------- >>> from ansys.dpf import core", "mesh : MeshedRegion, optional Necessary if the first field or fields container don't", "\"\"\"Allows to connect fields1 input to the operator. Fields exported Parameters ---------- my_fields1", "Fields exported fields2 : FieldsContainer or Field Fields exported Examples -------- >>> from", "op = dpf.operators.serialization.vtk_export() >>> # Connect inputs : op.inputs. ... \"\"\" def __init__(self,", "export occurs Parameters ---------- my_file_path : str Examples -------- >>> from ansys.dpf import", "into a given vtk path Parameters ---------- file_path : str Path with vtk", "operator and connect inputs in one line >>> op = dpf.operators.serialization.vtk_export( ... 
file_path=my_file_path,", "op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path) self._mesh =", "optional=False, document=\"\"\"Path with vtk extension were the export occurs\"\"\", ), 1: PinSpecification( name=\"mesh\",", "fields container into a given vtk path Parameters ---------- file_path : str Path", "from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>>", "# Make input connections >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh =", "mesh input to the operator. Necessary if the first field or fields container", "operator. The Configuration allows to customize how the operation will be processed by", "-1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2,", "input field and fields container into a given vtk path\"\"\" ) spec =", "mesh(self): \"\"\"Allows to connect mesh input to the operator. Necessary if the first", "The Configuration allows to customize how the operation will be processed by the", "def _spec(): description = ( \"\"\"Write the input field and fields container into", "Path with vtk extension were the export occurs Parameters ---------- my_file_path : str", "return spec @staticmethod def default_config(server=None): \"\"\"Returns the default config of the operator. This", "= dpf.operators.serialization.vtk_export() >>> # Make input connections >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path)", "attempts to use the the global server. 
\"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property def", "to get outputs of the operator by evaluationg it Returns -------- outputs :", ">>> from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path)", "Returns -------- inputs : InputsVtkExport \"\"\" return super().inputs @property def outputs(self): \"\"\"Enables to", "op, 0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2) @property def file_path(self):", "description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk extension were the", ": FieldsContainer or Field Fields exported fields2 : FieldsContainer or Field Fields exported", ">>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> # or >>> op.inputs.fields2(my_fields2) \"\"\" return", "op.inputs.file_path.connect(my_file_path) >>> # or >>> op.inputs.file_path(my_file_path) \"\"\" return self._file_path @property def mesh(self): \"\"\"Allows", "type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the first field or fields container don't have a", "file_path input to the operator. 
Path with vtk extension were the export occurs", "not None: self.inputs.fields1.connect(fields1) if fields2 is not None: self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description", "None: self.inputs.file_path.connect(file_path) if mesh is not None: self.inputs.mesh.connect(mesh) if fields1 is not None:", "= dpf.operators.serialization.vtk_export() >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>>", "server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs = InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if file_path", "# Connect inputs : op.inputs. ... \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().outputs, op)", "op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> # or >>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh", "user needs and be used to instantiate the operator. The Configuration allows to", "connections >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh)", "by the operator. Parameters ---------- server : server.DPFServer, optional Server with channel connected", "not None: self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description = ( \"\"\"Write the input field", "have a mesh in their support Parameters ---------- my_mesh : MeshedRegion Examples --------", "from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> # Connect", "in their support fields1 : FieldsContainer or Field Fields exported fields2 : FieldsContainer", "\"\"\"Allows to connect file_path input to the operator. 
Path with vtk extension were", "OutputsVtkExport(self) if file_path is not None: self.inputs.file_path.connect(file_path) if mesh is not None: self.inputs.mesh.connect(mesh)", "occurs mesh : MeshedRegion, optional Necessary if the first field or fields container", "connect user inputs to vtk_export operator. Examples -------- >>> from ansys.dpf import core", "self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op,", "to the operator. Fields exported Parameters ---------- my_fields2 : FieldsContainer or Field Examples", "Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows to connect file_path input", "fields1=my_fields1, ... fields2=my_fields2, ... ) \"\"\" def __init__( self, file_path=None, mesh=None, fields1=None, fields2=None,", "a mesh in their support\"\"\", ), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields", "connected to the remote or local instance. When ``None``, attempts to use the", "op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> # or >>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2", "the operator. Fields exported Parameters ---------- my_fields2 : FieldsContainer or Field Examples --------", "it Returns -------- outputs : OutputsVtkExport \"\"\" return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class", "the operator. The Configuration allows to customize how the operation will be processed", "self._mesh @property def fields1(self): \"\"\"Allows to connect fields1 input to the operator. Fields", "and fields container into a given vtk path\"\"\" ) spec = Specification( description=description,", "default config of the operator. 
This config can then be changed to the", "op = dpf.operators.serialization.vtk_export() >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion()", "the operator. Necessary if the first field or fields container don't have a", "as dpf >>> # Instantiate operator >>> op = dpf.operators.serialization.vtk_export() >>> # Make", "exported Examples -------- >>> from ansys.dpf import core as dpf >>> # Instantiate", "_spec(): description = ( \"\"\"Write the input field and fields container into a", "mesh in their support\"\"\", ), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\",", "@property def inputs(self): \"\"\"Enables to connect inputs to the operator Returns -------- inputs", "ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> #", "= OutputsVtkExport(self) if file_path is not None: self.inputs.file_path.connect(file_path) if mesh is not None:", "core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path)", "... mesh=my_mesh, ... fields1=my_fields1, ... fields2=my_fields2, ... 
) \"\"\" def __init__( self, file_path=None,", "= Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1)", "= Specification( description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk extension", "into a given vtk path\"\"\" ) spec = Specification( description=description, map_input_pin_spec={ 0: PinSpecification(", "export occurs mesh : MeshedRegion, optional Necessary if the first field or fields", "fields1 is not None: self.inputs.fields1.connect(fields1) if fields2 is not None: self.inputs.fields2.connect(fields2) @staticmethod def", "dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op:", "@property def mesh(self): \"\"\"Allows to connect mesh input to the operator. Necessary if", "dpf.operators.serialization.vtk_export() >>> # Connect inputs : op.inputs. ... \"\"\" def __init__(self, op: Operator):", "Input, _Inputs from ansys.dpf.core.outputs import _Outputs from ansys.dpf.core.operators.specification import PinSpecification, Specification class vtk_export(Operator):", "str Examples -------- >>> from ansys.dpf import core as dpf >>> op =", "operator. Necessary if the first field or fields container don't have a mesh", ">>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>>", "input field and fields container into a given vtk path Parameters ---------- file_path", "core as dpf >>> # Instantiate operator >>> op = dpf.operators.serialization.vtk_export() >>> #", "def fields2(self): \"\"\"Allows to connect fields2 input to the operator. 
Fields exported Parameters", "or >>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to get", "Specification( description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk extension were", "class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to get outputs from vtk_export operator. Examples --------", "\"\"\"Enables to get outputs of the operator by evaluationg it Returns -------- outputs", "vtk extension were the export occurs mesh : MeshedRegion, optional Necessary if the", "__init__( self, file_path=None, mesh=None, fields1=None, fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs", "---------- server : server.DPFServer, optional Server with channel connected to the remote or", "ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import _Outputs from ansys.dpf.core.operators.specification import PinSpecification, Specification", "\"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1)", ">>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0), 0,", "\"\"\"Write the input field and fields container into a given vtk path\"\"\" )", "return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class used to connect user inputs to vtk_export", "import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> # or", "= dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate", "fields2 : FieldsContainer 
or Field Fields exported Examples -------- >>> from ansys.dpf import", "optional=False, document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ),", "were the export occurs\"\"\", ), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the", "first field or fields container don't have a mesh in their support fields1", "name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the first field or fields container don't have", "needs and be used to instantiate the operator. The Configuration allows to customize", "config of the operator. This config can then be changed to the user", "exported Parameters ---------- my_fields1 : FieldsContainer or Field Examples -------- >>> from ansys.dpf", ">>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op: Operator):", ">>> op.inputs.file_path.connect(my_file_path) >>> # or >>> op.inputs.file_path(my_file_path) \"\"\" return self._file_path @property def mesh(self):", "op, 1) self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows to connect file_path input to the", "changed to the user needs and be used to instantiate the operator. The", ": str Examples -------- >>> from ansys.dpf import core as dpf >>> op", "---------- my_fields1 : FieldsContainer or Field Examples -------- >>> from ansys.dpf import core", "default_config(server=None): \"\"\"Returns the default config of the operator. This config can then be", "server. 
\"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self): \"\"\"Enables to connect inputs to", "document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), },", "connect fields2 input to the operator. Fields exported Parameters ---------- my_fields2 : FieldsContainer", "field and fields container into a given vtk path Parameters ---------- file_path :", "outputs(self): \"\"\"Enables to get outputs of the operator by evaluationg it Returns --------", "---------- file_path : str Path with vtk extension were the export occurs mesh", "= Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh)", "0, op, -1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._fields1 =", "to connect mesh input to the operator. 
Necessary if the first field or", "in their support\"\"\", ), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ),", "from ansys.dpf.core.operators.specification import PinSpecification, Specification class vtk_export(Operator): \"\"\"Write the input field and fields", "0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows", "the input field and fields container into a given vtk path Parameters ----------", "= dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator and connect inputs in one", ">>> # or >>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used", "dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1, ... fields2=my_fields2, ... ) \"\"\" def", "the export occurs mesh : MeshedRegion, optional Necessary if the first field or", "the operator Returns -------- inputs : InputsVtkExport \"\"\" return super().inputs @property def outputs(self):", "super().inputs @property def outputs(self): \"\"\"Enables to get outputs of the operator by evaluationg", "connect mesh input to the operator. 
Necessary if the first field or fields", "def inputs(self): \"\"\"Enables to connect inputs to the operator Returns -------- inputs :", "support Parameters ---------- my_mesh : MeshedRegion Examples -------- >>> from ansys.dpf import core", "document=\"\"\"Path with vtk extension were the export occurs\"\"\", ), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"],", "have a mesh in their support\"\"\", ), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False,", ">>> from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2)", "with channel connected to the remote or local instance. When ``None``, attempts to", "in one line >>> op = dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1,", ">>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator and connect inputs in one line >>>", "Specification class vtk_export(Operator): \"\"\"Write the input field and fields container into a given", "PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={}, ) return spec", "---------- my_file_path : str Examples -------- >>> from ansys.dpf import core as dpf", "the operation will be processed by the operator. 
Parameters ---------- server : server.DPFServer,", "Parameters ---------- my_mesh : MeshedRegion Examples -------- >>> from ansys.dpf import core as", "super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs = InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if file_path is not", "\"\"\"Write the input field and fields container into a given vtk path Parameters", "dpf >>> op = dpf.operators.serialization.vtk_export() >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh", "Autogenerated DPF operator classes. \"\"\" from warnings import warn from ansys.dpf.core.dpf_operator import Operator", "dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> # or >>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property def", "to the operator. Path with vtk extension were the export occurs Parameters ----------", "to get outputs from vtk_export operator. Examples -------- >>> from ansys.dpf import core", ">>> op.inputs.file_path(my_file_path) \"\"\" return self._file_path @property def mesh(self): \"\"\"Allows to connect mesh input", "operator. Parameters ---------- server : server.DPFServer, optional Server with channel connected to the", "Necessary if the first field or fields container don't have a mesh in", ">>> # Make input connections >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh", "type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={}, ) return spec @staticmethod def", "ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> #", "mesh=my_mesh, ... fields1=my_fields1, ... fields2=my_fields2, ... 
) \"\"\" def __init__( self, file_path=None, mesh=None,", "dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> # or >>> op.inputs.mesh(my_mesh) \"\"\"", "= dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1, ... fields2=my_fields2, ... ) \"\"\"", ") spec = Specification( description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with", "ansys.dpf.core.outputs import _Outputs from ansys.dpf.core.operators.specification import PinSpecification, Specification class vtk_export(Operator): \"\"\"Write the input", "exported\"\"\", ), }, map_output_pin_spec={}, ) return spec @staticmethod def default_config(server=None): \"\"\"Returns the default", "vtk_export operator. Examples -------- >>> from ansys.dpf import core as dpf >>> op", "and fields container into a given vtk path Parameters ---------- file_path : str", "self._file_path @property def mesh(self): \"\"\"Allows to connect mesh input to the operator. Necessary", "path\"\"\" ) spec = Specification( description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path", "as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> # or >>> op.inputs.fields1(my_fields1)", "op.inputs.fields1.connect(my_fields1) >>> # or >>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property def fields2(self): \"\"\"Allows", "), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification( name=\"fields\",", ">>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\"", "operator. 
Examples -------- >>> from ansys.dpf import core as dpf >>> op =", "or Field Fields exported fields2 : FieldsContainer or Field Fields exported Examples --------", "op, -1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3),", "occurs\"\"\", ), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the first field or", "super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1), 1,", "self.inputs.fields1.connect(fields1) if fields2 is not None: self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description = (", "return self._file_path @property def mesh(self): \"\"\"Allows to connect mesh input to the operator.", "= Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2)", "were the export occurs Parameters ---------- my_file_path : str Examples -------- >>> from", "a mesh in their support Parameters ---------- my_mesh : MeshedRegion Examples -------- >>>", "as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> # or >>> op.inputs.file_path(my_file_path)", "can then be changed to the user needs and be used to instantiate", "FieldsContainer or Field Examples -------- >>> from ansys.dpf import core as dpf >>>", "import warn from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs", "my_fields1 : FieldsContainer or Field Examples -------- >>> from ansys.dpf import core as", "dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> # or 
>>> op.inputs.file_path(my_file_path) \"\"\" return self._file_path @property def", "op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> # or >>> op.inputs.file_path(my_file_path) \"\"\" return self._file_path", "their support\"\"\", ), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), 3:", "fields container don't have a mesh in their support Parameters ---------- my_mesh :", "# or >>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property def fields1(self): \"\"\"Allows to connect", "the operator. Parameters ---------- server : server.DPFServer, optional Server with channel connected to", "my_file_path : str Examples -------- >>> from ansys.dpf import core as dpf >>>", "the input field and fields container into a given vtk path\"\"\" ) spec", "fields container don't have a mesh in their support\"\"\", ), 2: PinSpecification( name=\"fields\",", "= dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self,", "self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description = ( \"\"\"Write the input field and fields", "local instance. When ``None``, attempts to use the the global server. \"\"\" return", "self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows to connect file_path input to the operator. Path", "my_fields2 : FieldsContainer or Field Examples -------- >>> from ansys.dpf import core as", "outputs of the operator by evaluationg it Returns -------- outputs : OutputsVtkExport \"\"\"", "line >>> op = dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1, ... 
fields2=my_fields2,", "mesh in their support fields1 : FieldsContainer or Field Fields exported fields2 :", "container into a given vtk path\"\"\" ) spec = Specification( description=description, map_input_pin_spec={ 0:", "config can then be changed to the user needs and be used to", ": MeshedRegion Examples -------- >>> from ansys.dpf import core as dpf >>> op", "PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"],", ">>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to get outputs", "were the export occurs mesh : MeshedRegion, optional Necessary if the first field", "import core as dpf >>> # Instantiate operator >>> op = dpf.operators.serialization.vtk_export() >>>", "from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>>", "... 
) \"\"\" def __init__( self, file_path=None, mesh=None, fields1=None, fields2=None, config=None, server=None, ):", "the first field or fields container don't have a mesh in their support", "__init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path) self._mesh", "Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1) self._inputs.append(self._file_path) self._mesh = Input(vtk_export._spec().input_pin(1),", "if file_path is not None: self.inputs.file_path.connect(file_path) if mesh is not None: self.inputs.mesh.connect(mesh) if", "a mesh in their support fields1 : FieldsContainer or Field Fields exported fields2", "fields container into a given vtk path\"\"\" ) spec = Specification( description=description, map_input_pin_spec={", "from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>>", "field and fields container into a given vtk path\"\"\" ) spec = Specification(", "1, op, -1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1) self._fields2 =", "be processed by the operator. Parameters ---------- server : server.DPFServer, optional Server with", "from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>>", ") return spec @staticmethod def default_config(server=None): \"\"\"Returns the default config of the operator.", "warnings import warn from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input, _Inputs from", "\"\"\"Allows to connect mesh input to the operator. Necessary if the first field", "\"\"\"Intermediate class used to connect user inputs to vtk_export operator. 
Examples -------- >>>", "@staticmethod def default_config(server=None): \"\"\"Returns the default config of the operator. This config can", ">>> # or >>> op.inputs.file_path(my_file_path) \"\"\" return self._file_path @property def mesh(self): \"\"\"Allows to", "to customize how the operation will be processed by the operator. Parameters ----------", "not None: self.inputs.mesh.connect(mesh) if fields1 is not None: self.inputs.fields1.connect(fields1) if fields2 is not", "class used to get outputs from vtk_export operator. Examples -------- >>> from ansys.dpf", "# or >>> op.inputs.file_path(my_file_path) \"\"\" return self._file_path @property def mesh(self): \"\"\"Allows to connect", "from vtk_export operator. Examples -------- >>> from ansys.dpf import core as dpf >>>", "map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk extension were the export", "or fields container don't have a mesh in their support fields1 : FieldsContainer", "Fields exported Parameters ---------- my_fields2 : FieldsContainer or Field Examples -------- >>> from", "extension were the export occurs\"\"\", ), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if", "server=server) @property def inputs(self): \"\"\"Enables to connect inputs to the operator Returns --------", "connect file_path input to the operator. Path with vtk extension were the export", "used to instantiate the operator. The Configuration allows to customize how the operation", "Fields exported Examples -------- >>> from ansys.dpf import core as dpf >>> #", "2, op, 0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1) self._inputs.append(self._fields2) @property def", "def mesh(self): \"\"\"Allows to connect mesh input to the operator. 
Necessary if the", "extension were the export occurs mesh : MeshedRegion, optional Necessary if the first", ">>> op = dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1, ... fields2=my_fields2, ...", "extension were the export occurs Parameters ---------- my_file_path : str Examples -------- >>>", "import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> # or", "if the first field or fields container don't have a mesh in their", "... file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1, ... fields2=my_fields2, ... ) \"\"\" def __init__(", "if fields2 is not None: self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description = ( \"\"\"Write", "vtk_export =============== Autogenerated DPF operator classes. \"\"\" from warnings import warn from ansys.dpf.core.dpf_operator", "is not None: self.inputs.fields2.connect(fields2) @staticmethod def _spec(): description = ( \"\"\"Write the input", "1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the first field or fields container", "2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\",", "@staticmethod def _spec(): description = ( \"\"\"Write the input field and fields container", "self.inputs.mesh.connect(mesh) if fields1 is not None: self.inputs.fields1.connect(fields1) if fields2 is not None: self.inputs.fields2.connect(fields2)", "the remote or local instance. 
When ``None``, attempts to use the the global", ": InputsVtkExport \"\"\" return super().inputs @property def outputs(self): \"\"\"Enables to get outputs of", "in their support Parameters ---------- my_mesh : MeshedRegion Examples -------- >>> from ansys.dpf", "my_mesh : MeshedRegion Examples -------- >>> from ansys.dpf import core as dpf >>>", "), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={}, )", "PinSpecification, Specification class vtk_export(Operator): \"\"\"Write the input field and fields container into a", ") \"\"\" def __init__( self, file_path=None, mesh=None, fields1=None, fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\",", "\"\"\" from warnings import warn from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input,", "optional Server with channel connected to the remote or local instance. When ``None``,", "self, file_path=None, mesh=None, fields1=None, fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs =", "Make input connections >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion()", "user inputs to vtk_export operator. Examples -------- >>> from ansys.dpf import core as", "fields2 input to the operator. Fields exported Parameters ---------- my_fields2 : FieldsContainer or", "exported fields2 : FieldsContainer or Field Fields exported Examples -------- >>> from ansys.dpf", "container don't have a mesh in their support Parameters ---------- my_mesh : MeshedRegion", "to use the the global server. \"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self):", "don't have a mesh in their support Parameters ---------- my_mesh : MeshedRegion Examples", "to connect fields1 input to the operator. 
Fields exported Parameters ---------- my_fields1 :", "as dpf >>> op = dpf.operators.serialization.vtk_export() >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>>", "or fields container don't have a mesh in their support Parameters ---------- my_mesh", "get outputs from vtk_export operator. Examples -------- >>> from ansys.dpf import core as", "field or fields container don't have a mesh in their support\"\"\", ), 2:", "= ( \"\"\"Write the input field and fields container into a given vtk", "to the operator. Fields exported Parameters ---------- my_fields1 : FieldsContainer or Field Examples", "to the user needs and be used to instantiate the operator. The Configuration", "PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the first field or fields container don't", "core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> # or >>>", "dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> # or >>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class OutputsVtkExport(_Outputs):", "op = dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh, ... fields1=my_fields1, ... fields2=my_fields2, ... )", "op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields1.connect(my_fields1) >>> # or >>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1", "operator classes. 
\"\"\" from warnings import warn from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs", "dpf >>> # Instantiate operator >>> op = dpf.operators.serialization.vtk_export() >>> # Make input", "my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path", "dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator", "file_path is not None: self.inputs.file_path.connect(file_path) if mesh is not None: self.inputs.mesh.connect(mesh) if fields1", ">>> # or >>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property def fields2(self): \"\"\"Allows to", "import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> # Connect inputs :", "return Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self): \"\"\"Enables to connect inputs to the operator", "self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1) self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op,", "of the operator by evaluationg it Returns -------- outputs : OutputsVtkExport \"\"\" return", "ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> #", "@property def outputs(self): \"\"\"Enables to get outputs of the operator by evaluationg it", "remote or local instance. When ``None``, attempts to use the the global server.", ">>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property def fields1(self): \"\"\"Allows to connect fields1 input", "op.inputs.fields2.connect(my_fields2) >>> # or >>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class", "to connect fields2 input to the operator. 
Fields exported Parameters ---------- my_fields2 :", "operator. Fields exported Parameters ---------- my_fields2 : FieldsContainer or Field Examples -------- >>>", "inputs(self): \"\"\"Enables to connect inputs to the operator Returns -------- inputs : InputsVtkExport", ": server.DPFServer, optional Server with channel connected to the remote or local instance.", "fields1 : FieldsContainer or Field Fields exported fields2 : FieldsContainer or Field Fields", "classes. \"\"\" from warnings import warn from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import", "if mesh is not None: self.inputs.mesh.connect(mesh) if fields1 is not None: self.inputs.fields1.connect(fields1) if", "def __init__( self, file_path=None, mesh=None, fields1=None, fields2=None, config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server)", "document=\"\"\"Necessary if the first field or fields container don't have a mesh in", "\"\"\" return self._mesh @property def fields1(self): \"\"\"Allows to connect fields1 input to the", "op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1)", "with vtk extension were the export occurs\"\"\", ), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True,", "This config can then be changed to the user needs and be used", "\"\"\" return self._fields1 @property def fields2(self): \"\"\"Allows to connect fields2 input to the", ">>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator and", "server : server.DPFServer, optional Server with channel connected to the remote or local", "or >>> op.inputs.mesh(my_mesh) \"\"\" return self._mesh @property def fields1(self): \"\"\"Allows to connect fields1", "_Inputs from ansys.dpf.core.outputs import 
_Outputs from ansys.dpf.core.operators.specification import PinSpecification, Specification class vtk_export(Operator): \"\"\"Write", "Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export()", "ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> my_file_path = str()", "return self._mesh @property def fields1(self): \"\"\"Allows to connect fields1 input to the operator.", "don't have a mesh in their support\"\"\", ), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"],", "a given vtk path Parameters ---------- file_path : str Path with vtk extension", "InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if file_path is not None: self.inputs.file_path.connect(file_path) if mesh is", "to connect user inputs to vtk_export operator. Examples -------- >>> from ansys.dpf import", "evaluationg it Returns -------- outputs : OutputsVtkExport \"\"\" return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate", "their support Parameters ---------- my_mesh : MeshedRegion Examples -------- >>> from ansys.dpf import", "class InputsVtkExport(_Inputs): \"\"\"Intermediate class used to connect user inputs to vtk_export operator. Examples", "\"\"\" return super().inputs @property def outputs(self): \"\"\"Enables to get outputs of the operator", "of the operator. This config can then be changed to the user needs", "operator >>> op = dpf.operators.serialization.vtk_export() >>> # Make input connections >>> my_file_path =", "dpf >>> op = dpf.operators.serialization.vtk_export() >>> # Connect inputs : op.inputs. ... 
\"\"\"", "op.inputs.fields2.connect(my_fields2) \"\"\" def __init__(self, op: Operator): super().__init__(vtk_export._spec().inputs, op) self._file_path = Input(vtk_export._spec().input_pin(0), 0, op,", "import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.mesh.connect(my_mesh) >>> # or", "to connect file_path input to the operator. Path with vtk extension were the", "file_path(self): \"\"\"Allows to connect file_path input to the operator. Path with vtk extension", "ansys.dpf import core as dpf >>> op = dpf.operators.serialization.vtk_export() >>> # Connect inputs", "given vtk path Parameters ---------- file_path : str Path with vtk extension were", "import _Outputs from ansys.dpf.core.operators.specification import PinSpecification, Specification class vtk_export(Operator): \"\"\"Write the input field", "\"\"\"Returns the default config of the operator. This config can then be changed", "exported Parameters ---------- my_fields2 : FieldsContainer or Field Examples -------- >>> from ansys.dpf", "optional=False, document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={}, ) return spec @staticmethod def default_config(server=None): \"\"\"Returns", "from warnings import warn from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input, _Inputs", "``None``, attempts to use the the global server. 
\"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property", "op.inputs.file_path(my_file_path) \"\"\" return self._file_path @property def mesh(self): \"\"\"Allows to connect mesh input to", ": FieldsContainer or Field Fields exported Examples -------- >>> from ansys.dpf import core", "from ansys.dpf import core as dpf >>> # Instantiate operator >>> op =", "Input(vtk_export._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0) self._inputs.append(self._fields1) self._fields2", "config=None, server=None, ): super().__init__(name=\"vtk_export\", config=config, server=server) self._inputs = InputsVtkExport(self) self._outputs = OutputsVtkExport(self) if", "the the global server. \"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self): \"\"\"Enables to", "the operator. Path with vtk extension were the export occurs Parameters ---------- my_file_path", "=============== Autogenerated DPF operator classes. \"\"\" from warnings import warn from ansys.dpf.core.dpf_operator import", "to vtk_export operator. Examples -------- >>> from ansys.dpf import core as dpf >>>", "type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields", "_Outputs from ansys.dpf.core.operators.specification import PinSpecification, Specification class vtk_export(Operator): \"\"\"Write the input field and", "the default config of the operator. 
This config can then be changed to", "vtk_export(Operator): \"\"\"Write the input field and fields container into a given vtk path", ">>> op.inputs.fields1.connect(my_fields1) >>> # or >>> op.inputs.fields1(my_fields1) \"\"\" return self._fields1 @property def fields2(self):", "dpf.operators.serialization.vtk_export() >>> # Make input connections >>> my_file_path = str() >>> op.inputs.file_path.connect(my_file_path) >>>", ": OutputsVtkExport \"\"\" return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class used to connect user", "a given vtk path\"\"\" ) spec = Specification( description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\",", "return self._fields1 @property def fields2(self): \"\"\"Allows to connect fields2 input to the operator.", "used to get outputs from vtk_export operator. Examples -------- >>> from ansys.dpf import", "3: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), }, map_output_pin_spec={}, ) return", "-------- inputs : InputsVtkExport \"\"\" return super().inputs @property def outputs(self): \"\"\"Enables to get", "str() >>> op.inputs.file_path.connect(my_file_path) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer()", "support\"\"\", ), 2: PinSpecification( name=\"fields\", type_names=[\"fields_container\", \"field\"], optional=False, document=\"\"\"Fields exported\"\"\", ), 3: PinSpecification(", "dpf >>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.fields2.connect(my_fields2) >>> # or >>> op.inputs.fields2(my_fields2) \"\"\"", "customize how the operation will be processed by the operator. 
Parameters ---------- server", "op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator and connect", "@property def file_path(self): \"\"\"Allows to connect file_path input to the operator. Path with", "0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk extension were the export occurs\"\"\",", "input to the operator. Path with vtk extension were the export occurs Parameters", "path Parameters ---------- file_path : str Path with vtk extension were the export", "InputsVtkExport \"\"\" return super().inputs @property def outputs(self): \"\"\"Enables to get outputs of the", "my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) \"\"\" def", "1) self._inputs.append(self._fields2) @property def file_path(self): \"\"\"Allows to connect file_path input to the operator.", "# or >>> op.inputs.fields2(my_fields2) \"\"\" return self._fields2 class OutputsVtkExport(_Outputs): \"\"\"Intermediate class used to", "type_names=[\"string\"], optional=False, document=\"\"\"Path with vtk extension were the export occurs\"\"\", ), 1: PinSpecification(", "outputs : OutputsVtkExport \"\"\" return super().outputs class InputsVtkExport(_Inputs): \"\"\"Intermediate class used to connect", "with vtk extension were the export occurs mesh : MeshedRegion, optional Necessary if", "mesh in their support Parameters ---------- my_mesh : MeshedRegion Examples -------- >>> from", ": MeshedRegion, optional Necessary if the first field or fields container don't have", "my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_fields1 = dpf.FieldsContainer() >>> op.inputs.fields1.connect(my_fields1) >>> my_fields2", "the operator. This config can then be changed to the user needs and", "the global server. 
\"\"\" return Operator.default_config(name=\"vtk_export\", server=server) @property def inputs(self): \"\"\"Enables to connect", "to the operator. Necessary if the first field or fields container don't have", "import PinSpecification, Specification class vtk_export(Operator): \"\"\"Write the input field and fields container into", "-------- >>> from ansys.dpf import core as dpf >>> # Instantiate operator >>>", "connect inputs in one line >>> op = dpf.operators.serialization.vtk_export( ... file_path=my_file_path, ... mesh=my_mesh,", ">>> op = dpf.operators.serialization.vtk_export() >>> op.inputs.file_path.connect(my_file_path) >>> # or >>> op.inputs.file_path(my_file_path) \"\"\" return", "vtk path\"\"\" ) spec = Specification( description=description, map_input_pin_spec={ 0: PinSpecification( name=\"file_path\", type_names=[\"string\"], optional=False,", "occurs Parameters ---------- my_file_path : str Examples -------- >>> from ansys.dpf import core", "), 1: PinSpecification( name=\"mesh\", type_names=[\"abstract_meshed_region\"], optional=True, document=\"\"\"Necessary if the first field or fields", "be changed to the user needs and be used to instantiate the operator.", "outputs from vtk_export operator. Examples -------- >>> from ansys.dpf import core as dpf", "with vtk extension were the export occurs Parameters ---------- my_file_path : str Examples", "# Instantiate operator >>> op = dpf.operators.serialization.vtk_export() >>> # Make input connections >>>", "operator. Path with vtk extension were the export occurs Parameters ---------- my_file_path :", "fields1 input to the operator. Fields exported Parameters ---------- my_fields1 : FieldsContainer or", "my_fields2 = dpf.FieldsContainer() >>> op.inputs.fields2.connect(my_fields2) >>> # Instantiate operator and connect inputs in" ]
[ "parent = relationship('SOterm') def __init__(self, name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def", "term models.\"\"\" from glyphrepository.database import Column, Model, SurrogatePK, db, reference_col, relationship class SOterm(SurrogatePK,", "SurrogatePK, db, reference_col, relationship class SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\" __tablename__ = 'soterms' name", "def __init__(self, name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def __repr__(self): \"\"\"Represent instance", "name=name, **kwargs) def __repr__(self): \"\"\"Represent instance as a unique string.\"\"\" return '<Role({name})>'.format(name=self.name) def", "glyphrepository.database import Column, Model, SurrogatePK, db, reference_col, relationship class SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\"", "SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\" __tablename__ = 'soterms' name = Column(db.String(80), unique=False, nullable=False) definition", "relationship class SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\" __tablename__ = 'soterms' name = Column(db.String(80), unique=False,", "is_a = reference_col('soterms', nullable=True) parent = relationship('SOterm') def __init__(self, name, **kwargs): \"\"\"Create instance.\"\"\"", "def __repr__(self): \"\"\"Represent instance as a unique string.\"\"\" return '<Role({name})>'.format(name=self.name) def get_full_id(self): return", "**kwargs) def __repr__(self): \"\"\"Represent instance as a unique string.\"\"\" return '<Role({name})>'.format(name=self.name) def get_full_id(self):", "= Column(db.String(80), unique=False, nullable=False) definition = Column(db.String(500), unique=False, nullable=False) is_a = reference_col('soterms', nullable=True)", "= reference_col('soterms', nullable=True) parent = relationship('SOterm') def __init__(self, name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self,", "class SOterm(SurrogatePK, Model): 
\"\"\"A glyph.\"\"\" __tablename__ = 'soterms' name = Column(db.String(80), unique=False, nullable=False)", "-*- coding: utf-8 -*- \"\"\"BO term models.\"\"\" from glyphrepository.database import Column, Model, SurrogatePK,", "name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def __repr__(self): \"\"\"Represent instance as a", "from glyphrepository.database import Column, Model, SurrogatePK, db, reference_col, relationship class SOterm(SurrogatePK, Model): \"\"\"A", "**kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def __repr__(self): \"\"\"Represent instance as a unique", "Model): \"\"\"A glyph.\"\"\" __tablename__ = 'soterms' name = Column(db.String(80), unique=False, nullable=False) definition =", "'soterms' name = Column(db.String(80), unique=False, nullable=False) definition = Column(db.String(500), unique=False, nullable=False) is_a =", "import Column, Model, SurrogatePK, db, reference_col, relationship class SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\" __tablename__", "relationship('SOterm') def __init__(self, name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def __repr__(self): \"\"\"Represent", "= 'soterms' name = Column(db.String(80), unique=False, nullable=False) definition = Column(db.String(500), unique=False, nullable=False) is_a", "Model, SurrogatePK, db, reference_col, relationship class SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\" __tablename__ = 'soterms'", "\"\"\"A glyph.\"\"\" __tablename__ = 'soterms' name = Column(db.String(80), unique=False, nullable=False) definition = Column(db.String(500),", "glyph.\"\"\" __tablename__ = 'soterms' name = Column(db.String(80), unique=False, nullable=False) definition = Column(db.String(500), unique=False,", "-*- \"\"\"BO term models.\"\"\" from glyphrepository.database import Column, Model, SurrogatePK, db, reference_col, relationship", "# -*- coding: utf-8 -*- \"\"\"BO term 
models.\"\"\" from glyphrepository.database import Column, Model,", "db.Model.__init__(self, name=name, **kwargs) def __repr__(self): \"\"\"Represent instance as a unique string.\"\"\" return '<Role({name})>'.format(name=self.name)", "__tablename__ = 'soterms' name = Column(db.String(80), unique=False, nullable=False) definition = Column(db.String(500), unique=False, nullable=False)", "Column(db.String(80), unique=False, nullable=False) definition = Column(db.String(500), unique=False, nullable=False) is_a = reference_col('soterms', nullable=True) parent", "= relationship('SOterm') def __init__(self, name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def __repr__(self):", "nullable=False) definition = Column(db.String(500), unique=False, nullable=False) is_a = reference_col('soterms', nullable=True) parent = relationship('SOterm')", "definition = Column(db.String(500), unique=False, nullable=False) is_a = reference_col('soterms', nullable=True) parent = relationship('SOterm') def", "instance as a unique string.\"\"\" return '<Role({name})>'.format(name=self.name) def get_full_id(self): return \"SO:\" + str(self.id).zfill(7)", "unique=False, nullable=False) is_a = reference_col('soterms', nullable=True) parent = relationship('SOterm') def __init__(self, name, **kwargs):", "nullable=True) parent = relationship('SOterm') def __init__(self, name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs)", "db, reference_col, relationship class SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\" __tablename__ = 'soterms' name =", "\"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def __repr__(self): \"\"\"Represent instance as a unique string.\"\"\"", "Column, Model, SurrogatePK, db, reference_col, relationship class SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\" __tablename__ =", "Column(db.String(500), unique=False, nullable=False) is_a = reference_col('soterms', nullable=True) 
parent = relationship('SOterm') def __init__(self, name,", "instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def __repr__(self): \"\"\"Represent instance as a unique string.\"\"\" return", "__repr__(self): \"\"\"Represent instance as a unique string.\"\"\" return '<Role({name})>'.format(name=self.name) def get_full_id(self): return \"SO:\"", "reference_col('soterms', nullable=True) parent = relationship('SOterm') def __init__(self, name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name,", "coding: utf-8 -*- \"\"\"BO term models.\"\"\" from glyphrepository.database import Column, Model, SurrogatePK, db,", "unique=False, nullable=False) definition = Column(db.String(500), unique=False, nullable=False) is_a = reference_col('soterms', nullable=True) parent =", "= Column(db.String(500), unique=False, nullable=False) is_a = reference_col('soterms', nullable=True) parent = relationship('SOterm') def __init__(self,", "models.\"\"\" from glyphrepository.database import Column, Model, SurrogatePK, db, reference_col, relationship class SOterm(SurrogatePK, Model):", "utf-8 -*- \"\"\"BO term models.\"\"\" from glyphrepository.database import Column, Model, SurrogatePK, db, reference_col,", "<reponame>BDAthlon/2017-Triple_Helix-1 # -*- coding: utf-8 -*- \"\"\"BO term models.\"\"\" from glyphrepository.database import Column,", "__init__(self, name, **kwargs): \"\"\"Create instance.\"\"\" db.Model.__init__(self, name=name, **kwargs) def __repr__(self): \"\"\"Represent instance as", "\"\"\"BO term models.\"\"\" from glyphrepository.database import Column, Model, SurrogatePK, db, reference_col, relationship class", "nullable=False) is_a = reference_col('soterms', nullable=True) parent = relationship('SOterm') def __init__(self, name, **kwargs): \"\"\"Create", "\"\"\"Represent instance as a unique string.\"\"\" return '<Role({name})>'.format(name=self.name) def get_full_id(self): return \"SO:\" +", "reference_col, relationship class 
SOterm(SurrogatePK, Model): \"\"\"A glyph.\"\"\" __tablename__ = 'soterms' name = Column(db.String(80),", "name = Column(db.String(80), unique=False, nullable=False) definition = Column(db.String(500), unique=False, nullable=False) is_a = reference_col('soterms'," ]
[ "np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu =", "= n_components self.whitening = whitening self.parameters_path = parameters_path def train(self, x): '''training pca.", "torch.sqrt(lams)) / np.sqrt(nPoints - 1))) Utmu = torch.matmul(U.t(), mu) U, lams, mu, Utmu", "U = torch.index_select(U, 1, k_indices) lams = L lams[lams < 1e-9] = 1e-9", "pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu = pca['Utmu'][...] if (self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams)))", "mu) U, lams, mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA RESULT", "whitening self.parameters_path = parameters_path def train(self, x): '''training pca. Args: x: [N, dim]", "if (self.n_components < x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L, 0, k_indices)", "= torch.matmul(x, x.t()) / (nPoints - 1) else: doDual = True x2 =", "< 1e-9] = 1e-9 if (doDual): U = torch.matmul(x, torch.matmul(U, torch.diag(1. / torch.sqrt(lams))", "else: output = F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1) output = F.normalize(output, p=2,", "PCA RESULT ==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') #", "Utmu = pca['Utmu'][...] if (self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu)", "[N, dim] FloatTensor containing data which undergoes PCA/Whitening. ''' x = x.t() nPoints", "import numpy as np import torch import torch.nn.functional as F class PCA(): def", "1e-9] = 1e-9 if (doDual): U = torch.matmul(x, torch.matmul(U, torch.diag(1. / torch.sqrt(lams)) /", "= pca['mu'][...] Utmu = pca['Utmu'][...] 
if (self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu =", "bias=self.bias, stride=1, padding=0).view(N, -1) output = F.normalize(output, p=2, dim=-1) # IMPORTANT! assert (output.size(1)", "- 1))) Utmu = torch.matmul(U.t(), mu) U, lams, mu, Utmu = U.numpy(), lams.numpy(),", "FloatTensor with output of PCA/Whitening operation. ''' # import pdb;pdb.set_trace() N, D =", "self.n_components = n_components self.whitening = whitening self.parameters_path = parameters_path def train(self, x): '''training", "torch.matmul(x, torch.matmul(U, torch.diag(1. / torch.sqrt(lams)) / np.sqrt(nPoints - 1))) Utmu = torch.matmul(U.t(), mu)", "torch.matmul(x, x.t()) / (nPoints - 1) else: doDual = True x2 = torch.matmul(x.t(),", "(nPoints - 1) L, U = torch.symeig(x2, eigenvectors=True) if (self.n_components < x2.size(0)): k_indices", "def infer(self, data): '''apply PCA/Whitening to data. Args: data: [N, dim] FloatTensor containing", "= np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu", "{}'.format(Utmu.shape)) print('===============================================') # save features, labels to h5 file. filename = os.path.join(self.parameters_path) np.savez(filename,", "mu if (nDims <= nPoints): doDual = False x2 = torch.matmul(x, x.t()) /", "x) / (nPoints - 1) L, U = torch.symeig(x2, eigenvectors=True) if (self.n_components <", "operation. ''' # import pdb;pdb.set_trace() N, D = data.size() data = data.view(N, D,", "torch import torch.nn.functional as F class PCA(): def __init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components", "1e-9 if (doDual): U = torch.matmul(x, torch.matmul(U, torch.diag(1. 
/ torch.sqrt(lams)) / np.sqrt(nPoints -", "lams=lams, mu=mu, Utmu=Utmu) def load(self): print('loading PCA parameters...') pca = np.load(self.parameters_path) U =", "print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save features, labels to h5 file. filename", "[N, dim] FloatTensor containing data which undergoes PCA/Whitening. Returns: output: [N, output_dim] FloatTensor", "data.view(N, D, 1, 1) if torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N,", "= x.mean(1).unsqueeze(1) x = x - mu if (nDims <= nPoints): doDual =", "Utmu = torch.matmul(U.t(), mu) U, lams, mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy()", "U, lams, mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA RESULT ==================')", "1) if torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else: output", "PCA/Whitening. Returns: output: [N, output_dim] FloatTensor with output of PCA/Whitening operation. ''' #", "= data.view(N, D, 1, 1) if torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1,", "= 1e-9 if (doDual): U = torch.matmul(x, torch.matmul(U, torch.diag(1. / torch.sqrt(lams)) / np.sqrt(nPoints", "mu.numpy(), Utmu.numpy() print('================= PCA RESULT ==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu:", "save features, labels to h5 file. filename = os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu,", "= whitening self.parameters_path = parameters_path def train(self, x): '''training pca. 
Args: x: [N,", "(nDims <= nPoints): doDual = False x2 = torch.matmul(x, x.t()) / (nPoints -", "- 1) L, U = torch.symeig(x2, eigenvectors=True) if (self.n_components < x2.size(0)): k_indices =", "F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1) output = F.normalize(output, p=2, dim=-1) # IMPORTANT!", "Utmu=Utmu) def load(self): print('loading PCA parameters...') pca = np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components]", "undergoes PCA/Whitening. ''' x = x.t() nPoints = x.size(1) nDims = x.size(0) #", "-1) else: output = F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1) output = F.normalize(output,", "= pca['Utmu'][...] if (self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu) self.weight", "as F class PCA(): def __init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening", "Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA RESULT ==================') print('U: {}'.format(U.shape)) print('lams:", "output = F.normalize(output, p=2, dim=-1) # IMPORTANT! assert (output.size(1) == self.n_components) return output", "- mu if (nDims <= nPoints): doDual = False x2 = torch.matmul(x, x.t())", "RESULT ==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save", "to h5 file. filename = os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu) def load(self):", "= np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def", "x = x - mu if (nDims <= nPoints): doDual = False x2", "'''training pca. 
Args: x: [N, dim] FloatTensor containing data which undergoes PCA/Whitening. '''", "if (doDual): U = torch.matmul(x, torch.matmul(U, torch.diag(1. / torch.sqrt(lams)) / np.sqrt(nPoints - 1)))", "labels to h5 file. filename = os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu) def", "1) else: doDual = True x2 = torch.matmul(x.t(), x) / (nPoints - 1)", "x.mean(1).unsqueeze(1) x = x - mu if (nDims <= nPoints): doDual = False", "PCA parameters...') pca = np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components] mu", "else: doDual = True x2 = torch.matmul(x.t(), x) / (nPoints - 1) L,", "pca['mu'][...] Utmu = pca['Utmu'][...] if (self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T,", "FloatTensor containing data which undergoes PCA/Whitening. Returns: output: [N, output_dim] FloatTensor with output", "numpy as np import torch import torch.nn.functional as F class PCA(): def __init__(self,", "self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else: output = F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N,", "/ torch.sqrt(lams)) / np.sqrt(nPoints - 1))) Utmu = torch.matmul(U.t(), mu) U, lams, mu,", "output = F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1) output = F.normalize(output, p=2, dim=-1)", "''' x = x.t() nPoints = x.size(1) nDims = x.size(0) # x =", "lams[lams < 1e-9] = 1e-9 if (doDual): U = torch.matmul(x, torch.matmul(U, torch.diag(1. /", "/ (nPoints - 1) else: doDual = True x2 = torch.matmul(x.t(), x) /", "- 1) else: doDual = True x2 = torch.matmul(x.t(), x) / (nPoints -", "= torch.matmul(x, torch.matmul(U, torch.diag(1. 
/ torch.sqrt(lams)) / np.sqrt(nPoints - 1))) Utmu = torch.matmul(U.t(),", "x.double() mu = x.mean(1).unsqueeze(1) x = x - mu if (nDims <= nPoints):", "lams = L lams[lams < 1e-9] = 1e-9 if (doDual): U = torch.matmul(x,", "x2 = torch.matmul(x, x.t()) / (nPoints - 1) else: doDual = True x2", "data = data.view(N, D, 1, 1) if torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(),", "L = torch.index_select(L, 0, k_indices) U = torch.index_select(U, 1, k_indices) lams = L", "torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else: output = F.conv2d(data,", "x2 = torch.matmul(x.t(), x) / (nPoints - 1) L, U = torch.symeig(x2, eigenvectors=True)", "self.parameters_path = parameters_path def train(self, x): '''training pca. Args: x: [N, dim] FloatTensor", "import torch import torch.nn.functional as F class PCA(): def __init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'):", "x.t()) / (nPoints - 1) else: doDual = True x2 = torch.matmul(x.t(), x)", "as np import torch import torch.nn.functional as F class PCA(): def __init__(self, n_components=1024,", "mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self, data):", "(nPoints - 1) else: doDual = True x2 = torch.matmul(x.t(), x) / (nPoints", "padding=0).view(N, -1) output = F.normalize(output, p=2, dim=-1) # IMPORTANT! 
assert (output.size(1) == self.n_components)", "= x.size(0) # x = x.double() mu = x.mean(1).unsqueeze(1) x = x -", "doDual = False x2 = torch.matmul(x, x.t()) / (nPoints - 1) else: doDual", "os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu) def load(self): print('loading PCA parameters...') pca =", "if (self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components,", "= torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply PCA/Whitening", "F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else: output = F.conv2d(data, self.weight, bias=self.bias, stride=1,", "mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA RESULT ==================') print('U: {}'.format(U.shape))", "= True x2 = torch.matmul(x.t(), x) / (nPoints - 1) L, U =", "1, 1) if torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else:", "import torch.nn.functional as F class PCA(): def __init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components =", "torch.matmul(x.t(), x) / (nPoints - 1) L, U = torch.symeig(x2, eigenvectors=True) if (self.n_components", "torch.diag(1. / torch.sqrt(lams)) / np.sqrt(nPoints - 1))) Utmu = torch.matmul(U.t(), mu) U, lams,", "print('===============================================') # save features, labels to h5 file. filename = os.path.join(self.parameters_path) np.savez(filename, U=U,", "stride=1, padding=0).view(N, -1) output = F.normalize(output, p=2, dim=-1) # IMPORTANT! assert (output.size(1) ==", "n_components self.whitening = whitening self.parameters_path = parameters_path def train(self, x): '''training pca. 
Args:", "print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save features, labels", "x.size(0) # x = x.double() mu = x.mean(1).unsqueeze(1) x = x - mu", "< x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L, 0, k_indices) U =", "torch.index_select(U, 1, k_indices) lams = L lams[lams < 1e-9] = 1e-9 if (doDual):", "PCA/Whitening. ''' x = x.t() nPoints = x.size(1) nDims = x.size(0) # x", "mu = pca['mu'][...] Utmu = pca['Utmu'][...] if (self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu", "= x - mu if (nDims <= nPoints): doDual = False x2 =", "x.t() nPoints = x.size(1) nDims = x.size(0) # x = x.double() mu =", "Utmu.numpy() print('================= PCA RESULT ==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape))", "pca. Args: x: [N, dim] FloatTensor containing data which undergoes PCA/Whitening. ''' x", "file. 
filename = os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu) def load(self): print('loading PCA", "U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA RESULT ==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu:", "torch.nn.functional as F class PCA(): def __init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components", "eigenvectors=True) if (self.n_components < x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L, 0,", "lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA RESULT ==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape))", "k_indices) U = torch.index_select(U, 1, k_indices) lams = L lams[lams < 1e-9] =", "print('loading PCA parameters...') pca = np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components]", "x): '''training pca. Args: x: [N, dim] FloatTensor containing data which undergoes PCA/Whitening.", "U = torch.symeig(x2, eigenvectors=True) if (self.n_components < x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components] L", "np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias", "torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply PCA/Whitening to data. Args: data: [N, dim] FloatTensor", "N, D = data.size() data = data.view(N, D, 1, 1) if torch.cuda.is_available(): output", "FloatTensor containing data which undergoes PCA/Whitening. 
''' x = x.t() nPoints = x.size(1)", "= data.size() data = data.view(N, D, 1, 1) if torch.cuda.is_available(): output = F.conv2d(data,", "load(self): print('loading PCA parameters...') pca = np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components] lams =", "output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else: output = F.conv2d(data, self.weight,", "0, k_indices) U = torch.index_select(U, 1, k_indices) lams = L lams[lams < 1e-9]", "= torch.index_select(L, 0, k_indices) U = torch.index_select(U, 1, k_indices) lams = L lams[lams", "doDual = True x2 = torch.matmul(x.t(), x) / (nPoints - 1) L, U", "==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save features,", "x - mu if (nDims <= nPoints): doDual = False x2 = torch.matmul(x,", "np import torch import torch.nn.functional as F class PCA(): def __init__(self, n_components=1024, whitening=True,", "x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L, 0, k_indices) U = torch.index_select(U,", "descending=True)[:self.n_components] L = torch.index_select(L, 0, k_indices) U = torch.index_select(U, 1, k_indices) lams =", "(doDual): U = torch.matmul(x, torch.matmul(U, torch.diag(1. / torch.sqrt(lams)) / np.sqrt(nPoints - 1))) Utmu", "self.weight, bias=self.bias, stride=1, padding=0).view(N, -1) output = F.normalize(output, p=2, dim=-1) # IMPORTANT! assert", "= torch.matmul(x.t(), x) / (nPoints - 1) L, U = torch.symeig(x2, eigenvectors=True) if", "'''apply PCA/Whitening to data. 
Args: data: [N, dim] FloatTensor containing data which undergoes", "= torch.symeig(x2, eigenvectors=True) if (self.n_components < x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components] L =", "U = pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu = pca['Utmu'][...]", "1) L, U = torch.symeig(x2, eigenvectors=True) if (self.n_components < x2.size(0)): k_indices = torch.argsort(L,", "mu = x.mean(1).unsqueeze(1) x = x - mu if (nDims <= nPoints): doDual", "1, k_indices) lams = L lams[lams < 1e-9] = 1e-9 if (doDual): U", "= torch.matmul(U.t(), mu) U, lams, mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('=================", "lams = pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu = pca['Utmu'][...] if (self.whitening): U =", "x: [N, dim] FloatTensor containing data which undergoes PCA/Whitening. ''' x = x.t()", "def __init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening = whitening self.parameters_path =", "<= nPoints): doDual = False x2 = torch.matmul(x, x.t()) / (nPoints - 1)", "= False x2 = torch.matmul(x, x.t()) / (nPoints - 1) else: doDual =", "{}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save features, labels to h5 file.", "filename = os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu) def load(self): print('loading PCA parameters...')", "np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self,", "def load(self): print('loading PCA parameters...') pca = np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components] lams", "class PCA(): def __init__(self, n_components=1024, 
whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening = whitening", "= F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1) output = F.normalize(output, p=2, dim=-1) #", "L lams[lams < 1e-9] = 1e-9 if (doDual): U = torch.matmul(x, torch.matmul(U, torch.diag(1.", "os import numpy as np import torch import torch.nn.functional as F class PCA():", "D, 1, 1) if torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1)", "torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L, 0, k_indices) U = torch.index_select(U, 1, k_indices) lams", "mu=mu, Utmu=Utmu) def load(self): print('loading PCA parameters...') pca = np.load(self.parameters_path) U = pca['U'][...][:,", "# save features, labels to h5 file. filename = os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams,", "print('================= PCA RESULT ==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================')", "1))) Utmu = torch.matmul(U.t(), mu) U, lams, mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(),", "output of PCA/Whitening operation. ''' # import pdb;pdb.set_trace() N, D = data.size() data", "import os import numpy as np import torch import torch.nn.functional as F class", "PCA/Whitening operation. ''' # import pdb;pdb.set_trace() N, D = data.size() data = data.view(N,", "= torch.index_select(U, 1, k_indices) lams = L lams[lams < 1e-9] = 1e-9 if", "= L lams[lams < 1e-9] = 1e-9 if (doDual): U = torch.matmul(x, torch.matmul(U,", "np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu) def load(self): print('loading PCA parameters...') pca = np.load(self.parameters_path)", "= parameters_path def train(self, x): '''training pca. 
Args: x: [N, dim] FloatTensor containing", "= x.size(1) nDims = x.size(0) # x = x.double() mu = x.mean(1).unsqueeze(1) x", "= np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float()", "x.size(1) nDims = x.size(0) # x = x.double() mu = x.mean(1).unsqueeze(1) x =", "torch.index_select(L, 0, k_indices) U = torch.index_select(U, 1, k_indices) lams = L lams[lams <", "/ (nPoints - 1) L, U = torch.symeig(x2, eigenvectors=True) if (self.n_components < x2.size(0)):", "output_dim] FloatTensor with output of PCA/Whitening operation. ''' # import pdb;pdb.set_trace() N, D", "data): '''apply PCA/Whitening to data. Args: data: [N, dim] FloatTensor containing data which", "<filename>pre_processing/pca.py import os import numpy as np import torch import torch.nn.functional as F", "[N, output_dim] FloatTensor with output of PCA/Whitening operation. ''' # import pdb;pdb.set_trace() N,", "= torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L, 0, k_indices) U = torch.index_select(U, 1, k_indices)", "{}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save features, labels to h5 file. filename =", "torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply PCA/Whitening to", "dim] FloatTensor containing data which undergoes PCA/Whitening. Returns: output: [N, output_dim] FloatTensor with", "pca['Utmu'][...] if (self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu) self.weight =", "F class PCA(): def __init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening =", "infer(self, data): '''apply PCA/Whitening to data. 
Args: data: [N, dim] FloatTensor containing data", "output: [N, output_dim] FloatTensor with output of PCA/Whitening operation. ''' # import pdb;pdb.set_trace()", "= os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu) def load(self): print('loading PCA parameters...') pca", "{}'.format(U.shape)) print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save features, labels to", "__init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening = whitening self.parameters_path = parameters_path", "data: [N, dim] FloatTensor containing data which undergoes PCA/Whitening. Returns: output: [N, output_dim]", "k_indices = torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L, 0, k_indices) U = torch.index_select(U, 1,", "= pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu = pca['Utmu'][...] if (self.whitening): U = np.matmul(U,", "which undergoes PCA/Whitening. Returns: output: [N, output_dim] FloatTensor with output of PCA/Whitening operation.", "self.whitening = whitening self.parameters_path = parameters_path def train(self, x): '''training pca. Args: x:", "= x.double() mu = x.mean(1).unsqueeze(1) x = x - mu if (nDims <=", "k_indices) lams = L lams[lams < 1e-9] = 1e-9 if (doDual): U =", "L, U = torch.symeig(x2, eigenvectors=True) if (self.n_components < x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components]", "# x = x.double() mu = x.mean(1).unsqueeze(1) x = x - mu if", "dim] FloatTensor containing data which undergoes PCA/Whitening. ''' x = x.t() nPoints =", "False x2 = torch.matmul(x, x.t()) / (nPoints - 1) else: doDual = True", "pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu = pca['Utmu'][...] 
if (self.whitening):", "U = torch.matmul(x, torch.matmul(U, torch.diag(1. / torch.sqrt(lams)) / np.sqrt(nPoints - 1))) Utmu =", "= F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else: output = F.conv2d(data, self.weight, bias=self.bias,", "bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else: output = F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1)", "data which undergoes PCA/Whitening. Returns: output: [N, output_dim] FloatTensor with output of PCA/Whitening", "Returns: output: [N, output_dim] FloatTensor with output of PCA/Whitening operation. ''' # import", "torch.symeig(x2, eigenvectors=True) if (self.n_components < x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L,", "containing data which undergoes PCA/Whitening. ''' x = x.t() nPoints = x.size(1) nDims", "train(self, x): '''training pca. Args: x: [N, dim] FloatTensor containing data which undergoes", "-1, 1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply PCA/Whitening to data.", "''' # import pdb;pdb.set_trace() N, D = data.size() data = data.view(N, D, 1,", "(self.whitening): U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1,", "self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply", "padding=0).view(N, -1) else: output = F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1) output =", "n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening = whitening self.parameters_path = parameters_path def", "1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply PCA/Whitening to data. 
Args:", "with output of PCA/Whitening operation. ''' # import pdb;pdb.set_trace() N, D = data.size()", "PCA(): def __init__(self, n_components=1024, whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening = whitening self.parameters_path", "Args: x: [N, dim] FloatTensor containing data which undergoes PCA/Whitening. ''' x =", "(self.n_components < x2.size(0)): k_indices = torch.argsort(L, descending=True)[:self.n_components] L = torch.index_select(L, 0, k_indices) U", "np.sqrt(nPoints - 1))) Utmu = torch.matmul(U.t(), mu) U, lams, mu, Utmu = U.numpy(),", "stride=1, padding=0).view(N, -1) else: output = F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1) output", "= pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu = pca['Utmu'][...] if", "def train(self, x): '''training pca. Args: x: [N, dim] FloatTensor containing data which", "Utmu = np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float()", "1).float() self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply PCA/Whitening to data. Args: data:", "data. Args: data: [N, dim] FloatTensor containing data which undergoes PCA/Whitening. Returns: output:", ":self.n_components] lams = pca['lams'][...][:self.n_components] mu = pca['mu'][...] Utmu = pca['Utmu'][...] 
if (self.whitening): U", "if (nDims <= nPoints): doDual = False x2 = torch.matmul(x, x.t()) / (nPoints", "if torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1) else: output =", "pca = np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components] mu = pca['mu'][...]", "self.bias = torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply PCA/Whitening to data. Args: data: [N,", "/ np.sqrt(nPoints - 1))) Utmu = torch.matmul(U.t(), mu) U, lams, mu, Utmu =", "Args: data: [N, dim] FloatTensor containing data which undergoes PCA/Whitening. Returns: output: [N,", "True x2 = torch.matmul(x.t(), x) / (nPoints - 1) L, U = torch.symeig(x2,", "of PCA/Whitening operation. ''' # import pdb;pdb.set_trace() N, D = data.size() data =", "parameters...') pca = np.load(self.parameters_path) U = pca['U'][...][:, :self.n_components] lams = pca['lams'][...][:self.n_components] mu =", "D = data.size() data = data.view(N, D, 1, 1) if torch.cuda.is_available(): output =", "= torch.from_numpy(-Utmu).view(-1).float() def infer(self, data): '''apply PCA/Whitening to data. Args: data: [N, dim]", "pdb;pdb.set_trace() N, D = data.size() data = data.view(N, D, 1, 1) if torch.cuda.is_available():", "# import pdb;pdb.set_trace() N, D = data.size() data = data.view(N, D, 1, 1)", "= U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA RESULT ==================') print('U: {}'.format(U.shape)) print('lams: {}'.format(lams.shape))", "torch.matmul(U, torch.diag(1. / torch.sqrt(lams)) / np.sqrt(nPoints - 1))) Utmu = torch.matmul(U.t(), mu) U,", "containing data which undergoes PCA/Whitening. Returns: output: [N, output_dim] FloatTensor with output of", "-1) output = F.normalize(output, p=2, dim=-1) # IMPORTANT! 
assert (output.size(1) == self.n_components) return", "lams, mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA RESULT ==================') print('U:", "= x.t() nPoints = x.size(1) nDims = x.size(0) # x = x.double() mu", "whitening=True, parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening = whitening self.parameters_path = parameters_path def train(self,", "PCA/Whitening to data. Args: data: [N, dim] FloatTensor containing data which undergoes PCA/Whitening.", "h5 file. filename = os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu) def load(self): print('loading", "x = x.double() mu = x.mean(1).unsqueeze(1) x = x - mu if (nDims", "undergoes PCA/Whitening. Returns: output: [N, output_dim] FloatTensor with output of PCA/Whitening operation. '''", "parameters_path def train(self, x): '''training pca. Args: x: [N, dim] FloatTensor containing data", "U=U, lams=lams, mu=mu, Utmu=Utmu) def load(self): print('loading PCA parameters...') pca = np.load(self.parameters_path) U", "parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'): self.n_components = n_components self.whitening = whitening self.parameters_path = parameters_path def train(self, x):", "x = x.t() nPoints = x.size(1) nDims = x.size(0) # x = x.double()", "which undergoes PCA/Whitening. ''' x = x.t() nPoints = x.size(1) nDims = x.size(0)", "torch.matmul(U.t(), mu) U, lams, mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy() print('================= PCA", "print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save features, labels to h5 file. filename = os.path.join(self.parameters_path)", "U = np.matmul(U, np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1,", "to data. 
Args: data: [N, dim] FloatTensor containing data which undergoes PCA/Whitening. Returns:", "data which undergoes PCA/Whitening. ''' x = x.t() nPoints = x.size(1) nDims =", "nDims = x.size(0) # x = x.double() mu = x.mean(1).unsqueeze(1) x = x", "print('lams: {}'.format(lams.shape)) print('mu: {}'.format(mu.shape)) print('Utmu: {}'.format(Utmu.shape)) print('===============================================') # save features, labels to h5", "features, labels to h5 file. filename = os.path.join(self.parameters_path) np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu)", "nPoints): doDual = False x2 = torch.matmul(x, x.t()) / (nPoints - 1) else:", "import pdb;pdb.set_trace() N, D = data.size() data = data.view(N, D, 1, 1) if", "np.diag(1./np.sqrt(lams))) Utmu = np.matmul(U.T, mu) self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float() self.bias =", "nPoints = x.size(1) nDims = x.size(0) # x = x.double() mu = x.mean(1).unsqueeze(1)", "data.size() data = data.view(N, D, 1, 1) if torch.cuda.is_available(): output = F.conv2d(data, self.weight.cuda()," ]
[ "# Padding input inputPad = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, width), range(0,", "imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] * templateCoeff[h,w][1])", "computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF # Iteration from timeit import itertools ''' Parameters:", "from timeit import itertools ''' Parameters: pathToDir = Input image directory imageName =", "in itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute", "x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) #", "heightPad = width+kernelSize-1, height+kernelSize-1 # Padding input inputPad = createImageF(widthPad, heightPad) for x,y", "coefficients imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad) templateCoeff, _, _ = computeCoefficients(templatePadFlip) # Show", "= imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage = reconstruction(resultCoeff) outPad = createImageF(width, height) halfKernel", "by using the Fourier transform ''' # Set module functions from ImageUtilities import", "9 # Read image into array inputImage, width, height = imageReadL(pathToDir + imageName)", "maxFrequencyH + 1)): w = kw + maxFrequencyW h = kh + maxFrequencyH", "Power result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage =", "+ imageName) # Show input image showImageL(inputImage) # Create Kernel kernelImage = createImageF(width,", "for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\ range(-maxFrequencyH, maxFrequencyH + 1)): w", "imageCoeff, maxFrequencyW, maxFrequencyH = 
computeCoefficients(inputPad) templateCoeff, _, _ = computeCoefficients(templatePadFlip) # Show the", "kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\ range(-maxFrequencyH, maxFrequencyH + 1)): w =", "= \"Eye.png\" kernelSize = 9 # Read image into array inputImage, width, height", "= 9 # Read image into array inputImage, width, height = imageReadL(pathToDir +", "- \\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\ imageCoeff[h,w][0]", "powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency", "image name kernelSize = Size of the kernel ''' pathToDir = \"../../Images/Chapter3/Input/\" imageName", "+ 1)): w = kw + maxFrequencyW h = kh + maxFrequencyH resultCoeff[h,w][0]", ", 2) for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\ range(-maxFrequencyH, maxFrequencyH +", "import imageLogF # Iteration from timeit import itertools ''' Parameters: pathToDir = Input", "kernelSize = 9 # Read image into array inputImage, width, height = imageReadL(pathToDir", "kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad) templateCoeff, _,", "<NAME> & <NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter an image by using the", "templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] * templateCoeff[h,w][1]) # Power", "createImageF(1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH , 2) for", "reconstruction(resultCoeff) outPad = createImageF(width, height) halfKernel = int(kernelSize/2) for x,y in itertools.product(range(0, width),", "kernelImage = createImageF(width, height) # Set the pixels of a flat kernel for", "Filter an 
image by using the Fourier transform ''' # Set module functions", "Input image name kernelSize = Size of the kernel ''' pathToDir = \"../../Images/Chapter3/Input/\"", "inputPad = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, width), range(0, height)): inputPad[y,x] =", "Frequency domain multiplication resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1 + 2", "inputImage, width, height = imageReadL(pathToDir + imageName) # Show input image showImageL(inputImage) #", "Image Processing <NAME> & <NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter an image by", "Size of the kernel ''' pathToDir = \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize =", "= kw + maxFrequencyW h = kh + maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0] *", "\"Eye.png\" kernelSize = 9 # Read image into array inputImage, width, height =", "computeCoefficients(templatePadFlip) # Show the log of the power of the input image and", "width), range(0, height)): outPad[y,x] = outputImage[y + halfKernel, x + halfKernel] # Show", "computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF # Iteration from timeit import itertools", "input image and template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate =", "kernelSize = Size of the kernel ''' pathToDir = \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\"", "pixels of a flat kernel for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): kernelImage[y,", "= createImageF(width, height) halfKernel = int(kernelSize/2) for x,y in itertools.product(range(0, width), range(0, height)):", "powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate)", "in 
itertools.product(range(0, width), range(0, height)): outPad[y,x] = outputImage[y + halfKernel, x + halfKernel]", "imageName = Input image name kernelSize = Size of the kernel ''' pathToDir", "''' Parameters: pathToDir = Input image directory imageName = Input image name kernelSize", "imageCoeff[h,w][0] * templateCoeff[h,w][1]) # Power result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog)", "height = imageReadL(pathToDir + imageName) # Show input image showImageL(inputImage) # Create Kernel", "showImageL, createImageL, showImageF, createImageF from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import", "template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog =", "result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage = reconstruction(resultCoeff)", "input inputPad = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, width), range(0, height)): inputPad[y,x]", "imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain multiplication resultCoeff = createImageF(1 + 2 * maxFrequencyW,", "in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\ range(-maxFrequencyH, maxFrequencyH + 1)): w = kw", "range(-maxFrequencyH, maxFrequencyH + 1)): w = kw + maxFrequencyW h = kh +", "of a flat kernel for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): kernelImage[y, x]", "Extraction and Image Processing <NAME> & <NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter an", "a flat kernel for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): kernelImage[y, x] =", "maxFrequencyH 
resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] =", "= reconstruction(resultCoeff) outPad = createImageF(width, height) halfKernel = int(kernelSize/2) for x,y in itertools.product(range(0,", "= computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog)", "of the kernel ''' pathToDir = \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize = 9", "createImageF from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF # Iteration", "power of the input image and template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage)", "* templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] * templateCoeff[h,w][1]) # Power result powerResult = computePowerfromCoefficients(resultCoeff)", "import itertools ''' Parameters: pathToDir = Input image directory imageName = Input image", "Parameters: pathToDir = Input image directory imageName = Input image name kernelSize =", "itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\ range(-maxFrequencyH, maxFrequencyH + 1)): w = kw +", "1), \\ range(-maxFrequencyH, maxFrequencyH + 1)): w = kw + maxFrequencyW h =", "width), range(0, height)): inputPad[y,x] = inputImage[y,x] # Padding and flip template templatePadFlip =", "itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients", "computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage = reconstruction(resultCoeff) outPad = createImageF(width,", "for x,y in itertools.product(range(0, width), 
range(0, height)): inputPad[y,x] = inputImage[y,x] # Padding and", "range(0, height)): inputPad[y,x] = inputImage[y,x] # Padding and flip template templatePadFlip = createImageF(widthPad,", "showImageF(powerTemplateLog) # Frequency domain multiplication resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1", "h = kh + maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\ imageCoeff[h,w][1]", "\\ imageCoeff[h,w][0] * templateCoeff[h,w][1]) # Power result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult)", "* templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] * templateCoeff[h,w][1]) #", "height) # Set the pixels of a flat kernel for x,y in itertools.product(range(0,", "imageName) # Show input image showImageL(inputImage) # Create Kernel kernelImage = createImageF(width, height)", "Processing <NAME> & <NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter an image by using", "width+kernelSize-1, height+kernelSize-1 # Padding input inputPad = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0,", "# Padding and flip template templatePadFlip = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0,", "from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF # Iteration from", "size widthPad, heightPad = width+kernelSize-1, height+kernelSize-1 # Padding input inputPad = createImageF(widthPad, heightPad)", "= createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y, x] =", "powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage = reconstruction(resultCoeff) outPad = createImageF(width, height)", "showImageF(powerResultLog) # Reconstruction outputImage = reconstruction(resultCoeff) 
outPad = createImageF(width, height) halfKernel = int(kernelSize/2)", "+ maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1]", "array inputImage, width, height = imageReadL(pathToDir + imageName) # Show input image showImageL(inputImage)", "= kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad) templateCoeff,", "createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1,", "# Power result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage", "# Show input image showImageL(inputImage) # Create Kernel kernelImage = createImageF(width, height) #", "Padding and flip template templatePadFlip = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, kernelSize),", "+ 2 * maxFrequencyW, 1 + 2 * maxFrequencyH , 2) for kw,kh", "kernel ''' pathToDir = \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize = 9 # Read", "using the Fourier transform ''' # Set module functions from ImageUtilities import imageReadL,", "= Input image directory imageName = Input image name kernelSize = Size of", "(imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] * templateCoeff[h,w][1]) # Power result powerResult =", "domain multiplication resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1 + 2 *", "the Fourier transform ''' # Set module functions from ImageUtilities import imageReadL, showImageL,", "= createImageF(width, height) # Set the pixels of a flat kernel for x,y", "ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF from FourierUtilities import computeCoefficients, reconstruction, 
computePowerfromCoefficients", "= createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, width), range(0, height)): inputPad[y,x] = inputImage[y,x]", "halfKernel = int(kernelSize/2) for x,y in itertools.product(range(0, width), range(0, height)): outPad[y,x] = outputImage[y", "the pixels of a flat kernel for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):", "\\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] *", "ImageOperatorsUtilities import imageLogF # Iteration from timeit import itertools ''' Parameters: pathToDir =", "height)): inputPad[y,x] = inputImage[y,x] # Padding and flip template templatePadFlip = createImageF(widthPad, heightPad)", "templateCoeff[h,w][1]) # Power result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction", "# Reconstruction outputImage = reconstruction(resultCoeff) outPad = createImageF(width, height) halfKernel = int(kernelSize/2) for", "module functions from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF from FourierUtilities import", "Padding input inputPad = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, width), range(0, height)):", "imageReadL, showImageL, createImageL, showImageF, createImageF from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities", "_, _ = computeCoefficients(templatePadFlip) # Show the log of the power of the", "w = kw + maxFrequencyW h = kh + maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0]", "kernel for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): kernelImage[y, x] = 255.0 #", "= computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage = 
reconstruction(resultCoeff) outPad =", "Fourier transform ''' # Set module functions from ImageUtilities import imageReadL, showImageL, createImageL,", "pathToDir = \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize = 9 # Read image into", "Set the pixels of a flat kernel for x,y in itertools.product(range(0, kernelSize), range(0,", "imageLogF # Iteration from timeit import itertools ''' Parameters: pathToDir = Input image", "# Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad) templateCoeff, _, _ = computeCoefficients(templatePadFlip)", "Create Kernel kernelImage = createImageF(width, height) # Set the pixels of a flat", "showImageL(inputImage) # Create Kernel kernelImage = createImageF(width, height) # Set the pixels of", "showImageF, createImageF from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF #", "in itertools.product(range(0, width), range(0, height)): inputPad[y,x] = inputImage[y,x] # Padding and flip template", "maxFrequencyH , 2) for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\ range(-maxFrequencyH, maxFrequencyH", "computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) #", "* templateCoeff[h,w][1]) # Power result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) #", "showImageF(templatePadFlip) # Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad) templateCoeff, _, _ =", "Reconstruction outputImage = reconstruction(resultCoeff) outPad = createImageF(width, height) halfKernel = int(kernelSize/2) for x,y", "log of the power of the input image and template powerImage = 
computePowerfromCoefficients(imageCoeff)", "FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF # Iteration from timeit", "outPad = createImageF(width, height) halfKernel = int(kernelSize/2) for x,y in itertools.product(range(0, width), range(0,", "''' # Set module functions from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF", "from ImageOperatorsUtilities import imageLogF # Iteration from timeit import itertools ''' Parameters: pathToDir", "kw + maxFrequencyW h = kh + maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0]", "= computeCoefficients(templatePadFlip) # Show the log of the power of the input image", "maxFrequencyW + 1), \\ range(-maxFrequencyH, maxFrequencyH + 1)): w = kw + maxFrequencyW", "# Set module functions from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF from", "(imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0]", "and Image Processing <NAME> & <NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter an image", "imageReadL(pathToDir + imageName) # Show input image showImageL(inputImage) # Create Kernel kernelImage =", "templatePadFlip = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y, x]", "in itertools.product(range(0, kernelSize), range(0, kernelSize)): kernelImage[y, x] = 255.0 # Padding size widthPad,", "powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog = imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage = reconstruction(resultCoeff) outPad", "imageLogF(powerResult) showImageF(powerResultLog) # Reconstruction outputImage = reconstruction(resultCoeff) outPad = createImageF(width, height) halfKernel 
=", "of the power of the input image and template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog", "Kernel kernelImage = createImageF(width, height) # Set the pixels of a flat kernel", "maxFrequencyW h = kh + maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\", "Feature Extraction and Image Processing <NAME> & <NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter", "templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH =", "\\ range(-maxFrequencyH, maxFrequencyH + 1)): w = kw + maxFrequencyW h = kh", "the input image and template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate", "2) for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\ range(-maxFrequencyH, maxFrequencyH + 1)):", "maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad) templateCoeff, _, _ = computeCoefficients(templatePadFlip) # Show the log", "kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH", "image directory imageName = Input image name kernelSize = Size of the kernel", "# Read image into array inputImage, width, height = imageReadL(pathToDir + imageName) #", "range(0, kernelSize)): kernelImage[y, x] = 255.0 # Padding size widthPad, heightPad = width+kernelSize-1,", "itertools ''' Parameters: pathToDir = Input image directory imageName = Input image name", "flip template templatePadFlip = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):", "x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad)", "kh + 
maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] * templateCoeff[h,w][1])", "functions from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF from FourierUtilities import computeCoefficients,", "pathToDir = Input image directory imageName = Input image name kernelSize = Size", "_ = computeCoefficients(templatePadFlip) # Show the log of the power of the input", "+ 2 * maxFrequencyH , 2) for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1),", "1)): w = kw + maxFrequencyW h = kh + maxFrequencyH resultCoeff[h,w][0] =", "createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, width), range(0, height)): inputPad[y,x] = inputImage[y,x] #", "timeit import itertools ''' Parameters: pathToDir = Input image directory imageName = Input", "= Size of the kernel ''' pathToDir = \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize", "inputPad[y,x] = inputImage[y,x] # Padding and flip template templatePadFlip = createImageF(widthPad, heightPad) for", "Show the log of the power of the input image and template powerImage", "itertools.product(range(0, width), range(0, height)): inputPad[y,x] = inputImage[y,x] # Padding and flip template templatePadFlip", "for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): kernelImage[y, x] = 255.0 # Padding", "# Set the pixels of a flat kernel for x,y in itertools.product(range(0, kernelSize),", "width, height = imageReadL(pathToDir + imageName) # Show input image showImageL(inputImage) # Create", "= imageReadL(pathToDir + imageName) # Show input image showImageL(inputImage) # Create Kernel kernelImage", "= imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain multiplication resultCoeff = createImageF(1 + 2 *", "= kh + maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] *", "= 255.0 # Padding size widthPad, heightPad = 
width+kernelSize-1, height+kernelSize-1 # Padding input", "imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain multiplication", "showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain multiplication resultCoeff", "Padding size widthPad, heightPad = width+kernelSize-1, height+kernelSize-1 # Padding input inputPad = createImageF(widthPad,", "* maxFrequencyH , 2) for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\ range(-maxFrequencyH,", "resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1]", "image and template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff)", "3 FourierConvolution: Filter an image by using the Fourier transform ''' # Set", "for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip)", "for x,y in itertools.product(range(0, width), range(0, height)): outPad[y,x] = outputImage[y + halfKernel, x", "kernelImage[y, x] = 255.0 # Padding size widthPad, heightPad = width+kernelSize-1, height+kernelSize-1 #", "image by using the Fourier transform ''' # Set module functions from ImageUtilities", "imageName = \"Eye.png\" kernelSize = 9 # Read image into array inputImage, width,", "x,y in itertools.product(range(0, width), range(0, height)): inputPad[y,x] = inputImage[y,x] # Padding and flip", "http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter an image by using the Fourier transform '''", "# Show the log of 
the power of the input image and template", "''' pathToDir = \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize = 9 # Read image", "= imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain", "height) halfKernel = int(kernelSize/2) for x,y in itertools.product(range(0, width), range(0, height)): outPad[y,x] =", "Read image into array inputImage, width, height = imageReadL(pathToDir + imageName) # Show", "inputImage[y,x] # Padding and flip template templatePadFlip = createImageF(widthPad, heightPad) for x,y in", "kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad) templateCoeff, _, _", "resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] * templateCoeff[h,w][1]) # Power result", "heightPad) for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1]", "computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain multiplication resultCoeff = createImageF(1 +", "outputImage = reconstruction(resultCoeff) outPad = createImageF(width, height) halfKernel = int(kernelSize/2) for x,y in", "''' Feature Extraction and Image Processing <NAME> & <NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution:", "# Padding size widthPad, heightPad = width+kernelSize-1, height+kernelSize-1 # Padding input inputPad =", "widthPad, heightPad = width+kernelSize-1, height+kernelSize-1 # Padding input inputPad = createImageF(widthPad, heightPad) for", "createImageF(width, height) # Set the pixels of a flat kernel for x,y in", "the log of the power of the input image and template powerImage =", "2 * maxFrequencyW, 1 + 2 * maxFrequencyH , 
2) for kw,kh in", "kernelSize), range(0, kernelSize)): kernelImage[y, x] = 255.0 # Padding size widthPad, heightPad =", "maxFrequencyH = computeCoefficients(inputPad) templateCoeff, _, _ = computeCoefficients(templatePadFlip) # Show the log of", "import imageReadL, showImageL, createImageL, showImageF, createImageF from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients from", "Chapter 3 FourierConvolution: Filter an image by using the Fourier transform ''' #", "templateCoeff, _, _ = computeCoefficients(templatePadFlip) # Show the log of the power of", "powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain multiplication resultCoeff = createImageF(1 + 2", "= int(kernelSize/2) for x,y in itertools.product(range(0, width), range(0, height)): outPad[y,x] = outputImage[y +", "\"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize = 9 # Read image into array inputImage,", "= inputImage[y,x] # Padding and flip template templatePadFlip = createImageF(widthPad, heightPad) for x,y", "image into array inputImage, width, height = imageReadL(pathToDir + imageName) # Show input", "from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF from FourierUtilities import computeCoefficients, reconstruction,", "Iteration from timeit import itertools ''' Parameters: pathToDir = Input image directory imageName", "kernelSize), range(0, kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients imageCoeff,", "range(0, kernelSize)): templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1] showImageF(templatePadFlip) # Compute coefficients imageCoeff, maxFrequencyW,", "+ 1), \\ range(-maxFrequencyH, maxFrequencyH + 1)): w = kw + maxFrequencyW h", "height+kernelSize-1 # Padding input inputPad = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, width),", "outPad[y,x] 
= outputImage[y + halfKernel, x + halfKernel] # Show filter image showImageF(outPad)", "the power of the input image and template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog =", "= Input image name kernelSize = Size of the kernel ''' pathToDir =", "kernelSize)): kernelImage[y, x] = 255.0 # Padding size widthPad, heightPad = width+kernelSize-1, height+kernelSize-1", "of the input image and template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog)", "createImageF(width, height) halfKernel = int(kernelSize/2) for x,y in itertools.product(range(0, width), range(0, height)): outPad[y,x]", "resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH ,", "& <NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter an image by using the Fourier", "= computeCoefficients(inputPad) templateCoeff, _, _ = computeCoefficients(templatePadFlip) # Show the log of the", "= \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize = 9 # Read image into array", "x,y in itertools.product(range(0, width), range(0, height)): outPad[y,x] = outputImage[y + halfKernel, x +", "an image by using the Fourier transform ''' # Set module functions from", "Compute coefficients imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad) templateCoeff, _, _ = computeCoefficients(templatePadFlip) #", "= computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain multiplication resultCoeff = createImageF(1", "heightPad) for x,y in itertools.product(range(0, width), range(0, height)): inputPad[y,x] = inputImage[y,x] # Padding", "and flip template templatePadFlip = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, kernelSize), range(0,", "directory imageName = Input image name kernelSize = Size of the kernel '''", "name kernelSize = Size of the kernel ''' 
pathToDir = \"../../Images/Chapter3/Input/\" imageName =", "image showImageL(inputImage) # Create Kernel kernelImage = createImageF(width, height) # Set the pixels", "Show input image showImageL(inputImage) # Create Kernel kernelImage = createImageF(width, height) # Set", "255.0 # Padding size widthPad, heightPad = width+kernelSize-1, height+kernelSize-1 # Padding input inputPad", "flat kernel for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): kernelImage[y, x] = 255.0", "templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] * templateCoeff[h,w][1]) # Power result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog", "range(0, height)): outPad[y,x] = outputImage[y + halfKernel, x + halfKernel] # Show filter", "# Iteration from timeit import itertools ''' Parameters: pathToDir = Input image directory", "and template powerImage = computePowerfromCoefficients(imageCoeff) powerImageLog = imageLogF(powerImage) showImageF(powerImageLog) powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog", "<NAME> http://www.southampton.ac.uk/~msn/book/ Chapter 3 FourierConvolution: Filter an image by using the Fourier transform", "* maxFrequencyW, 1 + 2 * maxFrequencyH , 2) for kw,kh in itertools.product(range(-maxFrequencyW,", "FourierConvolution: Filter an image by using the Fourier transform ''' # Set module", "* templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] +", "transform ''' # Set module functions from ImageUtilities import imageReadL, showImageL, createImageL, showImageF,", "createImageL, showImageF, createImageF from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF", "x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): kernelImage[y, x] = 255.0 # Padding size", "itertools.product(range(0, kernelSize), range(0, kernelSize)): 
kernelImage[y, x] = 255.0 # Padding size widthPad, heightPad", "x] = 255.0 # Padding size widthPad, heightPad = width+kernelSize-1, height+kernelSize-1 # Padding", "templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\", "# Create Kernel kernelImage = createImageF(width, height) # Set the pixels of a", "2 * maxFrequencyH , 2) for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \\", "powerTemplate = computePowerfromCoefficients(templateCoeff) powerTemplateLog = imageLogF(powerTemplate) showImageF(powerTemplateLog) # Frequency domain multiplication resultCoeff =", "Input image directory imageName = Input image name kernelSize = Size of the", "Set module functions from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF from FourierUtilities", "= createImageF(1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH , 2)", "+ maxFrequencyW h = kh + maxFrequencyH resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] -", "= (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \\ imageCoeff[h,w][0] * templateCoeff[h,w][1]) # Power result powerResult", "multiplication resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH", "the kernel ''' pathToDir = \"../../Images/Chapter3/Input/\" imageName = \"Eye.png\" kernelSize = 9 #", "# Frequency domain multiplication resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1 +", "height)): outPad[y,x] = outputImage[y + halfKernel, x + halfKernel] # Show filter image", "template templatePadFlip = createImageF(widthPad, heightPad) for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)): templatePadFlip[y,", "input image showImageL(inputImage) # Create Kernel kernelImage = createImageF(width, height) # Set the", "= width+kernelSize-1, height+kernelSize-1 # Padding input inputPad = createImageF(widthPad, heightPad) for x,y in", "+ \\ imageCoeff[h,w][0] * 
templateCoeff[h,w][1]) # Power result powerResult = computePowerfromCoefficients(resultCoeff) powerResultLog =", "itertools.product(range(0, width), range(0, height)): outPad[y,x] = outputImage[y + halfKernel, x + halfKernel] #", "int(kernelSize/2) for x,y in itertools.product(range(0, width), range(0, height)): outPad[y,x] = outputImage[y + halfKernel,", "maxFrequencyW, 1 + 2 * maxFrequencyH , 2) for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW", "1 + 2 * maxFrequencyH , 2) for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW +", "into array inputImage, width, height = imageReadL(pathToDir + imageName) # Show input image", "reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF # Iteration from timeit import itertools '''", "computeCoefficients(inputPad) templateCoeff, _, _ = computeCoefficients(templatePadFlip) # Show the log of the power", "= (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \\ imageCoeff[h,w][1] * templateCoeff[h,w][1]) resultCoeff[h,w][1] = (imageCoeff[h,w][1] *", "import computeCoefficients, reconstruction, computePowerfromCoefficients from ImageOperatorsUtilities import imageLogF # Iteration from timeit import" ]
[ "update keys def get_weights_list(self): \"\"\" Returns: A list of all weights the node", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "bytes elif self.final_weights_quantization_cfg is None: # float coefficients memory = params * 4", "as the weight. \"\"\" res = [k for k in self.weights.keys() if name", "or not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node", "None: assert type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits /", "shape of the node. output_shape: Input tensor shape of the node. weights: Dictionary", "node (for example, for logging in TensorBoard) we need a way to create", "a reused layer. reuse_group: Name of group of nodes from the same reused", "node was duplicated and represents a reused layer. reuse_group: Name of group of", "it if not exist. Args: name: Name of the weight the node holds.", "# in bytes return memory def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a node can", "under the License. # ============================================================================== import copy from typing import Dict, Any, Tuple", "assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns: String that represents", "aimed to build such an unified dictionary for a node. 
Returns: A dictionary", "for qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\"", "None): \"\"\" Init a Node object. Args: name: Node's name framework_attr: Framework attributes", "create a single dictionary from all candidates. This method is aimed to build", "this file except in compliance with the License. # You may obtain a", "self.weights[name] = tensor self.weights_keys = list(self.weights.keys()) # update keys def get_weights_list(self): \"\"\" Returns:", "# ============================================================================== import copy from typing import Dict, Any, Tuple import numpy as", "None]) assert int(node_num_params) == node_num_params return int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int = None)", "typing import Dict, Any, Tuple import numpy as np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE,", "Any, Tuple import numpy as np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode:", "k in self.weights.keys() if name in k] if len(res) == 1: self.weights[res[0]] =", "node's memory requires. \"\"\" params = self.get_num_parameters() if by_candidate_idx is not None: assert", "type, reuse: bool = False, reuse_group: str = None, quantization_attr: Dict[str, Any] =", "name. Args: name: Name of the variable for a node's weight. Returns: A", "def __repr__(self): \"\"\" Returns: String that represents the node. \"\"\" return f'{self.type.__name__}:{self.name}' def", "elif self.final_weights_quantization_cfg is None: # float coefficients memory = params * 4 else:", "if len(res) == 1: # Make sure there are no duplicates return self.weights[res[0]]", "Returns: A node's weight (by its name). 
\"\"\" res = [k for k", "shared_attributes: if shared_attr in attr: unified_attr = [] for candidate in self.candidates_weights_quantization_cfg: unified_attr.append(getattr(candidate,", "ANY KIND, either express or implied. # See the License for the specific", "None self.prior_info = None @property def type(self): \"\"\" A function to get the", "in self.weights.keys() if name in k] if len(res) == 1: # Make sure", "the node. weights: Dictionary from a variable name to the weights with that", "\"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__)", "= [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr", "float coefficients memory = params * 4 else: memory = params * self.final_weights_quantization_cfg.weights_n_bits", "-> np.ndarray: \"\"\" Get a node's weight by its name. Args: name: Name", "self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns: String that", "weight the node holds. tensor: Numpy array to set as the weight. \"\"\"", "graph that represents the model. \"\"\" def __init__(self, name: str, framework_attr: Dict[str, Any],", "its name. Args: name: Name of the variable for a node's weight. Returns:", "reuse self.reuse_group = reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg = None", "None) -> float: \"\"\" Returns: Number of bytes the node's memory requires. 
\"\"\"", "if shared_attr in attr: unified_attr = [] for candidate in self.candidates_weights_quantization_cfg: unified_attr.append(getattr(candidate, shared_attr))", "configuration candidates. \"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if self.is_weights_quantization_enabled(): attr", "to represent a node in a graph that represents the model. \"\"\" def", "tensor shape of the node. weights: Dictionary from a variable name to the", "def is_weights_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node weights quantization is enabled or", "weight by its name. Args: name: Name of the variable for a node's", "Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0", "layer. quantization_attr: Attributes the node holds regarding how it should be quantized. \"\"\"", "A function to get the node's layer_class op for convenient comparison :return: the", "[self.weights[k] for k in self.weights.keys() if self.weights[k] is not None] def get_num_parameters(self) ->", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "in a graph that represents the model. \"\"\" def __init__(self, name: str, framework_attr:", "variable for a node's weight. Returns: A node's weight (by its name). \"\"\"", "node. weights: Dictionary from a variable name to the weights with that name", "holds. \"\"\" return [self.weights[k] for k in self.weights.keys() if self.weights[k] is not None]", "reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "None: # float coefficients memory = params * 4 else: memory = params", "not exist self.weights[name] = tensor self.weights_keys = list(self.weights.keys()) # update keys def get_weights_list(self):", "__init__(self, name: str, framework_attr: Dict[str, Any], input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[str, np.ndarray],", "self.weights[res[0]] = tensor else: # Add if not exist self.weights[name] = tensor self.weights_keys", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# Add if not exist self.weights[name] = tensor self.weights_keys = list(self.weights.keys()) # update", "Add if not exist self.weights[name] = tensor self.weights_keys = list(self.weights.keys()) # update keys", "in the layer the node represents. layer_class: Class path of the layer this", "name framework_attr: Framework attributes the layer had which the node holds. input_shape: Input", "containing information from node's weight quantization configuration candidates. \"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE]", "OF ANY KIND, either express or implied. # See the License for the", "input_shape self.output_shape = output_shape self.weights = weights self.layer_class = layer_class self.reuse = reuse", "# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved. # # Licensed", "[k for k in self.weights.keys() if name in k] if len(res) == 1:", "layer_class self.reuse = reuse self.reuse_group = reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg = None", "with that name in the layer the node represents. layer_class: Class path of", "framework_attr: Framework attributes the layer had which the node holds. input_shape: Input tensor", "multiple candidates for weights quantization configuration. 
In order to display a single view", "== node_num_params return int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int = None) -> float: \"\"\"", "name: Name of the variable for a node's weight. Returns: A node's weight", "CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class to represent a node in a graph that", "node's weight by its name. Args: name: Name of the variable for a", "for weights quantization configuration. In order to display a single view of a", "Class to represent a node in a graph that represents the model. \"\"\"", "to build such an unified dictionary for a node. Returns: A dictionary containing", "self.weights[res[0]] else: return None def set_weights_by_keys(self, name: str, tensor: np.ndarray): \"\"\" Set a", "node's layer_class \"\"\" return self.layer_class def is_activation_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node", "weight to one of the existing node's weights, or add it if not", "def get_num_parameters(self) -> int: \"\"\" Returns: Number of parameters the node holds. \"\"\"", "Dict, Any, Tuple import numpy as np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class", "node's layer_class op for convenient comparison :return: the node's layer_class \"\"\" return self.layer_class", "for convenient comparison :return: the node's layer_class \"\"\" return self.layer_class def is_activation_quantization_enabled(self) ->", "node holds. tensor: Numpy array to set as the weight. \"\"\" res =", "name self.framework_attr = framework_attr self.quantization_attr = quantization_attr if quantization_attr is not None else", "order to display a single view of a node (for example, for logging", "\"\"\" A function to get the node's layer_class op for convenient comparison :return:", "quantized. 
\"\"\" self.name = name self.framework_attr = framework_attr self.quantization_attr = quantization_attr if quantization_attr", "params * self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes return memory def get_unified_candidates_dict(self): \"\"\"", "of bytes the node's memory requires. \"\"\" params = self.get_num_parameters() if by_candidate_idx is", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "the node holds regarding how it should be quantized. \"\"\" self.name = name", "Mixed-Precision, a node can have multiple candidates for weights quantization configuration. In order", "In Mixed-Precision, a node can have multiple candidates for weights quantization configuration. In", "shape of the node. weights: Dictionary from a variable name to the weights", "represents. layer_class: Class path of the layer this node represents. reuse: Whether this", "quantization configuration candidates. \"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if self.is_weights_quantization_enabled():", "\"\"\" In Mixed-Precision, a node can have multiple candidates for weights quantization configuration.", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "of nodes from the same reused layer. quantization_attr: Attributes the node holds regarding", "import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class to represent a node in a", "weights, or add it if not exist. Args: name: Name of the weight", "Returns: Whether node weights quantization is enabled or not. \"\"\" for qc in", "Returns: String that represents the node. \"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str)", "Name of the variable for a node's weight. Returns: A node's weight (by", "Tuple[Any], weights: Dict[str, np.ndarray], layer_class: type, reuse: bool = False, reuse_group: str =", "reused layer. 
quantization_attr: Attributes the node holds regarding how it should be quantized.", "Dict[str, Any], input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[str, np.ndarray], layer_class: type, reuse: bool", "quantization_attr is not None else dict() self.input_shape = input_shape self.output_shape = output_shape self.weights", "attr: unified_attr = [] for candidate in self.candidates_weights_quantization_cfg: unified_attr.append(getattr(candidate, shared_attr)) attr[shared_attr] = unified_attr", "Name of group of nodes from the same reused layer. quantization_attr: Attributes the", "group of nodes from the same reused layer. quantization_attr: Attributes the node holds", "holds. tensor: Numpy array to set as the weight. \"\"\" res = [k", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "node holds. \"\"\" return [self.weights[k] for k in self.weights.keys() if self.weights[k] is not", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "duplicated and represents a reused layer. reuse_group: Name of group of nodes from", "as np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class to represent", "shared_attr in shared_attributes: if shared_attr in attr: unified_attr = [] for candidate in", "object. Args: name: Node's name framework_attr: Framework attributes the layer had which the", "self.final_weights_quantization_cfg is None: # float coefficients memory = params * 4 else: memory", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "list(self.weights.keys()) # update keys def get_weights_list(self): \"\"\" Returns: A list of all weights", "holds regarding how it should be quantized. 
\"\"\" self.name = name self.framework_attr =", "are no duplicates return self.weights[res[0]] else: return None def set_weights_by_keys(self, name: str, tensor:", "in shared_attributes: if shared_attr in attr: unified_attr = [] for candidate in self.candidates_weights_quantization_cfg:", "BaseNode: \"\"\" Class to represent a node in a graph that represents the", "required by applicable law or agreed to in writing, software # distributed under", "list of all weights the node holds. \"\"\" return [self.weights[k] for k in", "applicable law or agreed to in writing, software # distributed under the License", "str, framework_attr: Dict[str, Any], input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[str, np.ndarray], layer_class: type,", "be quantized. \"\"\" self.name = name self.framework_attr = framework_attr self.quantization_attr = quantization_attr if", "get the node's layer_class op for convenient comparison :return: the node's layer_class \"\"\"", "of the variable for a node's weight. Returns: A node's weight (by its", "or agreed to in writing, software # distributed under the License is distributed", "should be quantized. \"\"\" self.name = name self.framework_attr = framework_attr self.quantization_attr = quantization_attr", "__repr__(self): \"\"\" Returns: String that represents the node. \"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self,", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "All rights reserved. # # Licensed under the Apache License, Version 2.0 (the", "add it if not exist. Args: name: Name of the weight the node", "represents. reuse: Whether this node was duplicated and represents a reused layer. 
reuse_group:", "logging in TensorBoard) we need a way to create a single dictionary from", "display a single view of a node (for example, for logging in TensorBoard)", "in bytes return memory def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a node can have", "and represents a reused layer. reuse_group: Name of group of nodes from the", "# in bytes elif self.final_weights_quantization_cfg is None: # float coefficients memory = params", "copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes: if shared_attr in attr: unified_attr = [] for", "by_candidate_idx: int = None) -> float: \"\"\" Returns: Number of bytes the node's", "Whether node activation quantization is enabled or not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self)", "the node's layer_class op for convenient comparison :return: the node's layer_class \"\"\" return", "str, tensor: np.ndarray): \"\"\" Set a new weight to one of the existing", "parameters the node holds. \"\"\" node_num_params = np.sum([v.flatten().shape[0] for v in self.weights.values() if", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "the variable for a node's weight. Returns: A node's weight (by its name).", "layer this node represents. reuse: Whether this node was duplicated and represents a", "writing, software # distributed under the License is distributed on an \"AS IS\"", "framework_attr self.quantization_attr = quantization_attr if quantization_attr is not None else dict() self.input_shape =", "copy from typing import Dict, Any, Tuple import numpy as np from model_compression_toolkit.common.constants", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "== 1: # Make sure there are no duplicates return self.weights[res[0]] else: return", "node's weights, or add it if not exist. Args: name: Name of the", "candidates. 
This method is aimed to build such an unified dictionary for a", "License. # You may obtain a copy of the License at # #", "1: # Make sure there are no duplicates return self.weights[res[0]] else: return None", "None else dict() self.input_shape = input_shape self.output_shape = output_shape self.weights = weights self.layer_class", "Numpy array to set as the weight. \"\"\" res = [k for k", "qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns:", "a node. Returns: A dictionary containing information from node's weight quantization configuration candidates.", "compliance with the License. # You may obtain a copy of the License", "duplicates return self.weights[res[0]] else: return None def set_weights_by_keys(self, name: str, tensor: np.ndarray): \"\"\"", "or not. \"\"\" for qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization", "np.ndarray], layer_class: type, reuse: bool = False, reuse_group: str = None, quantization_attr: Dict[str,", "Attributes the node holds regarding how it should be quantized. \"\"\" self.name =", "= reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg = None self.prior_info =", "configuration. In order to display a single view of a node (for example,", "A list of all weights the node holds. \"\"\" return [self.weights[k] for k", "<reponame>eladc-git/model_optimization # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved. # #", "the node. 
\"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str) -> np.ndarray: \"\"\" Get", "else dict() self.input_shape = input_shape self.output_shape = output_shape self.weights = weights self.layer_class =", "for a node. Returns: A dictionary containing information from node's weight quantization configuration", "if name in k] if len(res) == 1: self.weights[res[0]] = tensor else: #", "self.candidates_weights_quantization_cfg = None self.prior_info = None @property def type(self): \"\"\" A function to", "in bytes elif self.final_weights_quantization_cfg is None: # float coefficients memory = params *", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "name: Name of the weight the node holds. tensor: Numpy array to set", "activation quantization is enabled or not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool:", "a node's weight by its name. Args: name: Name of the variable for", "not None]) assert int(node_num_params) == node_num_params return int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int =", "name: str) -> np.ndarray: \"\"\" Get a node's weight by its name. Args:", "such an unified dictionary for a node. Returns: A dictionary containing information from", "bool: \"\"\" Returns: Whether node weights quantization is enabled or not. \"\"\" for", "unified_attr = [] for candidate in self.candidates_weights_quantization_cfg: unified_attr.append(getattr(candidate, shared_attr)) attr[shared_attr] = unified_attr return", "None, quantization_attr: Dict[str, Any] = None): \"\"\" Init a Node object. 
Args: name:", "* self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes elif self.final_weights_quantization_cfg is None: # float", "framework_attr: Dict[str, Any], input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[str, np.ndarray], layer_class: type, reuse:", "in TensorBoard) we need a way to create a single dictionary from all", "node. Returns: A dictionary containing information from node's weight quantization configuration candidates. \"\"\"", "weights quantization is enabled or not. \"\"\" for qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization", "self.weights.keys() if name in k] if len(res) == 1: self.weights[res[0]] = tensor else:", "this node was duplicated and represents a reused layer. reuse_group: Name of group", "* 4 else: memory = params * self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes", "from node's weight quantization configuration candidates. \"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr =", "by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes", "weight. Returns: A node's weight (by its name). \"\"\" res = [k for", "self.layer_class = layer_class self.reuse = reuse self.reuse_group = reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg", "of a node (for example, for logging in TensorBoard) we need a way", "self.layer_class def is_activation_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node activation quantization is enabled", "in attr: unified_attr = [] for candidate in self.candidates_weights_quantization_cfg: unified_attr.append(getattr(candidate, shared_attr)) attr[shared_attr] =", "not exist. Args: name: Name of the weight the node holds. 
tensor: Numpy", "not use this file except in compliance with the License. # You may", "str = None, quantization_attr: Dict[str, Any] = None): \"\"\" Init a Node object.", "\"\"\" Returns: A list of all weights the node holds. \"\"\" return [self.weights[k]", "\"\"\" for qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self):", "the node. output_shape: Input tensor shape of the node. weights: Dictionary from a", "License, Version 2.0 (the \"License\"); # you may not use this file except", "regarding how it should be quantized. \"\"\" self.name = name self.framework_attr = framework_attr", "None] def get_num_parameters(self) -> int: \"\"\" Returns: Number of parameters the node holds.", "a variable name to the weights with that name in the layer the", "layer_class: Class path of the layer this node represents. reuse: Whether this node", "from the same reused layer. quantization_attr: Attributes the node holds regarding how it", "tensor: np.ndarray): \"\"\" Set a new weight to one of the existing node's", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "name to the weights with that name in the layer the node represents.", "1: self.weights[res[0]] = tensor else: # Add if not exist self.weights[name] = tensor", "node activation quantization is enabled or not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) ->", "TensorBoard) we need a way to create a single dictionary from all candidates.", "the same reused layer. 
quantization_attr: Attributes the node holds regarding how it should", "by_candidate_idx is not None: assert type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory = params", "def set_weights_by_keys(self, name: str, tensor: np.ndarray): \"\"\" Set a new weight to one", "# you may not use this file except in compliance with the License.", "the weight. \"\"\" res = [k for k in self.weights.keys() if name in", "memory requires. \"\"\" params = self.get_num_parameters() if by_candidate_idx is not None: assert type(by_candidate_idx)==int", "self.get_num_parameters() if by_candidate_idx is not None: assert type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory", "agreed to in writing, software # distributed under the License is distributed on", "a node in a graph that represents the model. \"\"\" def __init__(self, name:", "int(node_num_params) == node_num_params return int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int = None) -> float:", "Class path of the layer this node represents. reuse: Whether this node was", "is enabled or not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool: \"\"\" Returns:", "= quantization_attr if quantization_attr is not None else dict() self.input_shape = input_shape self.output_shape", "from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class to represent a node", "Returns: Number of parameters the node holds. \"\"\" node_num_params = np.sum([v.flatten().shape[0] for v", "= None self.candidates_weights_quantization_cfg = None self.prior_info = None @property def type(self): \"\"\" A", "(the \"License\"); # you may not use this file except in compliance with", "-> bool: \"\"\" Returns: Whether node activation quantization is enabled or not. 
\"\"\"", "weights: Dict[str, np.ndarray], layer_class: type, reuse: bool = False, reuse_group: str = None,", "convenient comparison :return: the node's layer_class \"\"\" return self.layer_class def is_activation_quantization_enabled(self) -> bool:", "is not None]) assert int(node_num_params) == node_num_params return int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int", "layer. reuse_group: Name of group of nodes from the same reused layer. quantization_attr:", "is not None else dict() self.input_shape = input_shape self.output_shape = output_shape self.weights =", "self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns: String that represents the node. \"\"\" return f'{self.type.__name__}:{self.name}'", "# Unless required by applicable law or agreed to in writing, software #", "dictionary from all candidates. This method is aimed to build such an unified", "Semiconductors Israel, Inc. All rights reserved. # # Licensed under the Apache License,", "WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes:", "view of a node (for example, for logging in TensorBoard) we need a", "by applicable law or agreed to in writing, software # distributed under the", "node weights quantization is enabled or not. \"\"\" for qc in self.candidates_weights_quantization_cfg: assert", "a single view of a node (for example, for logging in TensorBoard) we", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class to represent a node in a graph", "= name self.framework_attr = framework_attr self.quantization_attr = quantization_attr if quantization_attr is not None", "holds. 
\"\"\" node_num_params = np.sum([v.flatten().shape[0] for v in self.weights.values() if v is not", "int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int = None) -> float: \"\"\" Returns: Number of", "there are no duplicates return self.weights[res[0]] else: return None def set_weights_by_keys(self, name: str,", "np.ndarray): \"\"\" Set a new weight to one of the existing node's weights,", "= input_shape self.output_shape = output_shape self.weights = weights self.layer_class = layer_class self.reuse =", "the weights with that name in the layer the node represents. layer_class: Class", "# update keys def get_weights_list(self): \"\"\" Returns: A list of all weights the", "node. \"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str) -> np.ndarray: \"\"\" Get a", "\"\"\" self.name = name self.framework_attr = framework_attr self.quantization_attr = quantization_attr if quantization_attr is", "file except in compliance with the License. # You may obtain a copy", "8 # in bytes elif self.final_weights_quantization_cfg is None: # float coefficients memory =", "/ 8 # in bytes return memory def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a", "= [] for candidate in self.candidates_weights_quantization_cfg: unified_attr.append(getattr(candidate, shared_attr)) attr[shared_attr] = unified_attr return attr", "self.prior_info = None @property def type(self): \"\"\" A function to get the node's", "quantization is enabled or not. \"\"\" for qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization ==", "sure there are no duplicates return self.weights[res[0]] else: return None def set_weights_by_keys(self, name:", "Any], input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[str, np.ndarray], layer_class: type, reuse: bool =", "Args: name: Name of the variable for a node's weight. 
Returns: A node's", "= None): \"\"\" Init a Node object. Args: name: Node's name framework_attr: Framework", "(for example, for logging in TensorBoard) we need a way to create a", "self.weights.values() if v is not None]) assert int(node_num_params) == node_num_params return int(node_num_params) def", "candidates. \"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if self.is_weights_quantization_enabled(): attr =", "in k] if len(res) == 1: # Make sure there are no duplicates", "License for the specific language governing permissions and # limitations under the License.", "self.reuse_group = reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg = None self.prior_info", "float: \"\"\" Returns: Number of bytes the node's memory requires. \"\"\" params =", "to in writing, software # distributed under the License is distributed on an", "len(res) == 1: # Make sure there are no duplicates return self.weights[res[0]] else:", "String that represents the node. \"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str) ->", "name in k] if len(res) == 1: self.weights[res[0]] = tensor else: # Add", "function to get the node's layer_class op for convenient comparison :return: the node's", "k] if len(res) == 1: self.weights[res[0]] = tensor else: # Add if not", "tensor else: # Add if not exist self.weights[name] = tensor self.weights_keys = list(self.weights.keys())", "self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes: if shared_attr in attr: unified_attr", "type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 #", "reuse: Whether this node was duplicated and represents a reused layer. 
reuse_group: Name", "implied. # See the License for the specific language governing permissions and #", "\"\"\" return self.layer_class def is_activation_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node activation quantization", "\"License\"); # you may not use this file except in compliance with the", "return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node weights quantization is", "= tensor else: # Add if not exist self.weights[name] = tensor self.weights_keys =", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "len(self.candidates_weights_quantization_cfg) memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes elif self.final_weights_quantization_cfg", "\"\"\" return [self.weights[k] for k in self.weights.keys() if self.weights[k] is not None] def", "None self.candidates_weights_quantization_cfg = None self.prior_info = None @property def type(self): \"\"\" A function", "not None: assert type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits", "Init a Node object. Args: name: Node's name framework_attr: Framework attributes the layer", "single view of a node (for example, for logging in TensorBoard) we need", "Sony Semiconductors Israel, Inc. All rights reserved. # # Licensed under the Apache", "is enabled or not. \"\"\" for qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization", "Returns: Number of bytes the node's memory requires. \"\"\" params = self.get_num_parameters() if", "Number of parameters the node holds. \"\"\" node_num_params = np.sum([v.flatten().shape[0] for v in", "Israel, Inc. All rights reserved. 
# # Licensed under the Apache License, Version", "dictionary containing information from node's weight quantization configuration candidates. \"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE,", "layer_class op for convenient comparison :return: the node's layer_class \"\"\" return self.layer_class def", "\"\"\" Returns: Number of bytes the node's memory requires. \"\"\" params = self.get_num_parameters()", "way to create a single dictionary from all candidates. This method is aimed", "the License. # ============================================================================== import copy from typing import Dict, Any, Tuple import", "or implied. # See the License for the specific language governing permissions and", "to get the node's layer_class op for convenient comparison :return: the node's layer_class", "None def set_weights_by_keys(self, name: str, tensor: np.ndarray): \"\"\" Set a new weight to", "type(self): \"\"\" A function to get the node's layer_class op for convenient comparison", "if quantization_attr is not None else dict() self.input_shape = input_shape self.output_shape = output_shape", "if len(res) == 1: self.weights[res[0]] = tensor else: # Add if not exist", "self.weights.keys() if self.weights[k] is not None] def get_num_parameters(self) -> int: \"\"\" Returns: Number", "output_shape self.weights = weights self.layer_class = layer_class self.reuse = reuse self.reuse_group = reuse_group", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "weight (by its name). \"\"\" res = [k for k in self.weights.keys() if", "reused layer. reuse_group: Name of group of nodes from the same reused layer.", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "memory def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a node can have multiple candidates for", "\"\"\" Returns: Whether node activation quantization is enabled or not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization", "all candidates. This method is aimed to build such an unified dictionary for", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "quantization_attr: Dict[str, Any] = None): \"\"\" Init a Node object. Args: name: Node's", "represents the model. \"\"\" def __init__(self, name: str, framework_attr: Dict[str, Any], input_shape: Tuple[Any],", "candidates for weights quantization configuration. In order to display a single view of", "exist self.weights[name] = tensor self.weights_keys = list(self.weights.keys()) # update keys def get_weights_list(self): \"\"\"", "self.reuse = reuse self.reuse_group = reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg", "Returns: Whether node activation quantization is enabled or not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization def", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "the node holds. input_shape: Input tensor shape of the node. output_shape: Input tensor", "return int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int = None) -> float: \"\"\" Returns: Number", "of the existing node's weights, or add it if not exist. 
Args: name:", "f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str) -> np.ndarray: \"\"\" Get a node's weight by", "============================================================================== import copy from typing import Dict, Any, Tuple import numpy as np", "specific language governing permissions and # limitations under the License. # ============================================================================== import", "that represents the model. \"\"\" def __init__(self, name: str, framework_attr: Dict[str, Any], input_shape:", "in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns: String", "= tensor self.weights_keys = list(self.weights.keys()) # update keys def get_weights_list(self): \"\"\" Returns: A", "get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a node can have multiple candidates for weights quantization", "example, for logging in TensorBoard) we need a way to create a single", "node can have multiple candidates for weights quantization configuration. In order to display", "weights the node holds. \"\"\" return [self.weights[k] for k in self.weights.keys() if self.weights[k]", "if name in k] if len(res) == 1: # Make sure there are", "model. \"\"\" def __init__(self, name: str, framework_attr: Dict[str, Any], input_shape: Tuple[Any], output_shape: Tuple[Any],", "# limitations under the License. # ============================================================================== import copy from typing import Dict,", "tensor shape of the node. output_shape: Input tensor shape of the node. 
weights:", "= params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes elif self.final_weights_quantization_cfg is None:", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "node holds regarding how it should be quantized. \"\"\" self.name = name self.framework_attr", "Args: name: Name of the weight the node holds. tensor: Numpy array to", "a node (for example, for logging in TensorBoard) we need a way to", "you may not use this file except in compliance with the License. #", "attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes: if shared_attr in attr: unified_attr =", "\"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node weights quantization", "that name in the layer the node represents. layer_class: Class path of the", "import Dict, Any, Tuple import numpy as np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE", "int: \"\"\" Returns: Number of parameters the node holds. \"\"\" node_num_params = np.sum([v.flatten().shape[0]", "# Make sure there are no duplicates return self.weights[res[0]] else: return None def", "quantization is enabled or not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool: \"\"\"", "not None else dict() self.input_shape = input_shape self.output_shape = output_shape self.weights = weights", "bytes the node's memory requires. 
\"\"\" params = self.get_num_parameters() if by_candidate_idx is not", "if by_candidate_idx is not None: assert type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory =", "import numpy as np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class", "set_weights_by_keys(self, name: str, tensor: np.ndarray): \"\"\" Set a new weight to one of", "weights quantization configuration. In order to display a single view of a node", "if not exist. Args: name: Name of the weight the node holds. tensor:", "use this file except in compliance with the License. # You may obtain", "a node can have multiple candidates for weights quantization configuration. In order to", "from typing import Dict, Any, Tuple import numpy as np from model_compression_toolkit.common.constants import", "(by its name). \"\"\" res = [k for k in self.weights.keys() if name", "[CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in", "layer the node represents. layer_class: Class path of the layer this node represents.", "self.output_shape = output_shape self.weights = weights self.layer_class = layer_class self.reuse = reuse self.reuse_group", "the node represents. layer_class: Class path of the layer this node represents. reuse:", "tensor self.weights_keys = list(self.weights.keys()) # update keys def get_weights_list(self): \"\"\" Returns: A list", "node represents. layer_class: Class path of the layer this node represents. reuse: Whether", "output_shape: Input tensor shape of the node. weights: Dictionary from a variable name", "which the node holds. input_shape: Input tensor shape of the node. output_shape: Input", "not. 
\"\"\" for qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def", "node in a graph that represents the model. \"\"\" def __init__(self, name: str,", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "Tuple[Any], output_shape: Tuple[Any], weights: Dict[str, np.ndarray], layer_class: type, reuse: bool = False, reuse_group:", "assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in", "of group of nodes from the same reused layer. quantization_attr: Attributes the node", "def get_weights_list(self): \"\"\" Returns: A list of all weights the node holds. \"\"\"", "of the node. weights: Dictionary from a variable name to the weights with", "output_shape: Tuple[Any], weights: Dict[str, np.ndarray], layer_class: type, reuse: bool = False, reuse_group: str", "v is not None]) assert int(node_num_params) == node_num_params return int(node_num_params) def get_memory_bytes(self, by_candidate_idx:", "is not None: assert type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory = params *", "for shared_attr in shared_attributes: if shared_attr in attr: unified_attr = [] for candidate", "memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes elif self.final_weights_quantization_cfg is", "we need a way to create a single dictionary from all candidates. This", "the layer this node represents. reuse: Whether this node was duplicated and represents", "\"\"\" Get a node's weight by its name. 
Args: name: Name of the", "int = None) -> float: \"\"\" Returns: Number of bytes the node's memory", "return [self.weights[k] for k in self.weights.keys() if self.weights[k] is not None] def get_num_parameters(self)", "had which the node holds. input_shape: Input tensor shape of the node. output_shape:", "\"\"\" def __init__(self, name: str, framework_attr: Dict[str, Any], input_shape: Tuple[Any], output_shape: Tuple[Any], weights:", "2.0 (the \"License\"); # you may not use this file except in compliance", "Set a new weight to one of the existing node's weights, or add", "from a variable name to the weights with that name in the layer", "Returns: A dictionary containing information from node's weight quantization configuration candidates. \"\"\" shared_attributes", "tensor: Numpy array to set as the weight. \"\"\" res = [k for", "new weight to one of the existing node's weights, or add it if", "model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class to represent a node in", "array to set as the weight. \"\"\" res = [k for k in", "for the specific language governing permissions and # limitations under the License. #", "self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node weights quantization is enabled", "Args: name: Node's name framework_attr: Framework attributes the layer had which the node", "\"\"\" node_num_params = np.sum([v.flatten().shape[0] for v in self.weights.values() if v is not None])", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "is_weights_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node weights quantization is enabled or not.", "input_shape: Input tensor shape of the node. output_shape: Input tensor shape of the", "same reused layer. 
quantization_attr: Attributes the node holds regarding how it should be", "Get a node's weight by its name. Args: name: Name of the variable", "= None @property def type(self): \"\"\" A function to get the node's layer_class", "exist. Args: name: Name of the weight the node holds. tensor: Numpy array", "if v is not None]) assert int(node_num_params) == node_num_params return int(node_num_params) def get_memory_bytes(self,", "reuse_group: str = None, quantization_attr: Dict[str, Any] = None): \"\"\" Init a Node", "return memory def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a node can have multiple candidates", "a single dictionary from all candidates. This method is aimed to build such", "k in self.weights.keys() if self.weights[k] is not None] def get_num_parameters(self) -> int: \"\"\"", "# # Unless required by applicable law or agreed to in writing, software", "for k in self.weights.keys() if name in k] if len(res) == 1: self.weights[res[0]]", "Tuple import numpy as np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\"", "bool = False, reuse_group: str = None, quantization_attr: Dict[str, Any] = None): \"\"\"", "express or implied. 
# See the License for the specific language governing permissions", "not None] def get_num_parameters(self) -> int: \"\"\" Returns: Number of parameters the node", "Dict[str, np.ndarray], layer_class: type, reuse: bool = False, reuse_group: str = None, quantization_attr:", "dict() if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes: if shared_attr in", "name: str, framework_attr: Dict[str, Any], input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[str, np.ndarray], layer_class:", "This method is aimed to build such an unified dictionary for a node.", "= self.get_num_parameters() if by_candidate_idx is not None: assert type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg)", "Name of the weight the node holds. tensor: Numpy array to set as", "return self.weights[res[0]] else: return None def set_weights_by_keys(self, name: str, tensor: np.ndarray): \"\"\" Set", "not. \"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node weights", "for k in self.weights.keys() if name in k] if len(res) == 1: #", "dict() self.input_shape = input_shape self.output_shape = output_shape self.weights = weights self.layer_class = layer_class", "all weights the node holds. \"\"\" return [self.weights[k] for k in self.weights.keys() if", "how it should be quantized. \"\"\" self.name = name self.framework_attr = framework_attr self.quantization_attr", "node holds. \"\"\" node_num_params = np.sum([v.flatten().shape[0] for v in self.weights.values() if v is", "memory = params * self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes return memory def", "Input tensor shape of the node. 
output_shape: Input tensor shape of the node.", "/ 8 # in bytes elif self.final_weights_quantization_cfg is None: # float coefficients memory", "either express or implied. # See the License for the specific language governing", "for logging in TensorBoard) we need a way to create a single dictionary", "= None) -> float: \"\"\" Returns: Number of bytes the node's memory requires.", "Node's name framework_attr: Framework attributes the layer had which the node holds. input_shape:", "shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for", "= params * self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes return memory def get_unified_candidates_dict(self):", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "a node's weight. Returns: A node's weight (by its name). \"\"\" res =", "Framework attributes the layer had which the node holds. input_shape: Input tensor shape", "of parameters the node holds. \"\"\" node_num_params = np.sum([v.flatten().shape[0] for v in self.weights.values()", "node's weight (by its name). \"\"\" res = [k for k in self.weights.keys()", "reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg = None self.prior_info = None", "False, reuse_group: str = None, quantization_attr: Dict[str, Any] = None): \"\"\" Init a", "to the weights with that name in the layer the node represents. layer_class:", "is None: # float coefficients memory = params * 4 else: memory =", "was duplicated and represents a reused layer. 
reuse_group: Name of group of nodes", "= params * 4 else: memory = params * self.final_weights_quantization_cfg.weights_n_bits / 8 #", "< len(self.candidates_weights_quantization_cfg) memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes elif", "qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns: String that represents the node. \"\"\"", "def is_activation_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node activation quantization is enabled or", "node_num_params return int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int = None) -> float: \"\"\" Returns:", "return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str) -> np.ndarray: \"\"\" Get a node's weight", "if self.weights[k] is not None] def get_num_parameters(self) -> int: \"\"\" Returns: Number of", "of all weights the node holds. \"\"\" return [self.weights[k] for k in self.weights.keys()", "the License. # You may obtain a copy of the License at #", "attr = dict() if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes: if", "params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes elif self.final_weights_quantization_cfg is None: #", "memory = params * 4 else: memory = params * self.final_weights_quantization_cfg.weights_n_bits / 8", "else: return None def set_weights_by_keys(self, name: str, tensor: np.ndarray): \"\"\" Set a new", "@property def type(self): \"\"\" A function to get the node's layer_class op for", "self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes elif self.final_weights_quantization_cfg is None: # float coefficients", "from all candidates. 
This method is aimed to build such an unified dictionary", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "\"\"\" Class to represent a node in a graph that represents the model.", "Returns: A list of all weights the node holds. \"\"\" return [self.weights[k] for", "= dict() if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes: if shared_attr", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "-> bool: \"\"\" Returns: Whether node weights quantization is enabled or not. \"\"\"", "to set as the weight. \"\"\" res = [k for k in self.weights.keys()", "op for convenient comparison :return: the node's layer_class \"\"\" return self.layer_class def is_activation_quantization_enabled(self)", "is_activation_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node activation quantization is enabled or not.", "one of the existing node's weights, or add it if not exist. Args:", "else: # Add if not exist self.weights[name] = tensor self.weights_keys = list(self.weights.keys()) #", "= False, reuse_group: str = None, quantization_attr: Dict[str, Any] = None): \"\"\" Init", "-> int: \"\"\" Returns: Number of parameters the node holds. \"\"\" node_num_params =", "len(res) == 1: self.weights[res[0]] = tensor else: # Add if not exist self.weights[name]", "None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg = None self.prior_info = None @property def type(self):", "for a node's weight. Returns: A node's weight (by its name). \"\"\" res", "# float coefficients memory = params * 4 else: memory = params *", "== qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns: String that represents the node.", "\"\"\" Returns: String that represents the node. 
\"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name:", "return None def set_weights_by_keys(self, name: str, tensor: np.ndarray): \"\"\" Set a new weight", "name: Node's name framework_attr: Framework attributes the layer had which the node holds.", "that represents the node. \"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str) -> np.ndarray:", "import copy from typing import Dict, Any, Tuple import numpy as np from", "holds. input_shape: Input tensor shape of the node. output_shape: Input tensor shape of", "represents the node. \"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str) -> np.ndarray: \"\"\"", "= weights self.layer_class = layer_class self.reuse = reuse self.reuse_group = reuse_group self.activation_quantization_cfg =", "get_num_parameters(self) -> int: \"\"\" Returns: Number of parameters the node holds. \"\"\" node_num_params", "== 1: self.weights[res[0]] = tensor else: # Add if not exist self.weights[name] =", "weights: Dictionary from a variable name to the weights with that name in", "\"\"\" Returns: Number of parameters the node holds. \"\"\" node_num_params = np.sum([v.flatten().shape[0] for", "with the License. # You may obtain a copy of the License at", "A dictionary containing information from node's weight quantization configuration candidates. \"\"\" shared_attributes =", "the model. \"\"\" def __init__(self, name: str, framework_attr: Dict[str, Any], input_shape: Tuple[Any], output_shape:", "path of the layer this node represents. reuse: Whether this node was duplicated", "variable name to the weights with that name in the layer the node", "nodes from the same reused layer. quantization_attr: Attributes the node holds regarding how", "class BaseNode: \"\"\" Class to represent a node in a graph that represents", "reuse_group: Name of group of nodes from the same reused layer. 
quantization_attr: Attributes", "self.weights_keys = list(self.weights.keys()) # update keys def get_weights_list(self): \"\"\" Returns: A list of", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "governing permissions and # limitations under the License. # ============================================================================== import copy from", "\"\"\" Returns: Whether node weights quantization is enabled or not. \"\"\" for qc", "= np.sum([v.flatten().shape[0] for v in self.weights.values() if v is not None]) assert int(node_num_params)", "single dictionary from all candidates. This method is aimed to build such an", "language governing permissions and # limitations under the License. # ============================================================================== import copy", "rights reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", ":return: the node's layer_class \"\"\" return self.layer_class def is_activation_quantization_enabled(self) -> bool: \"\"\" Returns:", "def type(self): \"\"\" A function to get the node's layer_class op for convenient", "params * 4 else: memory = params * self.final_weights_quantization_cfg.weights_n_bits / 8 # in", "law or agreed to in writing, software # distributed under the License is", "self.weights[k] is not None] def get_num_parameters(self) -> int: \"\"\" Returns: Number of parameters", "the License for the specific language governing permissions and # limitations under the", "for k in self.weights.keys() if self.weights[k] is not None] def get_num_parameters(self) -> int:", "for v in self.weights.values() if v is not None]) assert int(node_num_params) == node_num_params", "this node represents. 
reuse: Whether this node was duplicated and represents a reused", "np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class to represent a", "params = self.get_num_parameters() if by_candidate_idx is not None: assert type(by_candidate_idx)==int assert by_candidate_idx <", "dictionary for a node. Returns: A dictionary containing information from node's weight quantization", "self.quantization_attr = quantization_attr if quantization_attr is not None else dict() self.input_shape = input_shape", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "k in self.weights.keys() if name in k] if len(res) == 1: # Make", "Whether this node was duplicated and represents a reused layer. reuse_group: Name of", "\"\"\" return f'{self.type.__name__}:{self.name}' def get_weights_by_keys(self, name: str) -> np.ndarray: \"\"\" Get a node's", "it should be quantized. \"\"\" self.name = name self.framework_attr = framework_attr self.quantization_attr =", "no duplicates return self.weights[res[0]] else: return None def set_weights_by_keys(self, name: str, tensor: np.ndarray):", "layer had which the node holds. input_shape: Input tensor shape of the node.", "node holds. input_shape: Input tensor shape of the node. output_shape: Input tensor shape", "node represents. reuse: Whether this node was duplicated and represents a reused layer.", "\"\"\" Init a Node object. Args: name: Node's name framework_attr: Framework attributes the", "or add it if not exist. Args: name: Name of the weight the", "Node object. Args: name: Node's name framework_attr: Framework attributes the layer had which", "name in k] if len(res) == 1: # Make sure there are no", "represents a reused layer. reuse_group: Name of group of nodes from the same", "= None self.prior_info = None @property def type(self): \"\"\" A function to get", "weight quantization configuration candidates. 
\"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict() if", "in compliance with the License. # You may obtain a copy of the", "weights with that name in the layer the node represents. layer_class: Class path", "= output_shape self.weights = weights self.layer_class = layer_class self.reuse = reuse self.reuse_group =", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "layer_class \"\"\" return self.layer_class def is_activation_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node activation", "str) -> np.ndarray: \"\"\" Get a node's weight by its name. Args: name:", "to display a single view of a node (for example, for logging in", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "limitations under the License. # ============================================================================== import copy from typing import Dict, Any,", "if self.is_weights_quantization_enabled(): attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes: if shared_attr in attr:", "numpy as np from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE class BaseNode: \"\"\" Class to", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "self.name = name self.framework_attr = framework_attr self.quantization_attr = quantization_attr if quantization_attr is not", "node_num_params = np.sum([v.flatten().shape[0] for v in self.weights.values() if v is not None]) assert", "enabled or not. 
\"\"\" for qc in self.candidates_weights_quantization_cfg: assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return", "input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[str, np.ndarray], layer_class: type, reuse: bool = False,", "bool: \"\"\" Returns: Whether node activation quantization is enabled or not. \"\"\" return", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "2021 Sony Semiconductors Israel, Inc. All rights reserved. # # Licensed under the", "a graph that represents the model. \"\"\" def __init__(self, name: str, framework_attr: Dict[str,", "= framework_attr self.quantization_attr = quantization_attr if quantization_attr is not None else dict() self.input_shape", "get_memory_bytes(self, by_candidate_idx: int = None) -> float: \"\"\" Returns: Number of bytes the", "can have multiple candidates for weights quantization configuration. In order to display a", "Input tensor shape of the node. weights: Dictionary from a variable name to", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "else: memory = params * self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes return memory", "\"\"\" params = self.get_num_parameters() if by_candidate_idx is not None: assert type(by_candidate_idx)==int assert by_candidate_idx", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "= None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg = None self.prior_info = None @property def", "an unified dictionary for a node. 
Returns: A dictionary containing information from node's", "res = [k for k in self.weights.keys() if name in k] if len(res)", "In order to display a single view of a node (for example, for", "\"\"\" res = [k for k in self.weights.keys() if name in k] if", "k] if len(res) == 1: # Make sure there are no duplicates return", "node. output_shape: Input tensor shape of the node. weights: Dictionary from a variable", "node's weight. Returns: A node's weight (by its name). \"\"\" res = [k", "name). \"\"\" res = [k for k in self.weights.keys() if name in k]", "set as the weight. \"\"\" res = [k for k in self.weights.keys() if", "None @property def type(self): \"\"\" A function to get the node's layer_class op", "= list(self.weights.keys()) # update keys def get_weights_list(self): \"\"\" Returns: A list of all", "in self.weights.keys() if self.weights[k] is not None] def get_num_parameters(self) -> int: \"\"\" Returns:", "its name). \"\"\" res = [k for k in self.weights.keys() if name in", "to create a single dictionary from all candidates. This method is aimed to", "keys def get_weights_list(self): \"\"\" Returns: A list of all weights the node holds.", "Dictionary from a variable name to the weights with that name in the", "a new weight to one of the existing node's weights, or add it", "Dict[str, Any] = None): \"\"\" Init a Node object. Args: name: Node's name", "need a way to create a single dictionary from all candidates. This method", "self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes return memory def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision,", "Number of bytes the node's memory requires. \"\"\" params = self.get_num_parameters() if by_candidate_idx", "the layer had which the node holds. input_shape: Input tensor shape of the", "and # limitations under the License. 
# ============================================================================== import copy from typing import", "self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns: String that represents the", "method is aimed to build such an unified dictionary for a node. Returns:", "information from node's weight quantization configuration candidates. \"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr", "= layer_class self.reuse = reuse self.reuse_group = reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg =", "the existing node's weights, or add it if not exist. Args: name: Name", "weights self.layer_class = layer_class self.reuse = reuse self.reuse_group = reuse_group self.activation_quantization_cfg = None", "Version 2.0 (the \"License\"); # you may not use this file except in", "return self.candidates_weights_quantization_cfg[0].enable_weights_quantization def __repr__(self): \"\"\" Returns: String that represents the node. \"\"\" return", "License. # ============================================================================== import copy from typing import Dict, Any, Tuple import numpy", "except in compliance with the License. # You may obtain a copy of", "Make sure there are no duplicates return self.weights[res[0]] else: return None def set_weights_by_keys(self,", "the specific language governing permissions and # limitations under the License. # ==============================================================================", "self.framework_attr = framework_attr self.quantization_attr = quantization_attr if quantization_attr is not None else dict()", "requires. 
\"\"\" params = self.get_num_parameters() if by_candidate_idx is not None: assert type(by_candidate_idx)==int assert", "shared_attr in attr: unified_attr = [] for candidate in self.candidates_weights_quantization_cfg: unified_attr.append(getattr(candidate, shared_attr)) attr[shared_attr]", "quantization configuration. In order to display a single view of a node (for", "the node's layer_class \"\"\" return self.layer_class def is_activation_quantization_enabled(self) -> bool: \"\"\" Returns: Whether", "comparison :return: the node's layer_class \"\"\" return self.layer_class def is_activation_quantization_enabled(self) -> bool: \"\"\"", "return self.layer_class def is_activation_quantization_enabled(self) -> bool: \"\"\" Returns: Whether node activation quantization is", "a way to create a single dictionary from all candidates. This method is", "of the weight the node holds. tensor: Numpy array to set as the", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg = None self.prior_info = None @property def type(self): \"\"\"", "if not exist self.weights[name] = tensor self.weights_keys = list(self.weights.keys()) # update keys def", "permissions and # limitations under the License. # ============================================================================== import copy from typing", "the node's memory requires. 
\"\"\" params = self.get_num_parameters() if by_candidate_idx is not None:", "bytes return memory def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a node can have multiple", "= [k for k in self.weights.keys() if name in k] if len(res) ==", "\"\"\" Set a new weight to one of the existing node's weights, or", "is not None] def get_num_parameters(self) -> int: \"\"\" Returns: Number of parameters the", "def __init__(self, name: str, framework_attr: Dict[str, Any], input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[str,", "build such an unified dictionary for a node. Returns: A dictionary containing information", "self.weights.keys() if name in k] if len(res) == 1: # Make sure there", "reuse: bool = False, reuse_group: str = None, quantization_attr: Dict[str, Any] = None):", "the layer the node represents. layer_class: Class path of the layer this node", "to one of the existing node's weights, or add it if not exist.", "get_weights_by_keys(self, name: str) -> np.ndarray: \"\"\" Get a node's weight by its name.", "the node holds. \"\"\" node_num_params = np.sum([v.flatten().shape[0] for v in self.weights.values() if v", "np.sum([v.flatten().shape[0] for v in self.weights.values() if v is not None]) assert int(node_num_params) ==", "coefficients memory = params * 4 else: memory = params * self.final_weights_quantization_cfg.weights_n_bits /", "in k] if len(res) == 1: self.weights[res[0]] = tensor else: # Add if", "A node's weight (by its name). \"\"\" res = [k for k in", "weight. \"\"\" res = [k for k in self.weights.keys() if name in k]", "4 else: memory = params * self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes return", "is aimed to build such an unified dictionary for a node. Returns: A", "the node holds. tensor: Numpy array to set as the weight. \"\"\" res", "of the layer this node represents. reuse: Whether this node was duplicated and", "node's weight quantization configuration candidates. 
\"\"\" shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE] attr = dict()", "name: str, tensor: np.ndarray): \"\"\" Set a new weight to one of the", "get_weights_list(self): \"\"\" Returns: A list of all weights the node holds. \"\"\" return", "have multiple candidates for weights quantization configuration. In order to display a single", "Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved. # # Licensed under", "np.ndarray: \"\"\" Get a node's weight by its name. Args: name: Name of", "attributes the layer had which the node holds. input_shape: Input tensor shape of", "self.input_shape = input_shape self.output_shape = output_shape self.weights = weights self.layer_class = layer_class self.reuse", "by its name. Args: name: Name of the variable for a node's weight.", "= reuse self.reuse_group = reuse_group self.activation_quantization_cfg = None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg =", "in self.weights.values() if v is not None]) assert int(node_num_params) == node_num_params return int(node_num_params)", "-> float: \"\"\" Returns: Number of bytes the node's memory requires. \"\"\" params", "8 # in bytes return memory def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a node", "assert int(node_num_params) == node_num_params return int(node_num_params) def get_memory_bytes(self, by_candidate_idx: int = None) ->", "the node holds. \"\"\" return [self.weights[k] for k in self.weights.keys() if self.weights[k] is", "unified dictionary for a node. Returns: A dictionary containing information from node's weight", "enabled or not. 
\"\"\" return self.activation_quantization_cfg.enable_activation_quantization def is_weights_quantization_enabled(self) -> bool: \"\"\" Returns: Whether", "* self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes return memory def get_unified_candidates_dict(self): \"\"\" In", "quantization_attr if quantization_attr is not None else dict() self.input_shape = input_shape self.output_shape =", "self.weights = weights self.layer_class = layer_class self.reuse = reuse self.reuse_group = reuse_group self.activation_quantization_cfg", "def get_unified_candidates_dict(self): \"\"\" In Mixed-Precision, a node can have multiple candidates for weights", "Whether node weights quantization is enabled or not. \"\"\" for qc in self.candidates_weights_quantization_cfg:", "a Node object. Args: name: Node's name framework_attr: Framework attributes the layer had", "= None, quantization_attr: Dict[str, Any] = None): \"\"\" Init a Node object. Args:", "quantization_attr: Attributes the node holds regarding how it should be quantized. \"\"\" self.name", "assert type(by_candidate_idx)==int assert by_candidate_idx < len(self.candidates_weights_quantization_cfg) memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8", "in self.weights.keys() if name in k] if len(res) == 1: self.weights[res[0]] = tensor", "self.activation_quantization_cfg = None self.final_weights_quantization_cfg = None self.candidates_weights_quantization_cfg = None self.prior_info = None @property", "name in the layer the node represents. layer_class: Class path of the layer", "existing node's weights, or add it if not exist. 
Args: name: Name of", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "v in self.weights.values() if v is not None]) assert int(node_num_params) == node_num_params return", "= copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__) for shared_attr in shared_attributes: if shared_attr in attr: unified_attr = []", "represent a node in a graph that represents the model. \"\"\" def __init__(self,", "the weight the node holds. tensor: Numpy array to set as the weight.", "def get_weights_by_keys(self, name: str) -> np.ndarray: \"\"\" Get a node's weight by its", "Any] = None): \"\"\" Init a Node object. Args: name: Node's name framework_attr:", "of the node. output_shape: Input tensor shape of the node. weights: Dictionary from", "layer_class: type, reuse: bool = False, reuse_group: str = None, quantization_attr: Dict[str, Any]", "def get_memory_bytes(self, by_candidate_idx: int = None) -> float: \"\"\" Returns: Number of bytes" ]
[ "fnmatch import os try: from setuptools import setup, find_packages except ImportError: import distribute_setup", "'data/settings.py', 'data/templates/*.html', ]+ media_files, }, entry_points={ 'console_scripts': [ 'epiocms = epiocms.main:main', ], },", "from setuptools import setup, find_packages except ImportError: import distribute_setup distribute_setup.use_setuptools() from setuptools import", "filename in filenames: filepath = os.path.join(dirpath, filename) failed = False for pattern in", "import os try: from setuptools import setup, find_packages except ImportError: import distribute_setup distribute_setup.use_setuptools()", ":: Utilities', ], platforms='any', packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html',", "__future__ import with_statement import epiocms import fnmatch import os try: from setuptools import", "ImportError: import distribute_setup distribute_setup.use_setuptools() from setuptools import setup, find_packages with open('README.rst', 'r') as", "failed = True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD',", "in filenames: filepath = os.path.join(dirpath, filename) failed = False for pattern in ('*.py',", "fnmatch.fnmatchcase(filename, pattern): failed = True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/',", ":: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD", "'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files, }, entry_points={ 
'console_scripts': [ 'epiocms = epiocms.main:main',", "continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart command line", "setuptools import setup, find_packages except ImportError: import distribute_setup distribute_setup.use_setuptools() from setuptools import setup,", "version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart command line app for the django", "author='<NAME>', author_email='<EMAIL>', description='Quickstart command line app for the django CMS for ep.io', long_description=long_desc,", "coding: utf-8 -*- from __future__ import with_statement import epiocms import fnmatch import os", ":: 2', 'Topic :: Utilities', ], platforms='any', packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini', 'data/requirements.txt',", "os try: from setuptools import setup, find_packages except ImportError: import distribute_setup distribute_setup.use_setuptools() from", "if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart", "Python', 'Programming Language :: Python :: 2', 'Topic :: Utilities', ], platforms='any', packages=find_packages(),", "Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License',", ":: Python :: 2', 'Topic :: Utilities', 
], platforms='any', packages=find_packages(), package_data={ 'epiocms': [", "command line app for the django CMS for ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development", "for ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment ::", "'Topic :: Utilities', ], platforms='any', packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py',", "'*.pyc', '*~', '.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed = True if failed:", "Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating", "media_files = [] for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')): for filename", "'media')): for filename in filenames: filepath = os.path.join(dirpath, filename) failed = False for", "filenames in os.walk(os.path.join('epiocms', 'data', 'media')): for filename in filenames: filepath = os.path.join(dirpath, filename)", "'Operating System :: Unix', 'Programming Language :: Python', 'Programming Language :: Python ::", "2', 'Topic :: Utilities', ], platforms='any', packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py',", "zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment ::", "'data/templates/*.html', ]+ media_files, }, entry_points={ 'console_scripts': [ 'epiocms = epiocms.main:main', ], }, )", "import fnmatch import os try: from setuptools import setup, find_packages except ImportError: import", "], platforms='any', packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files,", "download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', 
author_email='<EMAIL>', description='Quickstart command line app for the django CMS for", "open('README.rst', 'r') as fobj: long_desc = fobj.read() media_files = [] for dirpath, dirnames,", "import with_statement import epiocms import fnmatch import os try: from setuptools import setup,", "Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License", "dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')): for filename in filenames: filepath = os.path.join(dirpath,", "Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System ::", "platforms='any', packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files, },", "import setup, find_packages with open('README.rst', 'r') as fobj: long_desc = fobj.read() media_files =", "System :: Unix', 'Programming Language :: Python', 'Programming Language :: Python :: 2',", "'epiocms': [ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files, }, entry_points={ 'console_scripts': [", "[ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files, }, entry_points={ 'console_scripts': [ 'epiocms", "description='Quickstart command line app for the django CMS for ep.io', long_description=long_desc, zip_safe=False, classifiers=[", "import epiocms import fnmatch import os try: from setuptools import setup, find_packages except", ":: OSI Approved :: BSD License', 'Operating System :: Unix', 'Programming Language ::", "= True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', 
license='BSD', author='<NAME>',", "utf-8 -*- from __future__ import with_statement import epiocms import fnmatch import os try:", "= False for pattern in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename,", "for pattern in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed", "fobj: long_desc = fobj.read() media_files = [] for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms',", "- Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers',", "line app for the django CMS for ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status", "as fobj: long_desc = fobj.read() media_files = [] for dirpath, dirnames, filenames in", "dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')): for filename in filenames: filepath =", ":: BSD License', 'Operating System :: Unix', 'Programming Language :: Python', 'Programming Language", ":: Unix', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Topic", "setup, find_packages with open('README.rst', 'r') as fobj: long_desc = fobj.read() media_files = []", "'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files, }, entry_points={ 'console_scripts': [ 'epiocms =", "BSD License', 'Operating System :: Unix', 'Programming Language :: Python', 'Programming Language ::", "License', 'Operating System :: Unix', 'Programming Language :: Python', 'Programming Language :: Python", "from __future__ import with_statement import epiocms import fnmatch import os try: from setuptools", "'*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed = True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart',", "'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files, }, 
entry_points={ 'console_scripts': [ 'epiocms = epiocms.main:main', ],", "Language :: Python :: 2', 'Topic :: Utilities', ], platforms='any', packages=find_packages(), package_data={ 'epiocms':", "media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart command line app", "5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience ::", "find_packages except ImportError: import distribute_setup distribute_setup.use_setuptools() from setuptools import setup, find_packages with open('README.rst',", "fobj.read() media_files = [] for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')): for", "'.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed = True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:]))", "except ImportError: import distribute_setup distribute_setup.use_setuptools() from setuptools import setup, find_packages with open('README.rst', 'r')", "CMS for ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment", "license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart command line app for the django CMS for ep.io',", "pattern): failed = True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart',", "'r') as fobj: long_desc = fobj.read() media_files = [] for dirpath, dirnames, filenames", "package_data={ 'epiocms': [ 'data/epio.ini', 
'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files, }, entry_points={ 'console_scripts':", "failed = False for pattern in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'): if", "import setup, find_packages except ImportError: import distribute_setup distribute_setup.use_setuptools() from setuptools import setup, find_packages", "in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed = True", "OSI Approved :: BSD License', 'Operating System :: Unix', 'Programming Language :: Python',", "Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved", "pattern in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed =", "for the django CMS for ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 5", "'data', 'media')): for filename in filenames: filepath = os.path.join(dirpath, filename) failed = False", "= fobj.read() media_files = [] for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')):", "with open('README.rst', 'r') as fobj: long_desc = fobj.read() media_files = [] for dirpath,", "setuptools import setup, find_packages with open('README.rst', 'r') as fobj: long_desc = fobj.read() media_files", "-*- from __future__ import with_statement import epiocms import fnmatch import os try: from", "try: from setuptools import setup, find_packages except ImportError: import distribute_setup distribute_setup.use_setuptools() from setuptools", "('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed = True if", "Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended", "setup, find_packages except ImportError: import distribute_setup distribute_setup.use_setuptools() from setuptools import setup, find_packages with", "False for pattern 
in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern):", "author_email='<EMAIL>', description='Quickstart command line app for the django CMS for ep.io', long_description=long_desc, zip_safe=False,", "url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart command line app for the django CMS", "Approved :: BSD License', 'Operating System :: Unix', 'Programming Language :: Python', 'Programming", "Python :: 2', 'Topic :: Utilities', ], platforms='any', packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini',", "'License :: OSI Approved :: BSD License', 'Operating System :: Unix', 'Programming Language", "app for the django CMS for ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status ::", ":: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI", "-*- coding: utf-8 -*- from __future__ import with_statement import epiocms import fnmatch import", "distribute_setup distribute_setup.use_setuptools() from setuptools import setup, find_packages with open('README.rst', 'r') as fobj: long_desc", "long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment", "'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment',", "find_packages with open('README.rst', 'r') as fobj: long_desc = fobj.read() media_files = [] for", "from setuptools import setup, find_packages with open('README.rst', 'r') as fobj: long_desc = fobj.read()", "epiocms import fnmatch import os try: from setuptools import setup, find_packages except ImportError:", "name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', 
download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart command line app for the", "'*~', '.*', '*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed = True if failed: continue", "= os.path.join(dirpath, filename) failed = False for pattern in ('*.py', '*.pyc', '*~', '.*',", "classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web", "'Programming Language :: Python :: 2', 'Topic :: Utilities', ], platforms='any', packages=find_packages(), package_data={", "long_desc = fobj.read() media_files = [] for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data',", "ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console',", "True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>',", "Language :: Python', 'Programming Language :: Python :: 2', 'Topic :: Utilities', ],", ":: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience", "distribute_setup.use_setuptools() from setuptools import setup, find_packages with open('README.rst', 'r') as fobj: long_desc =", "os.path.join(dirpath, filename) failed = False for pattern in ('*.py', '*.pyc', '*~', '.*', '*.bak',", "if fnmatch.fnmatchcase(filename, pattern): failed = True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__,", "Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: Unix', 'Programming", "[] for dirpath, 
dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')): for filename in filenames:", "= [] for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')): for filename in", "filename) failed = False for pattern in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'):", "Unix', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Topic ::", "Utilities', ], platforms='any', packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+", "filenames: filepath = os.path.join(dirpath, filename) failed = False for pattern in ('*.py', '*.pyc',", "filepath = os.path.join(dirpath, filename) failed = False for pattern in ('*.py', '*.pyc', '*~',", "'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved ::", "import distribute_setup distribute_setup.use_setuptools() from setuptools import setup, find_packages with open('README.rst', 'r') as fobj:", "# -*- coding: utf-8 -*- from __future__ import with_statement import epiocms import fnmatch", "for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')): for filename in filenames: filepath", ":: Python', 'Programming Language :: Python :: 2', 'Topic :: Utilities', ], platforms='any',", "for filename in filenames: filepath = os.path.join(dirpath, filename) failed = False for pattern", "django CMS for ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable',", ":: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: Unix',", "in os.walk(os.path.join('epiocms', 'data', 'media')): for filename in filenames: filepath = os.path.join(dirpath, filename) failed", "packages=find_packages(), package_data={ 'epiocms': [ 'data/epio.ini', 'data/requirements.txt', 'data/urls.py', 'data/settings.py', 'data/templates/*.html', ]+ media_files, }, 
entry_points={", "with_statement import epiocms import fnmatch import os try: from setuptools import setup, find_packages", "failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart command", "'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System", "os.walk(os.path.join('epiocms', 'data', 'media')): for filename in filenames: filepath = os.path.join(dirpath, filename) failed =", "the django CMS for ep.io', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 5 -", "'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License ::", "'*.bak', '*.swp*'): if fnmatch.fnmatchcase(filename, pattern): failed = True if failed: continue media_files.append(os.path.join(*filepath.split(os.sep)[1:])) setup(", "'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Topic :: Utilities',", "setup( name='django-cms-epio-quickstart', version=epiocms.__version__, url='https://github.com/ojii/django-cms-epio-quickstart/', download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart', license='BSD', author='<NAME>', author_email='<EMAIL>', description='Quickstart command line app for" ]
[ "= test test = base.replace(' ', ' ') return base def get_info(data_line): data_use", "('author', 'title', 'journal', 'volume', 'page', 'month', 'year')} out['ref'] = values[0].replace('.', '') for k,", "pd['volume']==pd['page']=='': return '' elif pd['volume']=='': return ' %s,'%pd['page'] elif pd['page']=='': return ' %s,'%pd['volume']", "'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly Notices of the Royal Astronomical", "data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '') data_red = data.replace(' ',", "D', } if key in lib: return lib[key] else: return key mon_num =", "out[k.replace(' ', '')] = v.replace('{', '').replace('}', '').replace('\"', '') if 'howpublished' in out and", "and '))>2: return '%s et al'%authors.split(' and ')[0].split(',')[0] else: return authors def vol_page(pd):", "= data_line.split(' = ') keys = [d.split(',')[-1] for d in data_use][:-1] values =", "\"This paper is about the number 1. The number 2 is left for", "rm_spaces(string): base = string test = base.replace('\\t', ' ') test = test.replace(' ',", "#'excerpt: \"This paper is about the number 1. The number 2 is left", "the number 1. The number 2 is left for future work.', #'[Download paper", "' ') while data!=data_red: data = data_red data_red = data.replace(' ', ' ')", "\"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper is about the number 1.", "data_line.split(' = ') keys = [d.split(',')[-1] for d in data_use][:-1] values = [','.join(d.split(',')[:-1])", "'Recommended citation: %s (%s). 
\"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(),", "data: #print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat)) print(name) f = open('_publications/%s'%name, 'w') print(content, file=f)", "Society', r'\\aap': 'Astronomy and Astrophysics', r'\\prd': 'Physical Review D', } if key in", "import re import calendar def journals(key): lib = { 'arXiv e-prints': 'ArXiv', r'\\apj':", "' ') test = test.replace(' ', ' ') while test!=base: base = test", "<i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '', links, #'This paper", "data = data_red data_red = data.replace(' ', ' ') data = data.split('@ARTICLE{') data", "') while data!=data_red: data = data_red data_red = data.replace(' ', ' ') data", "= { 'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly Notices of the", "' ') while test!=base: base = test test = base.replace(' ', ' ')", "dat in data for d in dat.split('@MISC{') if d!=''] for dat in data:", "#'', #'Recommended citation: Your Name, You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>.", "in enumerate(calendar.month_abbr) if k>0} def rm_spaces(string): base = string test = base.replace('\\t', '", "\"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name, You. (2009). &quot;Paper Title Number 1.&quot; <i>Journal", "#'This paper is about the number 1. The number 2 is left for", "'journal', 'volume', 'page', 'month', 'year')} out['ref'] = values[0].replace('.', '') for k, v in", "Title Number 1.\" <i>Journal 1</i>. 1(1).', '', 'Recommended citation: %s (%s). 
\"%s\" <i>%s</i>,%s", "filename = '%s-%s.md'%(date, pd['ref']) links = ', '.join([fmt%pd[k] for k, fmt in [('doi',", "= out['author'].replace('\\\\', '') return out def abb_author(authors): if len(authors.split(' and '))>2: return '%s", "k in pd ]) content = '\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink:", "out = {k: '' for k in ('author', 'title', 'journal', 'volume', 'page', 'month',", "'Astrophysical Journal', '\\mnras': 'Monthly Notices of the Royal Astronomical Society', r'\\aap': 'Astronomy and", "pd['year']), '---', '', links, #'This paper is about the number 1. The number", "2 is left for future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your", "'howpublished' in out and out['journal']=='': out['journal'] = out['howpublished'] out['author'] = out['author'].replace('\\\\', '') return", "else: return authors def vol_page(pd): if pd['volume']==pd['page']=='': return '' elif pd['volume']=='': return '", "') keys = [d.split(',')[-1] for d in data_use][:-1] values = [','.join(d.split(',')[:-1]) for d", "data = data.replace('\\n', '') data_red = data.replace(' ', ' ') while data!=data_red: data", "'') return out def abb_author(authors): if len(authors.split(' and '))>2: return '%s et al'%authors.split('", "len(authors.split(' and '))>2: return '%s et al'%authors.split(' and ')[0].split(',')[0] else: return authors def", "v out[k.replace(' ', '')] = v.replace('{', '').replace('}', '').replace('\"', '') if 'howpublished' in out", "] if k in pd ]) content = '\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection:", "1(1).', '', 'Recommended citation: %s (%s). 
\"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']),", "values = [rm_spaces(v) for v in values] out = {k: '' for k", "The number 2 is left for future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended", "else: return key mon_num = {v.lower(): \"%02d\"%k for k, v in enumerate(calendar.month_abbr) if", "{v.lower(): \"%02d\"%k for k, v in enumerate(calendar.month_abbr) if k>0} def rm_spaces(string): base =", "and ')[0].split(',')[0] else: return authors def vol_page(pd): if pd['volume']==pd['page']=='': return '' elif pd['volume']=='':", "lib: return lib[key] else: return key mon_num = {v.lower(): \"%02d\"%k for k, v", "journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '', links, #'This paper is about the number", "gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref']) links = ',", "'month', 'year')} out['ref'] = values[0].replace('.', '') for k, v in zip(keys, values[1:]): #out[k.replace('", "\"%s. &quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '', links,", "Name, You. (2009). &quot;Paper Title Number 1.&quot; <i>Journal 1</i>. 1(1).\"', 'citation: \"%s. 
&quot;%s.&quot;", "Astronomical Society', r'\\aap': 'Astronomy and Astrophysics', r'\\prd': 'Physical Review D', } if key", "%s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()])", "def abb_author(authors): if len(authors.split(' and '))>2: return '%s et al'%authors.split(' and ')[0].split(',')[0] else:", "return out def abb_author(authors): if len(authors.split(' and '))>2: return '%s et al'%authors.split(' and", "= out['howpublished'] out['author'] = out['author'].replace('\\\\', '') return out def abb_author(authors): if len(authors.split(' and", "'))>2: return '%s et al'%authors.split(' and ')[0].split(',')[0] else: return authors def vol_page(pd): if", "zip(keys, values[1:]): #out[k.replace(' ', '')] = v out[k.replace(' ', '')] = v.replace('{', '').replace('}',", "for d in dat.split('@MISC{') if d!=''] for dat in data: #print(gen_file(get_info(dat))) name, content", "out['author'] = out['author'].replace('\\\\', '') return out def abb_author(authors): if len(authors.split(' and '))>2: return", "about the number 1. The number 2 is left for future work.\"', 'date:", "k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if k in", "and out['journal']=='': out['journal'] = out['howpublished'] out['author'] = out['author'].replace('\\\\', '') return out def abb_author(authors):", "k in ('author', 'title', 'journal', 'volume', 'page', 'month', 'year')} out['ref'] = values[0].replace('.', '')", "Journal', '\\mnras': 'Monthly Notices of the Royal Astronomical Society', r'\\aap': 'Astronomy and Astrophysics',", "in out and out['journal']=='': out['journal'] = out['howpublished'] out['author'] = out['author'].replace('\\\\', '') return out", "<i>Journal 1</i>. 1(1).\"', 'citation: \"%s. 
&quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(),", "if k in pd ]) content = '\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection: publications',", "'[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if k in pd ]) content = '\\n'.join([ '---',", "= v.replace('{', '').replace('}', '').replace('\"', '') if 'howpublished' in out and out['journal']=='': out['journal'] =", "', '')] = v.replace('{', '').replace('}', '').replace('\"', '') if 'howpublished' in out and out['journal']=='':", "future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your Name, You. (2009). \"Paper", "= test.replace(' ', ' ') while test!=base: base = test test = base.replace('", "journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content if __name__=='__main__': import", "1. The number 2 is left for future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'',", "publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper is about the number 1. The number", "number 1. The number 2 is left for future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)',", "and Astrophysics', r'\\prd': 'Physical Review D', } if key in lib: return lib[key]", "abb_author(authors): if len(authors.split(' and '))>2: return '%s et al'%authors.split(' and ')[0].split(',')[0] else: return", "is left for future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your Name,", "Royal Astronomical Society', r'\\aap': 'Astronomy and Astrophysics', r'\\prd': 'Physical Review D', } if", "paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your Name, You. (2009). 
\"Paper Title Number 1.\"", "1</i>. 1(1).', '', 'Recommended citation: %s (%s). \"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'],", "def gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref']) links =", "enumerate(calendar.month_abbr) if k>0} def rm_spaces(string): base = string test = base.replace('\\t', ' ')", "inbib = sys.argv[1] data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '') data_red", "'')] = v out[k.replace(' ', '')] = v.replace('{', '').replace('}', '').replace('\"', '') if 'howpublished'", "You. (2009). &quot;Paper Title Number 1.&quot; <i>Journal 1</i>. 1(1).\"', 'citation: \"%s. &quot;%s.&quot; <i>%s</i>,%s", "for d in data_use][:-1] values = [','.join(d.split(',')[:-1]) for d in data_use] values =", "'%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref']) links = ', '.join([fmt%pd[k] for k, fmt", "]) content = '\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This", "elif pd['page']=='': return ' %s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd)", "&quot;Paper Title Number 1.&quot; <i>Journal 1</i>. 1(1).\"', 'citation: \"%s. &quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'],", "'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper is about the number 1. The number 2", "data.replace('\\n', '') data_red = data.replace(' ', ' ') while data!=data_red: data = data_red", "'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name, You. (2009). 
&quot;Paper Title Number 1.&quot; <i>Journal 1</i>.", "= data.replace(' ', ' ') data = data.split('@ARTICLE{') data = [d for dat", "data_red = data.replace(' ', ' ') while data!=data_red: data = data_red data_red =", "base = string test = base.replace('\\t', ' ') test = test.replace(' ', '", "data_red = data.replace(' ', ' ') data = data.split('@ARTICLE{') data = [d for", "#'Recommended citation: Your Name, You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 1(1).',", "for k, v in enumerate(calendar.month_abbr) if k>0} def rm_spaces(string): base = string test", "= {v.lower(): \"%02d\"%k for k, v in enumerate(calendar.month_abbr) if k>0} def rm_spaces(string): base", "1.&quot; <i>Journal 1</i>. 1(1).\"', 'citation: \"%s. &quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd),", "= v out[k.replace(' ', '')] = v.replace('{', '').replace('}', '').replace('\"', '') if 'howpublished' in", "return ' %s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd) date =", "for k in ('author', 'title', 'journal', 'volume', 'page', 'month', 'year')} out['ref'] = values[0].replace('.',", "d in data_use] values = [rm_spaces(v) for v in values] out = {k:", "pd ]) content = '\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt:", "k>0} def rm_spaces(string): base = string test = base.replace('\\t', ' ') test =", "\"%02d\"%k for k, v in enumerate(calendar.month_abbr) if k>0} def rm_spaces(string): base = string", "[','.join(d.split(',')[:-1]) for d in data_use] values = [rm_spaces(v) for v in values] out", "%s (%s). \"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ])", "Name, You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 
1(1).', '', 'Recommended citation:", "in ('author', 'title', 'journal', 'volume', 'page', 'month', 'year')} out['ref'] = values[0].replace('.', '') for", "content)) return filename, content if __name__=='__main__': import sys inbib = sys.argv[1] data =", "e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly Notices of the Royal Astronomical Society',", "= [','.join(d.split(',')[:-1]) for d in data_use] values = [rm_spaces(v) for v in values]", "print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '') data_red = data.replace(' ', ' ') while data!=data_red:", "'')] = v.replace('{', '').replace('}', '').replace('\"', '') if 'howpublished' in out and out['journal']=='': out['journal']", "return ' %s,'%pd['page'] elif pd['page']=='': return ' %s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'], pd['page'])", "test test = base.replace(' ', ' ') return base def get_info(data_line): data_use =", "key mon_num = {v.lower(): \"%02d\"%k for k, v in enumerate(calendar.month_abbr) if k>0} def", "1.\" <i>Journal 1</i>. 1(1).', '', 'Recommended citation: %s (%s). 
\"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']),", "out['ref'] = values[0].replace('.', '') for k, v in zip(keys, values[1:]): #out[k.replace(' ', '')]", "base.replace('\\t', ' ') test = test.replace(' ', ' ') while test!=base: base =", "values] out = {k: '' for k in ('author', 'title', 'journal', 'volume', 'page',", "mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref']) links = ', '.join([fmt%pd[k] for k, fmt in", "base.replace(' ', ' ') return base def get_info(data_line): data_use = data_line.split(' = ')", "v in zip(keys, values[1:]): #out[k.replace(' ', '')] = v out[k.replace(' ', '')] =", "vol_page(pd): if pd['volume']==pd['page']=='': return '' elif pd['volume']=='': return ' %s,'%pd['page'] elif pd['page']=='': return", "import sys inbib = sys.argv[1] data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n',", "in lib: return lib[key] else: return key mon_num = {v.lower(): \"%02d\"%k for k,", "pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '', links, #'This paper is about the", "'\\mnras': 'Monthly Notices of the Royal Astronomical Society', r'\\aap': 'Astronomy and Astrophysics', r'\\prd':", "left for future work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name,", "number 2 is left for future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation:", "'', links, #'This paper is about the number 1. 
The number 2 is", "'Astronomy and Astrophysics', r'\\prd': 'Physical Review D', } if key in lib: return", "= data_red data_red = data.replace(' ', ' ') data = data.split('@ARTICLE{') data =", "The number 2 is left for future work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl:", "'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly Notices of the Royal Astronomical Society', r'\\aap':", "for k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if k", "'Physical Review D', } if key in lib: return lib[key] else: return key", "data_use][:-1] values = [','.join(d.split(',')[:-1]) for d in data_use] values = [rm_spaces(v) for v", "'', 'Recommended citation: %s (%s). \"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd),", "1. The number 2 is left for future work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']),", "= [rm_spaces(v) for v in values] out = {k: '' for k in", "'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '') data_red = data.replace(' ', ' ') while", "('adsurl', '[**ADS**](%s)'), ] if k in pd ]) content = '\\n'.join([ '---', 'title:", "%s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name, You. (2009). &quot;Paper Title Number", "in dat.split('@MISC{') if d!=''] for dat in data: #print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat))", "r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly Notices of the Royal Astronomical Society', r'\\aap': 'Astronomy", "for future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your Name, You. 
(2009).", "pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content if", "return lib[key] else: return key mon_num = {v.lower(): \"%02d\"%k for k, v in", "al'%authors.split(' and ')[0].split(',')[0] else: return authors def vol_page(pd): if pd['volume']==pd['page']=='': return '' elif", "= values[0].replace('.', '') for k, v in zip(keys, values[1:]): #out[k.replace(' ', '')] =", "return filename, content if __name__=='__main__': import sys inbib = sys.argv[1] data = ''.join(open(inbib,", "= '\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper is", "the Royal Astronomical Society', r'\\aap': 'Astronomy and Astrophysics', r'\\prd': 'Physical Review D', }", "for v in values] out = {k: '' for k in ('author', 'title',", "%s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '', links, #'This paper is", "(%s). 
\"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename,", "__name__=='__main__': import sys inbib = sys.argv[1] data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data =", "mon_num = {v.lower(): \"%02d\"%k for k, v in enumerate(calendar.month_abbr) if k>0} def rm_spaces(string):", "' %s,'%pd['page'] elif pd['page']=='': return ' %s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'], pd['page']) def", "filename, content if __name__=='__main__': import sys inbib = sys.argv[1] data = ''.join(open(inbib, 'r').readlines())", "v in values] out = {k: '' for k in ('author', 'title', 'journal',", "authors def vol_page(pd): if pd['volume']==pd['page']=='': return '' elif pd['volume']=='': return ' %s,'%pd['page'] elif", "= sys.argv[1] data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '') data_red =", "'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper is about the number 1. The", "Notices of the Royal Astronomical Society', r'\\aap': 'Astronomy and Astrophysics', r'\\prd': 'Physical Review", "'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper is about the number", "= {k: '' for k in ('author', 'title', 'journal', 'volume', 'page', 'month', 'year')}", "is about the number 1. The number 2 is left for future work.',", "work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name, You. (2009). &quot;Paper", "def rm_spaces(string): base = string test = base.replace('\\t', ' ') test = test.replace('", "vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '', links, #'This paper is about the number 1.", "Number 1.\" <i>Journal 1</i>. 
1(1).', '', 'Recommended citation: %s (%s). \"%s\" <i>%s</i>,%s %s", "left for future work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your Name, You.", "future work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name, You. (2009).", "return base def get_info(data_line): data_use = data_line.split(' = ') keys = [d.split(',')[-1] for", "in zip(keys, values[1:]): #out[k.replace(' ', '')] = v out[k.replace(' ', '')] = v.replace('{',", "', '')] = v out[k.replace(' ', '')] = v.replace('{', '').replace('}', '').replace('\"', '') if", "' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date,", "r'\\prd': 'Physical Review D', } if key in lib: return lib[key] else: return", "pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content if __name__=='__main__':", "= ') keys = [d.split(',')[-1] for d in data_use][:-1] values = [','.join(d.split(',')[:-1]) for", "/publication/%s'%filename[:-3], #'excerpt: \"This paper is about the number 1. 
The number 2 is", "import calendar def journals(key): lib = { 'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal',", "'volume', 'page', 'month', 'year')} out['ref'] = values[0].replace('.', '') for k, v in zip(keys,", "if d!=''] for dat in data: #print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat)) print(name) f", "for dat in data: #print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat)) print(name) f = open('_publications/%s'%name,", "else: return ' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename", "values[1:]): #out[k.replace(' ', '')] = v out[k.replace(' ', '')] = v.replace('{', '').replace('}', '').replace('\"',", "pd['ref']) links = ', '.join([fmt%pd[k] for k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'),", "', ' ') return base def get_info(data_line): data_use = data_line.split(' = ') keys", "lib = { 'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly Notices of", "#print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat)) print(name) f = open('_publications/%s'%name, 'w') print(content, file=f) f.close()", "while data!=data_red: data = data_red data_red = data.replace(' ', ' ') data =", "return '%s et al'%authors.split(' and ')[0].split(',')[0] else: return authors def vol_page(pd): if pd['volume']==pd['page']=='':", "def vol_page(pd): if pd['volume']==pd['page']=='': return '' elif pd['volume']=='': return ' %s,'%pd['page'] elif pd['page']=='':", "[d.split(',')[-1] for d in data_use][:-1] values = [','.join(d.split(',')[:-1]) for d in data_use] values", "if k>0} def rm_spaces(string): base = string test = base.replace('\\t', ' ') test", "'title', 'journal', 'volume', 'page', 'month', 'year')} out['ref'] = values[0].replace('.', '') for k, v", "for k, v in zip(keys, values[1:]): #out[k.replace(' ', '')] = v out[k.replace(' ',", "= 
data.replace(' ', ' ') while data!=data_red: data = data_red data_red = data.replace('", "'.join([fmt%pd[k] for k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if", "'' for k in ('author', 'title', 'journal', 'volume', 'page', 'month', 'year')} out['ref'] =", "out['journal']=='': out['journal'] = out['howpublished'] out['author'] = out['author'].replace('\\\\', '') return out def abb_author(authors): if", "'year')} out['ref'] = values[0].replace('.', '') for k, v in zip(keys, values[1:]): #out[k.replace(' ',", "sys.argv[1] data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '') data_red = data.replace('", "', ' ') while data!=data_red: data = data_red data_red = data.replace(' ', '", "in values] out = {k: '' for k in ('author', 'title', 'journal', 'volume',", "base def get_info(data_line): data_use = data_line.split(' = ') keys = [d.split(',')[-1] for d", "('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if k in pd ]) content = '\\n'.join([", "1</i>. 1(1).\"', 'citation: \"%s. &quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']),", "You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 1(1).', '', 'Recommended citation: %s", "print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref']) links = ', '.join([fmt%pd[k]", "v in enumerate(calendar.month_abbr) if k>0} def rm_spaces(string): base = string test = base.replace('\\t',", "pd['month'].capitalize(), pd['year']), '---', '', links, #'This paper is about the number 1. 
The", "lib[key] else: return key mon_num = {v.lower(): \"%02d\"%k for k, v in enumerate(calendar.month_abbr)", "if pd['volume']==pd['page']=='': return '' elif pd['volume']=='': return ' %s,'%pd['page'] elif pd['page']=='': return '", "\"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content))", "about the number 1. The number 2 is left for future work.', #'[Download", "') test = test.replace(' ', ' ') while test!=base: base = test test", "data!=data_red: data = data_red data_red = data.replace(' ', ' ') data = data.split('@ARTICLE{')", "'---', '', links, #'This paper is about the number 1. The number 2", "%s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename,", "2 is left for future work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation:", "')[0].split(',')[0] else: return authors def vol_page(pd): if pd['volume']==pd['page']=='': return '' elif pd['volume']=='': return", "if len(authors.split(' and '))>2: return '%s et al'%authors.split(' and ')[0].split(',')[0] else: return authors", "'') for k, v in zip(keys, values[1:]): #out[k.replace(' ', '')] = v out[k.replace('", "dat.split('@MISC{') if d!=''] for dat in data: #print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat)) print(name)", "'[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if k in pd ]) content =", "Your Name, You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 
1(1).', '', 'Recommended", "#out[k.replace(' ', '')] = v out[k.replace(' ', '')] = v.replace('{', '').replace('}', '').replace('\"', '')", "'') if 'howpublished' in out and out['journal']=='': out['journal'] = out['howpublished'] out['author'] = out['author'].replace('\\\\',", "'').replace('\"', '') if 'howpublished' in out and out['journal']=='': out['journal'] = out['howpublished'] out['author'] =", "'%s et al'%authors.split(' and ')[0].split(',')[0] else: return authors def vol_page(pd): if pd['volume']==pd['page']=='': return", "d!=''] for dat in data: #print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat)) print(name) f =", "out and out['journal']=='': out['journal'] = out['howpublished'] out['author'] = out['author'].replace('\\\\', '') return out def", "sys inbib = sys.argv[1] data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '')", "Number 1.&quot; <i>Journal 1</i>. 1(1).\"', 'citation: \"%s. &quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']),", "elif pd['volume']=='': return ' %s,'%pd['page'] elif pd['page']=='': return ' %s,'%pd['volume'] else: return '", "calendar def journals(key): lib = { 'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras':", "Astrophysics', r'\\prd': 'Physical Review D', } if key in lib: return lib[key] else:", "dat in data: #print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat)) print(name) f = open('_publications/%s'%name, 'w')", "out['journal'] = out['howpublished'] out['author'] = out['author'].replace('\\\\', '') return out def abb_author(authors): if len(authors.split('", "in data_use][:-1] values = [','.join(d.split(',')[:-1]) for d in data_use] values = [rm_spaces(v) for", "for dat in data for d in dat.split('@MISC{') if d!=''] for dat in", "]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content if __name__=='__main__': import sys inbib = sys.argv[1]", 
"values = [','.join(d.split(',')[:-1]) for d in data_use] values = [rm_spaces(v) for v in", "content if __name__=='__main__': import sys inbib = sys.argv[1] data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0])", "in data for d in dat.split('@MISC{') if d!=''] for dat in data: #print(gen_file(get_info(dat)))", "get_info(data_line): data_use = data_line.split(' = ') keys = [d.split(',')[-1] for d in data_use][:-1]", "#'citation: \"Your Name, You. (2009). &quot;Paper Title Number 1.&quot; <i>Journal 1</i>. 1(1).\"', 'citation:", "'\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper is about", "'').replace('}', '').replace('\"', '') if 'howpublished' in out and out['journal']=='': out['journal'] = out['howpublished'] out['author']", "content = '\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper", "test = base.replace('\\t', ' ') test = test.replace(' ', ' ') while test!=base:", "number 1. The number 2 is left for future work.\"', 'date: %s'%date, 'venue:", "data_use] values = [rm_spaces(v) for v in values] out = {k: '' for", "of the Royal Astronomical Society', r'\\aap': 'Astronomy and Astrophysics', r'\\prd': 'Physical Review D',", "{k: '' for k in ('author', 'title', 'journal', 'volume', 'page', 'month', 'year')} out['ref']", "the number 1. The number 2 is left for future work.\"', 'date: %s'%date,", "def get_info(data_line): data_use = data_line.split(' = ') keys = [d.split(',')[-1] for d in", "= base.replace('\\t', ' ') test = test.replace(' ', ' ') while test!=base: base", "return key mon_num = {v.lower(): \"%02d\"%k for k, v in enumerate(calendar.month_abbr) if k>0}", "= data.split('@ARTICLE{') data = [d for dat in data for d in dat.split('@MISC{')", "citation: Your Name, You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 
1(1).', '',", "here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your Name, You. (2009). \"Paper Title Number 1.\" <i>Journal", "= base.replace(' ', ' ') return base def get_info(data_line): data_use = data_line.split(' =", "if 'howpublished' in out and out['journal']=='': out['journal'] = out['howpublished'] out['author'] = out['author'].replace('\\\\', '')", "data = [d for dat in data for d in dat.split('@MISC{') if d!='']", "print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content if __name__=='__main__': import sys inbib = sys.argv[1] data", "r'\\aap': 'Astronomy and Astrophysics', r'\\prd': 'Physical Review D', } if key in lib:", "= [d.split(',')[-1] for d in data_use][:-1] values = [','.join(d.split(',')[:-1]) for d in data_use]", "re import calendar def journals(key): lib = { 'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical", "return '' elif pd['volume']=='': return ' %s,'%pd['page'] elif pd['page']=='': return ' %s,'%pd['volume'] else:", "'[**ADS**](%s)'), ] if k in pd ]) content = '\\n'.join([ '---', 'title: \"%s\"'%pd['title'],", "data.split('@ARTICLE{') data = [d for dat in data for d in dat.split('@MISC{') if", "[d for dat in data for d in dat.split('@MISC{') if d!=''] for dat", "', ' ') data = data.split('@ARTICLE{') data = [d for dat in data", "pd['volume']=='': return ' %s,'%pd['page'] elif pd['page']=='': return ' %s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'],", "(2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 
1(1).', '', 'Recommended citation: %s (%s).", "= ', '.join([fmt%pd[k] for k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'),", "if __name__=='__main__': import sys inbib = sys.argv[1] data = ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data", "return ' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename =", "journals(key): lib = { 'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly Notices", "key in lib: return lib[key] else: return key mon_num = {v.lower(): \"%02d\"%k for", "'' elif pd['volume']=='': return ' %s,'%pd['page'] elif pd['page']=='': return ' %s,'%pd['volume'] else: return", "(2009). &quot;Paper Title Number 1.&quot; <i>Journal 1</i>. 1(1).\"', 'citation: \"%s. &quot;%s.&quot; <i>%s</i>,%s %s", "values[0].replace('.', '') for k, v in zip(keys, values[1:]): #out[k.replace(' ', '')] = v", "return authors def vol_page(pd): if pd['volume']==pd['page']=='': return '' elif pd['volume']=='': return ' %s,'%pd['page']", "\"Your Name, You. (2009). &quot;Paper Title Number 1.&quot; <i>Journal 1</i>. 1(1).\"', 'citation: \"%s.", "k, v in enumerate(calendar.month_abbr) if k>0} def rm_spaces(string): base = string test =", "'%s-%s.md'%(date, pd['ref']) links = ', '.join([fmt%pd[k] for k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint',", "is about the number 1. 
The number 2 is left for future work.\"',", "pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content if __name__=='__main__': import sys inbib =", "keys = [d.split(',')[-1] for d in data_use][:-1] values = [','.join(d.split(',')[:-1]) for d in", "} if key in lib: return lib[key] else: return key mon_num = {v.lower():", "= '%s-%s.md'%(date, pd['ref']) links = ', '.join([fmt%pd[k] for k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'),", "'---', 'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3], #'excerpt: \"This paper is about the", "vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content if __name__=='__main__': import sys", "out['howpublished'] out['author'] = out['author'].replace('\\\\', '') return out def abb_author(authors): if len(authors.split(' and '))>2:", "while test!=base: base = test test = base.replace(' ', ' ') return base", "links, #'This paper is about the number 1. The number 2 is left", "= string test = base.replace('\\t', ' ') test = test.replace(' ', ' ')", "v.replace('{', '').replace('}', '').replace('\"', '') if 'howpublished' in out and out['journal']=='': out['journal'] = out['howpublished']", "out['author'].replace('\\\\', '') return out def abb_author(authors): if len(authors.split(' and '))>2: return '%s et", "= [d for dat in data for d in dat.split('@MISC{') if d!=''] for", "string test = base.replace('\\t', ' ') test = test.replace(' ', ' ') while", "fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if k in pd", "''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '') data_red = data.replace(' ', ' ')", "') while test!=base: base = test test = base.replace(' ', ' ') return", "<i>Journal 1</i>. 
1(1).', '', 'Recommended citation: %s (%s). \"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'],", "') data = data.split('@ARTICLE{') data = [d for dat in data for d", "\"Paper Title Number 1.\" <i>Journal 1</i>. 1(1).', '', 'Recommended citation: %s (%s). \"%s\"", "'Monthly Notices of the Royal Astronomical Society', r'\\aap': 'Astronomy and Astrophysics', r'\\prd': 'Physical", "', ' ') while test!=base: base = test test = base.replace(' ', '", "def journals(key): lib = { 'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly", "test.replace(' ', ' ') while test!=base: base = test test = base.replace(' ',", "= data.replace('\\n', '') data_red = data.replace(' ', ' ') while data!=data_red: data =", "%s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '', links, #'This paper is about", "for d in data_use] values = [rm_spaces(v) for v in values] out =", "test!=base: base = test test = base.replace(' ', ' ') return base def", "data_use = data_line.split(' = ') keys = [d.split(',')[-1] for d in data_use][:-1] values", "data.replace(' ', ' ') while data!=data_red: data = data_red data_red = data.replace(' ',", "Review D', } if key in lib: return lib[key] else: return key mon_num", "date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref']) links = ', '.join([fmt%pd[k] for", "in data: #print(gen_file(get_info(dat))) name, content = gen_file(get_info(dat)) print(name) f = open('_publications/%s'%name, 'w') print(content,", "pd['page']) def gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref']) links", "<i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return", "Title Number 1.&quot; <i>Journal 1</i>. 
1(1).\"', 'citation: \"%s. &quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'],", "data.replace(' ', ' ') data = data.split('@ARTICLE{') data = [d for dat in", "[rm_spaces(v) for v in values] out = {k: '' for k in ('author',", "in data_use] values = [rm_spaces(v) for v in values] out = {k: ''", "&quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '', links, #'This", "'citation: \"%s. &quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---', '',", "test = base.replace(' ', ' ') return base def get_info(data_line): data_use = data_line.split('", "pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content if __name__=='__main__': import sys inbib", "' ') data = data.split('@ARTICLE{') data = [d for dat in data for", "base = test test = base.replace(' ', ' ') return base def get_info(data_line):", "') return base def get_info(data_line): data_use = data_line.split(' = ') keys = [d.split(',')[-1]", "[('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if k in pd ]) content", "d in dat.split('@MISC{') if d!=''] for dat in data: #print(gen_file(get_info(dat))) name, content =", "{ 'arXiv e-prints': 'ArXiv', r'\\apj': 'Astrophysical Journal', '\\mnras': 'Monthly Notices of the Royal", "et al'%authors.split(' and ')[0].split(',')[0] else: return authors def vol_page(pd): if pd['volume']==pd['page']=='': return ''", "data for d in dat.split('@MISC{') if d!=''] for dat in data: #print(gen_file(get_info(dat))) name,", "data = data.split('@ARTICLE{') data = [d for dat in data for d in", "= '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref']) links = ', '.join([fmt%pd[k] for k,", "number 2 is left 
for future work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'],", "%s,'%pd['page'] elif pd['page']=='': return ' %s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd):", "if key in lib: return lib[key] else: return key mon_num = {v.lower(): \"%02d\"%k", "%s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), ]) print('\\n****%s\\n_____\\n%s\\n'%(filename, content)) return filename, content", "out def abb_author(authors): if len(authors.split(' and '))>2: return '%s et al'%authors.split(' and ')[0].split(',')[0]", "is left for future work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your", "work.', #'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your Name, You. (2009). \"Paper Title", "paper is about the number 1. The number 2 is left for future", "'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name, You. (2009). &quot;Paper Title", "in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ] if k in pd ])", "' ') return base def get_info(data_line): data_use = data_line.split(' = ') keys =", "', '.join([fmt%pd[k] for k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl', '[**ADS**](%s)'), ]", "citation: %s (%s). 
\"%s\" <i>%s</i>,%s %s %s'%(abb_author(pd['author']), pd['year'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']),", "data_red data_red = data.replace(' ', ' ') data = data.split('@ARTICLE{') data = [d", "in pd ]) content = '\\n'.join([ '---', 'title: \"%s\"'%pd['title'], 'collection: publications', 'permalink: /publication/%s'%filename[:-3],", "'') data_red = data.replace(' ', ' ') while data!=data_red: data = data_red data_red", "= ''.join(open(inbib, 'r').readlines()) print(data.split('\\n@ARTICLE')[0]) data = data.replace('\\n', '') data_red = data.replace(' ', '", "test = test.replace(' ', ' ') while test!=base: base = test test =", "links = ', '.join([fmt%pd[k] for k, fmt in [('doi', '[**Publisher**](http://doi.org/%s)'), ('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'), ('adsurl',", "d in data_use][:-1] values = [','.join(d.split(',')[:-1]) for d in data_use] values = [rm_spaces(v)", "k, v in zip(keys, values[1:]): #out[k.replace(' ', '')] = v out[k.replace(' ', '')]", "pd['page']=='': return ' %s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd) date", "' %s,'%pd['volume'] else: return ' %s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'],", "%s:%s,'%(pd['volume'], pd['page']) def gen_file(pd): print(pd) date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()]) filename = '%s-%s.md'%(date, pd['ref'])", "\"%s\"'%pd['adsurl'], #'citation: \"Your Name, You. (2009). &quot;Paper Title Number 1.&quot; <i>Journal 1</i>. 1(1).\"',", "1(1).\"', 'citation: \"%s. 
&quot;%s.&quot; <i>%s</i>,%s %s %s\"'%(pd['author'], pd['title'], journals(pd['journal']), vol_page(pd), pd['month'].capitalize(), pd['year']), '---',", "'page', 'month', 'year')} out['ref'] = values[0].replace('.', '') for k, v in zip(keys, values[1:]):", "for future work.\"', 'date: %s'%date, 'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name, You.", "'venue: \"%s\"'%journals(pd['journal']), 'paperurl: \"%s\"'%pd['adsurl'], #'citation: \"Your Name, You. (2009). &quot;Paper Title Number 1.&quot;", "#'[Download paper here](http://academicpages.github.io/files/paper1.pdf)', #'', #'Recommended citation: Your Name, You. (2009). \"Paper Title Number" ]
[ "soma def porcentagens(lista , khan , correy , li , otooley): t_votos =", "40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40) with open(nome) as arquivo: print(arquivo.read()) #main", "eleito = [] eleito.append(ganhou) eleito.append(maior) return eleito def procuraArquivoResultado(): nome = 'resultado.txt' try:", "soma = 0 for linha in lista: if 'Khan' in linha: soma+=1 return", "for i in range(len(lista)): nome = lista[i][2] if nome not in nomes: nomes.append(nome)", "votosOTooley(lista): soma = 0 for i in range(len(lista)): nome = lista[i][2] if nome", "return lista_por def vencedor(lista_por): maior = 0 cont = 0 index = int", "!= 'Li' and nome != 'Correy': soma += 1 return soma def porcentagens(lista", "= open(nome, 'wt+') b.close() except: print('houve um probrema na criaçao do arquivo') else:", "1: ganhou = 'Correy' elif index == 2: ganhou = 'Li' elif index", "khan = votosKhan(lista) correy = votosCorrey(lista) li = votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens", "!= 'Khan' and nome != 'Li' and nome != 'Correy': soma += 1", "as arquivo: arquivo.write('-' * 40) arquivo.write('\\n') txt = 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-'", "porcentagens: {lista_porcentagens}') #letra d print(f'votos pro khan = {khan}') print(f'votos pro Correy =", "khan , correy , li , otooley): t_votos = len(lista) por_khan = float(khan*100)/t_votos", "arquivo: print(arquivo.read()) #main arq = 'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq) #print(lista[0]) #letra a", "print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo resultado procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens , khan=khan, correy=correy, li=li", "lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def vencedor(lista_por): maior = 0 cont = 0 index", "with open(nome,'w') as arquivo: arquivo.write('-' * 40) arquivo.write('\\n') txt = 
'Resultados eleitorais' arquivo.write(txt.center(40))", "primeiralista(arq) #print(lista[0]) #letra a tam = len(lista) print(f'O número total de votos expressos", "na criaçao do arquivo') else: print(f'arquivo {nome} criado com sucesso =)') def escreveResultado(tam", "primeiralista(nome): a = open(nome, 'rt') lista1 = [] for linha in a: lista1.append(linha.replace('\\n','').split(','))", "= 'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})')", "#letra f print('-'*40) txt = 'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de votos: {tam}')", "Correy = {correy}') print(f'votos pra Li = {li}') print(f'votos pro O\\'Tooley = {oTooley}')", "print(txt.center(40)) print('-'*40) print(f'Total de votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})')", "3: ganhou = 'O\\'tooley' eleito = [] eleito.append(ganhou) eleito.append(maior) return eleito def procuraArquivoResultado():", "a tam = len(lista) print(f'O número total de votos expressos {tam}') #letra b", "b = open(nome, 'wt+') b.close() except: print('houve um probrema na criaçao do arquivo')", "float(correy*100)/t_votos por_li = float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley)", "40) with open(nome) as arquivo: print(arquivo.read()) #main arq = 'dados_elecao.txt' abreArquivo(arq) lista =", "=[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def vencedor(lista_por): maior = 0 cont", "votos') #letra f print('-'*40) txt = 'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de votos:", "linha in lista: if 'Khan' in linha: soma+=1 return soma def 
votosCorrey(lista): soma", "correy=correy, li=li, otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}') #letra d print(f'votos pro khan =", "= open(nome, 'rt') a.close() except: print('arquivo nao encontrado') else: print('Arquivo encontrado com sucesso", "#exportar arquivo resultado procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens , khan=khan, correy=correy, li=li , oTooley=oTooley ,", "= 0 for linha in lista: if 'Khan' in linha: soma+=1 return soma", "a = open(nome, 'rt') a.close() except: print('arquivo nao encontrado') else: print('Arquivo encontrado com", "ganhou = 'Li' elif index == 3: ganhou = 'O\\'tooley' eleito = []", "votos: {tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li:", "= {li}') print(f'votos pro O\\'Tooley = {oTooley}') #letra e v = vencedor(lista_porcentagens) print(f'O", "= 0 cont = 0 index = int ganhou = '' for i", "votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista de", "'Li' elif index == 3: ganhou = 'O\\'tooley' eleito = [] eleito.append(ganhou) eleito.append(maior)", "lista_por: if i > maior: maior = i index = cont cont +=", "= i index = cont cont += 1 if index == 0: ganhou", "= porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}') #letra d print(f'votos", "def votosCorrey(lista): soma = 0 for linha in lista: if 'Correy' in linha:", "= '' for i in lista_por: if i > maior: maior = i", "voce =)') criaArquivoResultado(nome) print('feito!') else: print(f'arquivo {nome} encontrado com sucesso =)') def criaArquivoResultado(nome):", "in range(len(lista)): nome = lista[i][2] if nome != 'Khan' and nome != 'Li'", "print('-'*40) #exportar arquivo resultado 
procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens , khan=khan, correy=correy, li=li , oTooley=oTooley", "= lista[i][2] if nome not in nomes: nomes.append(nome) return nomes def votosKhan(lista): soma", "= 0 index = int ganhou = '' for i in lista_por: if", "c porcentagem de cada candidato khan = votosKhan(lista) correy = votosCorrey(lista) li =", "lista: if 'Li' in linha: soma += 1 return soma def votosOTooley(lista): soma", "d print(f'votos pro khan = {khan}') print(f'votos pro Correy = {correy}') print(f'votos pra", "arquivo resultado procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens , khan=khan, correy=correy, li=li , oTooley=oTooley , v=v)", "= 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n')", "0 for i in range(len(lista)): nome = lista[i][2] if nome != 'Khan' and", "um total de {v[1]} porcento dos votos') #letra f print('-'*40) txt = 'Resultados", "= open(nome, 'rt') lista1 = [] for linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return", "+= 1 return soma def votosLi(lista): soma = 0 for linha in lista:", "'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy:", "tam = len(lista) print(f'O número total de votos expressos {tam}') #letra b candidatos", "print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo resultado procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens", "arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40) with", "khan , correy , li ,oTooley 
, v): nome = 'resultado.txt' with open(nome,'w')", "= votosCorrey(lista) li = votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan, correy=correy,", "= len(lista) por_khan = float(khan*100)/t_votos por_correy = float(correy*100)/t_votos por_li = float(li*100)/t_votos por_otooley =", "criaArquivoResultado(nome) print('feito!') else: print(f'arquivo {nome} encontrado com sucesso =)') def criaArquivoResultado(nome): try: b", "de votos: {tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n')", "try: b = open(nome, 'rt') b.close() except: print('arquivo nao encontrado, vou criar um", "por_khan = float(khan*100)/t_votos por_correy = float(correy*100)/t_votos por_li = float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos lista_por", "if nome != 'Khan' and nome != 'Li' and nome != 'Correy': soma", "print(f'votos pro O\\'Tooley = {oTooley}') #letra e v = vencedor(lista_porcentagens) print(f'O vencedor foi", "print(f'lista de candidatos: {candidatos}') #letra c porcentagem de cada candidato khan = votosKhan(lista)", "correy , li , otooley): t_votos = len(lista) por_khan = float(khan*100)/t_votos por_correy =", "elif index == 1: ganhou = 'Correy' elif index == 2: ganhou =", "um para voce =)') criaArquivoResultado(nome) print('feito!') else: print(f'arquivo {nome} encontrado com sucesso =)')", "por_li = float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return", "0 for linha in lista: if 'Li' in linha: soma += 1 return", "0: ganhou = 'Khan' elif index == 1: ganhou = 'Correy' elif index", "except: print('houve um probrema na criaçao do arquivo') else: print(f'arquivo {nome} criado com", "soma += 1 return soma def votosLi(lista): soma = 0 for 
linha in", "len(lista) por_khan = float(khan*100)/t_votos por_correy = float(correy*100)/t_votos por_li = float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos", "open(nome, 'rt') b.close() except: print('arquivo nao encontrado, vou criar um para voce =)')", "def votosKhan(lista): soma = 0 for linha in lista: if 'Khan' in linha:", "else: print('Arquivo encontrado com sucesso =)') def primeiralista(nome): a = open(nome, 'rt') lista1", "linha in lista: if 'Correy' in linha: soma += 1 return soma def", "khan = {khan}') print(f'votos pro Correy = {correy}') print(f'votos pra Li = {li}')", "== 3: ganhou = 'O\\'tooley' eleito = [] eleito.append(ganhou) eleito.append(maior) return eleito def", "eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)}", "40) arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n')", "#letra a tam = len(lista) print(f'O número total de votos expressos {tam}') #letra", "1 return soma def porcentagens(lista , khan , correy , li , otooley):", "= 0 for i in range(len(lista)): nome = lista[i][2] if nome != 'Khan'", "= votosKhan(lista) correy = votosCorrey(lista) li = votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens =", "= float(correy*100)/t_votos por_li = float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li)", "for linha in lista: if 'Correy' in linha: soma += 1 return soma", "li = votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley)", "lista_por def vencedor(lista_por): maior = 0 cont = 0 index = int ganhou", "porcento dos votos') #letra f print('-'*40) txt = 
'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total", "as arquivo: print(arquivo.read()) #main arq = 'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq) #print(lista[0]) #letra", "def nomecandidatos(lista): nomes = [] for i in range(len(lista)): nome = lista[i][2] if", "({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40)", "in lista: if 'Correy' in linha: soma += 1 return soma def votosLi(lista):", "nomes.append(nome) return nomes def votosKhan(lista): soma = 0 for linha in lista: if", "arquivo.write('\\n') txt = 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total de", "open(nome, 'rt') a.close() except: print('arquivo nao encontrado') else: print('Arquivo encontrado com sucesso =)')", "nome not in nomes: nomes.append(nome) return nomes def votosKhan(lista): soma = 0 for", "print('Arquivo encontrado com sucesso =)') def primeiralista(nome): a = open(nome, 'rt') lista1 =", "=)') def escreveResultado(tam , lista_porcentagens , khan , correy , li ,oTooley ,", "def votosOTooley(lista): soma = 0 for i in range(len(lista)): nome = lista[i][2] if", "({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40)", "um probrema na criaçao do arquivo') else: print(f'arquivo {nome} criado com sucesso =)')", "* 40) arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)}", "return soma def porcentagens(lista , khan , correy , li , otooley): t_votos", "print(f'lista de porcentagens: 
{lista_porcentagens}') #letra d print(f'votos pro khan = {khan}') print(f'votos pro", "open(nome, 'wt+') b.close() except: print('houve um probrema na criaçao do arquivo') else: print(f'arquivo", "= [] eleito.append(ganhou) eleito.append(maior) return eleito def procuraArquivoResultado(): nome = 'resultado.txt' try: b", "maior: maior = i index = cont cont += 1 if index ==", "({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo", "except: print('arquivo nao encontrado') else: print('Arquivo encontrado com sucesso =)') def primeiralista(nome): a", "votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley:", "1 return soma def votosOTooley(lista): soma = 0 for i in range(len(lista)): nome", "if nome not in nomes: nomes.append(nome) return nomes def votosKhan(lista): soma = 0", "index = cont cont += 1 if index == 0: ganhou = 'Khan'", "criaçao do arquivo') else: print(f'arquivo {nome} criado com sucesso =)') def escreveResultado(tam ,", "if index == 0: ganhou = 'Khan' elif index == 1: ganhou =", "criaArquivoResultado(nome): try: b = open(nome, 'wt+') b.close() except: print('houve um probrema na criaçao", "> maior: maior = i index = cont cont += 1 if index", "print('houve um probrema na criaçao do arquivo') else: print(f'arquivo {nome} criado com sucesso", "de porcentagens: {lista_porcentagens}') #letra d print(f'votos pro khan = {khan}') print(f'votos pro Correy", "a = open(nome, 'rt') lista1 = [] for linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0)", "pro Correy = {correy}') print(f'votos pra Li = {li}') print(f'votos pro O\\'Tooley =", "#letra b candidatos = nomecandidatos(lista) print(f'lista de candidatos: 
{candidatos}') #letra c porcentagem de", ", correy , li , otooley): t_votos = len(lista) por_khan = float(khan*100)/t_votos por_correy", "if 'Khan' in linha: soma+=1 return soma def votosCorrey(lista): soma = 0 for", "def criaArquivoResultado(nome): try: b = open(nome, 'wt+') b.close() except: print('houve um probrema na", "= lista[i][2] if nome != 'Khan' and nome != 'Li' and nome !=", "def vencedor(lista_por): maior = 0 cont = 0 index = int ganhou =", "cont += 1 if index == 0: ganhou = 'Khan' elif index ==", "eleito def procuraArquivoResultado(): nome = 'resultado.txt' try: b = open(nome, 'rt') b.close() except:", "lista_por.append(por_otooley) return lista_por def vencedor(lista_por): maior = 0 cont = 0 index =", "khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}') #letra d print(f'votos pro khan", "ganhou = 'O\\'tooley' eleito = [] eleito.append(ganhou) eleito.append(maior) return eleito def procuraArquivoResultado(): nome", "arquivo.write('-' * 40) with open(nome) as arquivo: print(arquivo.read()) #main arq = 'dados_elecao.txt' abreArquivo(arq)", "'Correy': soma += 1 return soma def porcentagens(lista , khan , correy ,", "encontrado') else: print('Arquivo encontrado com sucesso =)') def primeiralista(nome): a = open(nome, 'rt')", "not in nomes: nomes.append(nome) return nomes def votosKhan(lista): soma = 0 for linha", "def primeiralista(nome): a = open(nome, 'rt') lista1 = [] for linha in a:", "index == 1: ganhou = 'Correy' elif index == 2: ganhou = 'Li'", "arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40) with open(nome) as arquivo:", "soma += 1 return soma def votosOTooley(lista): soma = 0 for i in", "print('feito!') else: print(f'arquivo {nome} encontrado com sucesso =)') def criaArquivoResultado(nome): try: b =", "i in range(len(lista)): nome = lista[i][2] if nome != 'Khan' and nome !=", "def votosLi(lista): soma = 0 for linha in lista: if 'Li' in 
linha:", "abreArquivo(arq) lista = primeiralista(arq) #print(lista[0]) #letra a tam = len(lista) print(f'O número total", "else: print(f'arquivo {nome} criado com sucesso =)') def escreveResultado(tam , lista_porcentagens , khan", "'Khan' elif index == 1: ganhou = 'Correy' elif index == 2: ganhou", "= 'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq) #print(lista[0]) #letra a tam = len(lista) print(f'O", "sucesso =)') def primeiralista(nome): a = open(nome, 'rt') lista1 = [] for linha", "= votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista", "in nomes: nomes.append(nome) return nomes def votosKhan(lista): soma = 0 for linha in", "'Khan' in linha: soma+=1 return soma def votosCorrey(lista): soma = 0 for linha", "open(nome) as arquivo: print(arquivo.read()) #main arq = 'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq) #print(lista[0])", "lista = primeiralista(arq) #print(lista[0]) #letra a tam = len(lista) print(f'O número total de", "=)') def primeiralista(nome): a = open(nome, 'rt') lista1 = [] for linha in", "in lista: if 'Li' in linha: soma += 1 return soma def votosOTooley(lista):", "except: print('arquivo nao encontrado, vou criar um para voce =)') criaArquivoResultado(nome) print('feito!') else:", "print(f'arquivo {nome} criado com sucesso =)') def escreveResultado(tam , lista_porcentagens , khan ,", "porcentagens(lista , khan , correy , li , otooley): t_votos = len(lista) por_khan", "{lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar", "0 index = int ganhou = '' for i in lista_por: if i", "vencedor foi {v[0]} com um total de {v[1]} porcento dos votos') #letra f", "+= 1 if index == 0: ganhou = 'Khan' elif index == 1:", "vou criar um para voce =)') 
criaArquivoResultado(nome) print('feito!') else: print(f'arquivo {nome} encontrado com", "soma+=1 return soma def votosCorrey(lista): soma = 0 for linha in lista: if", "pra Li = {li}') print(f'votos pro O\\'Tooley = {oTooley}') #letra e v =", "print('arquivo nao encontrado') else: print('Arquivo encontrado com sucesso =)') def primeiralista(nome): a =", "procuraArquivoResultado(): nome = 'resultado.txt' try: b = open(nome, 'rt') b.close() except: print('arquivo nao", "for linha in lista: if 'Li' in linha: soma += 1 return soma", "de {v[1]} porcento dos votos') #letra f print('-'*40) txt = 'Resultados eleitorais' print(txt.center(40))", "b candidatos = nomecandidatos(lista) print(f'lista de candidatos: {candidatos}') #letra c porcentagem de cada", "index == 2: ganhou = 'Li' elif index == 3: ganhou = 'O\\'tooley'", "int ganhou = '' for i in lista_por: if i > maior: maior", "sucesso =)') def escreveResultado(tam , lista_porcentagens , khan , correy , li ,oTooley", "{lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo resultado procuraArquivoResultado()", "criado com sucesso =)') def escreveResultado(tam , lista_porcentagens , khan , correy ,", "ganhou = 'Khan' elif index == 1: ganhou = 'Correy' elif index ==", "'Khan' and nome != 'Li' and nome != 'Correy': soma += 1 return", "lista1.pop(0) return lista1 def nomecandidatos(lista): nomes = [] for i in range(len(lista)): nome", "print(f'arquivo {nome} encontrado com sucesso =)') def criaArquivoResultado(nome): try: b = open(nome, 'wt+')", ", v): nome = 'resultado.txt' with open(nome,'w') as arquivo: arquivo.write('-' * 40) arquivo.write('\\n')", "oTooley = votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista de porcentagens:", "= open(nome, 'rt') b.close() except: print('arquivo nao encontrado, vou criar um para 
voce", ", li , otooley): t_votos = len(lista) por_khan = float(khan*100)/t_votos por_correy = float(correy*100)/t_votos", "for i in lista_por: if i > maior: maior = i index =", "try: a = open(nome, 'rt') a.close() except: print('arquivo nao encontrado') else: print('Arquivo encontrado", "in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def nomecandidatos(lista): nomes = [] for i", "{v[1]} porcento dos votos') #letra f print('-'*40) txt = 'Resultados eleitorais' print(txt.center(40)) print('-'*40)", "O\\'Tooley = {oTooley}') #letra e v = vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]} com", "'rt') lista1 = [] for linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def", "elif index == 3: ganhou = 'O\\'tooley' eleito = [] eleito.append(ganhou) eleito.append(maior) return", "{v[0]}\\n') arquivo.write('-' * 40) with open(nome) as arquivo: print(arquivo.read()) #main arq = 'dados_elecao.txt'", "'rt') a.close() except: print('arquivo nao encontrado') else: print('Arquivo encontrado com sucesso =)') def", "sucesso =)') def criaArquivoResultado(nome): try: b = open(nome, 'wt+') b.close() except: print('houve um", "arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40) with open(nome) as arquivo: print(arquivo.read()) #main arq", "com um total de {v[1]} porcento dos votos') #letra f print('-'*40) txt =", ", khan , correy , li ,oTooley , v): nome = 'resultado.txt' with", "{tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)}", "print(f'votos pro khan = {khan}') print(f'votos pro Correy = {correy}') print(f'votos pra Li", "v = vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]} com um total de {v[1]} porcento", "{oTooley}') #letra e v = 
vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]} com um total", "encontrado com sucesso =)') def criaArquivoResultado(nome): try: b = open(nome, 'wt+') b.close() except:", "print(f'votos pro Correy = {correy}') print(f'votos pra Li = {li}') print(f'votos pro O\\'Tooley", "= {correy}') print(f'votos pra Li = {li}') print(f'votos pro O\\'Tooley = {oTooley}') #letra", "arq = 'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq) #print(lista[0]) #letra a tam = len(lista)", "e v = vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]} com um total de {v[1]}", "i in range(len(lista)): nome = lista[i][2] if nome not in nomes: nomes.append(nome) return", "correy , li ,oTooley , v): nome = 'resultado.txt' with open(nome,'w') as arquivo:", "linha in lista: if 'Li' in linha: soma += 1 return soma def", "lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}') #letra d", ", otooley): t_votos = len(lista) por_khan = float(khan*100)/t_votos por_correy = float(correy*100)/t_votos por_li =", "arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n')", "nomes = [] for i in range(len(lista)): nome = lista[i][2] if nome not", "arquivo.write('-' * 40) arquivo.write('\\n') txt = 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' * 40)", "= votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}')", "votosKhan(lista): soma = 0 for linha in lista: if 'Khan' in linha: soma+=1", "return soma def votosLi(lista): soma = 0 for linha in lista: if 'Li'", "({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') 
print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo resultado procuraArquivoResultado() escreveResultado(tam=tam,", "print('-'*40) txt = 'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de votos: {tam}') print('-'*40) print(f'Khan:", "else: print(f'arquivo {nome} encontrado com sucesso =)') def criaArquivoResultado(nome): try: b = open(nome,", "and nome != 'Li' and nome != 'Correy': soma += 1 return soma", "({oTooley})\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40) with open(nome) as", "def abreArquivo(nome): try: a = open(nome, 'rt') a.close() except: print('arquivo nao encontrado') else:", "return eleito def procuraArquivoResultado(): nome = 'resultado.txt' try: b = open(nome, 'rt') b.close()", "de votos expressos {tam}') #letra b candidatos = nomecandidatos(lista) print(f'lista de candidatos: {candidatos}')", "porcentagem de cada candidato khan = votosKhan(lista) correy = votosCorrey(lista) li = votosLi(lista)", "{nome} encontrado com sucesso =)') def criaArquivoResultado(nome): try: b = open(nome, 'wt+') b.close()", "lista[i][2] if nome not in nomes: nomes.append(nome) return nomes def votosKhan(lista): soma =", "= cont cont += 1 if index == 0: ganhou = 'Khan' elif", "40) arquivo.write('\\n') txt = 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total", "a.close() except: print('arquivo nao encontrado') else: print('Arquivo encontrado com sucesso =)') def primeiralista(nome):", "'Li' in linha: soma += 1 return soma def votosOTooley(lista): soma = 0", "= {oTooley}') #letra e v = vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]} com um", "#main arq = 'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq) #print(lista[0]) #letra a tam =", "li , otooley): t_votos = len(lista) por_khan = float(khan*100)/t_votos por_correy = float(correy*100)/t_votos 
por_li", "in linha: soma+=1 return soma def votosCorrey(lista): soma = 0 for linha in", "nome != 'Li' and nome != 'Correy': soma += 1 return soma def", "1 if index == 0: ganhou = 'Khan' elif index == 1: ganhou", "return soma def votosCorrey(lista): soma = 0 for linha in lista: if 'Correy'", "in linha: soma += 1 return soma def votosLi(lista): soma = 0 for", "total de {v[1]} porcento dos votos') #letra f print('-'*40) txt = 'Resultados eleitorais'", "soma def votosCorrey(lista): soma = 0 for linha in lista: if 'Correy' in", "escreveResultado(tam , lista_porcentagens , khan , correy , li ,oTooley , v): nome", "'wt+') b.close() except: print('houve um probrema na criaçao do arquivo') else: print(f'arquivo {nome}", "{tam}') #letra b candidatos = nomecandidatos(lista) print(f'lista de candidatos: {candidatos}') #letra c porcentagem", "40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)}", "for i in range(len(lista)): nome = lista[i][2] if nome != 'Khan' and nome", "soma = 0 for linha in lista: if 'Li' in linha: soma +=", "i index = cont cont += 1 if index == 0: ganhou =", "{correy}') print(f'votos pra Li = {li}') print(f'votos pro O\\'Tooley = {oTooley}') #letra e", "para voce =)') criaArquivoResultado(nome) print('feito!') else: print(f'arquivo {nome} encontrado com sucesso =)') def", "lista: if 'Correy' in linha: soma += 1 return soma def votosLi(lista): soma", "dos votos') #letra f print('-'*40) txt = 'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de", "= 'resultado.txt' with open(nome,'w') as arquivo: arquivo.write('-' * 40) arquivo.write('\\n') txt = 'Resultados", "!= 'Correy': soma += 1 return soma def porcentagens(lista , khan , correy", "candidato khan = votosKhan(lista) correy = 
votosCorrey(lista) li = votosLi(lista) oTooley = votosOTooley(lista)", "({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo resultado procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens , khan=khan,", "print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})')", "if 'Li' in linha: soma += 1 return soma def votosOTooley(lista): soma =", "nome = 'resultado.txt' try: b = open(nome, 'rt') b.close() except: print('arquivo nao encontrado,", "número total de votos expressos {tam}') #letra b candidatos = nomecandidatos(lista) print(f'lista de", "{khan}') print(f'votos pro Correy = {correy}') print(f'votos pra Li = {li}') print(f'votos pro", "porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}') #letra d print(f'votos pro", "=)') def criaArquivoResultado(nome): try: b = open(nome, 'wt+') b.close() except: print('houve um probrema", "+= 1 return soma def porcentagens(lista , khan , correy , li ,", "{v[0]}') print('-'*40) #exportar arquivo resultado procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens , khan=khan, correy=correy, li=li ,", "arquivo: arquivo.write('-' * 40) arquivo.write('\\n') txt = 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' *", "votosLi(lista): soma = 0 for linha in lista: if 'Li' in linha: soma", "arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40) arquivo.write('\\n')", "print(f'Khan: {lista_porcentagens[0].__round__(2)} 
({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40)", "print('-'*40) print(f'Total de votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li:", "print(f'O vencedor foi {v[0]} com um total de {v[1]} porcento dos votos') #letra", "votosCorrey(lista) li = votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li,", "i > maior: maior = i index = cont cont += 1 if", "cont cont += 1 if index == 0: ganhou = 'Khan' elif index", "* 40) with open(nome) as arquivo: print(arquivo.read()) #main arq = 'dados_elecao.txt' abreArquivo(arq) lista", "encontrado, vou criar um para voce =)') criaArquivoResultado(nome) print('feito!') else: print(f'arquivo {nome} encontrado", "'resultado.txt' with open(nome,'w') as arquivo: arquivo.write('-' * 40) arquivo.write('\\n') txt = 'Resultados eleitorais'", "maior = i index = cont cont += 1 if index == 0:", "por_otooley = float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def vencedor(lista_por):", "[] for linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def nomecandidatos(lista): nomes =", "nomecandidatos(lista): nomes = [] for i in range(len(lista)): nome = lista[i][2] if nome", "in lista: if 'Khan' in linha: soma+=1 return soma def votosCorrey(lista): soma =", "votosCorrey(lista): soma = 0 for linha in lista: if 'Correy' in linha: soma", ",oTooley , v): nome = 'resultado.txt' with open(nome,'w') as arquivo: arquivo.write('-' * 40)", "linha: soma+=1 return soma def votosCorrey(lista): soma = 0 for linha in lista:", "arquivo.write(txt.center(40)) 
arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-' * 40)", "Li = {li}') print(f'votos pro O\\'Tooley = {oTooley}') #letra e v = vencedor(lista_porcentagens)", "print('arquivo nao encontrado, vou criar um para voce =)') criaArquivoResultado(nome) print('feito!') else: print(f'arquivo", "lista[i][2] if nome != 'Khan' and nome != 'Li' and nome != 'Correy':", "'O\\'tooley' eleito = [] eleito.append(ganhou) eleito.append(maior) return eleito def procuraArquivoResultado(): nome = 'resultado.txt'", "de votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})')", "encontrado com sucesso =)') def primeiralista(nome): a = open(nome, 'rt') lista1 = []", "nao encontrado') else: print('Arquivo encontrado com sucesso =)') def primeiralista(nome): a = open(nome,", "abreArquivo(nome): try: a = open(nome, 'rt') a.close() except: print('arquivo nao encontrado') else: print('Arquivo", "expressos {tam}') #letra b candidatos = nomecandidatos(lista) print(f'lista de candidatos: {candidatos}') #letra c", "#print(lista[0]) #letra a tam = len(lista) print(f'O número total de votos expressos {tam}')", "linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def nomecandidatos(lista): nomes = [] for", "range(len(lista)): nome = lista[i][2] if nome not in nomes: nomes.append(nome) return nomes def", "candidatos: {candidatos}') #letra c porcentagem de cada candidato khan = votosKhan(lista) correy =", "eleito.append(ganhou) eleito.append(maior) return eleito def procuraArquivoResultado(): nome = 'resultado.txt' try: b = open(nome,", "cada candidato khan = votosKhan(lista) correy = votosCorrey(lista) li = votosLi(lista) oTooley =", "= {khan}') print(f'votos pro Correy = {correy}') print(f'votos pra Li = {li}') print(f'votos", "index = 
int ganhou = '' for i in lista_por: if i >", ", lista_porcentagens , khan , correy , li ,oTooley , v): nome =", "probrema na criaçao do arquivo') else: print(f'arquivo {nome} criado com sucesso =)') def", "txt = 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total de votos:", "soma += 1 return soma def porcentagens(lista , khan , correy , li", "1 return soma def votosLi(lista): soma = 0 for linha in lista: if", "== 1: ganhou = 'Correy' elif index == 2: ganhou = 'Li' elif", "'Correy' elif index == 2: ganhou = 'Li' elif index == 3: ganhou", "lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def vencedor(lista_por): maior = 0 cont =", "float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def vencedor(lista_por): maior =", "arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-'", "nomecandidatos(lista) print(f'lista de candidatos: {candidatos}') #letra c porcentagem de cada candidato khan =", "maior = 0 cont = 0 index = int ganhou = '' for", "ganhou = 'Correy' elif index == 2: ganhou = 'Li' elif index ==", "eleito.append(maior) return eleito def procuraArquivoResultado(): nome = 'resultado.txt' try: b = open(nome, 'rt')", "{lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' *", "= 'Correy' 
elif index == 2: ganhou = 'Li' elif index == 3:", "lista_porcentagens , khan , correy , li ,oTooley , v): nome = 'resultado.txt'", "=)') criaArquivoResultado(nome) print('feito!') else: print(f'arquivo {nome} encontrado com sucesso =)') def criaArquivoResultado(nome): try:", "de cada candidato khan = votosKhan(lista) correy = votosCorrey(lista) li = votosLi(lista) oTooley", ", correy , li ,oTooley , v): nome = 'resultado.txt' with open(nome,'w') as", "linha: soma += 1 return soma def votosOTooley(lista): soma = 0 for i", "linha: soma += 1 return soma def votosLi(lista): soma = 0 for linha", "({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n')", "#letra e v = vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]} com um total de", "open(nome, 'rt') lista1 = [] for linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1", "= len(lista) print(f'O número total de votos expressos {tam}') #letra b candidatos =", "arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n')", "lista1 def nomecandidatos(lista): nomes = [] for i in range(len(lista)): nome = lista[i][2]", "== 2: ganhou = 'Li' elif index == 3: ganhou = 'O\\'tooley' eleito", ", khan , correy , li , otooley): t_votos = len(lista) por_khan =", "= float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por", "nome = lista[i][2] if nome != 'Khan' and nome != 'Li' and nome", "v): nome 
= 'resultado.txt' with open(nome,'w') as arquivo: arquivo.write('-' * 40) arquivo.write('\\n') txt", "total de votos expressos {tam}') #letra b candidatos = nomecandidatos(lista) print(f'lista de candidatos:", "with open(nome) as arquivo: print(arquivo.read()) #main arq = 'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq)", "li=li, otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}') #letra d print(f'votos pro khan = {khan}')", "soma def votosOTooley(lista): soma = 0 for i in range(len(lista)): nome = lista[i][2]", "def procuraArquivoResultado(): nome = 'resultado.txt' try: b = open(nome, 'rt') b.close() except: print('arquivo", "nome != 'Correy': soma += 1 return soma def porcentagens(lista , khan ,", "'resultado.txt' try: b = open(nome, 'rt') b.close() except: print('arquivo nao encontrado, vou criar", "eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-' *", "def porcentagens(lista , khan , correy , li , otooley): t_votos = len(lista)", "nomes def votosKhan(lista): soma = 0 for linha in lista: if 'Khan' in", "0 for linha in lista: if 'Correy' in linha: soma += 1 return", "arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40) with open(nome) as arquivo: print(arquivo.read()) #main arq =", "[] eleito.append(ganhou) eleito.append(maior) return eleito def procuraArquivoResultado(): nome = 'resultado.txt' try: b =", "#letra c porcentagem de cada candidato khan = votosKhan(lista) correy = votosCorrey(lista) li", "for linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def nomecandidatos(lista): nomes = []", "range(len(lista)): nome = lista[i][2] if nome != 'Khan' and nome != 'Li' and", "li ,oTooley , v): nome = 'resultado.txt' with open(nome,'w') as arquivo: arquivo.write('-' *", "foi {v[0]} com um total de {v[1]} porcento dos votos') #letra f print('-'*40)", "+= 1 return soma 
def votosOTooley(lista): soma = 0 for i in range(len(lista)):", "f print('-'*40) txt = 'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de votos: {tam}') print('-'*40)", "= vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]} com um total de {v[1]} porcento dos", "{lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor:", "arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-'", "'Correy' in linha: soma += 1 return soma def votosLi(lista): soma = 0", "nomes: nomes.append(nome) return nomes def votosKhan(lista): soma = 0 for linha in lista:", "0 for linha in lista: if 'Khan' in linha: soma+=1 return soma def", "try: b = open(nome, 'wt+') b.close() except: print('houve um probrema na criaçao do", "if 'Correy' in linha: soma += 1 return soma def votosLi(lista): soma =", "in lista_por: if i > maior: maior = i index = cont cont", "nome = lista[i][2] if nome not in nomes: nomes.append(nome) return nomes def votosKhan(lista):", "print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo resultado", ", li ,oTooley , v): nome = 'resultado.txt' with open(nome,'w') as arquivo: arquivo.write('-'", "lista1 = [] for linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def nomecandidatos(lista):", "= [] for linha in a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def nomecandidatos(lista): nomes", "lista_por =[] 
lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def vencedor(lista_por): maior = 0", "arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy:", "print(f'Total de votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)}", "in linha: soma += 1 return soma def votosOTooley(lista): soma = 0 for", "votos expressos {tam}') #letra b candidatos = nomecandidatos(lista) print(f'lista de candidatos: {candidatos}') #letra", "print(f'O número total de votos expressos {tam}') #letra b candidatos = nomecandidatos(lista) print(f'lista", "{candidatos}') #letra c porcentagem de cada candidato khan = votosKhan(lista) correy = votosCorrey(lista)", "pro O\\'Tooley = {oTooley}') #letra e v = vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]}", "lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def vencedor(lista_por): maior = 0 cont = 0", "float(khan*100)/t_votos por_correy = float(correy*100)/t_votos por_li = float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan)", "len(lista) print(f'O número total de votos expressos {tam}') #letra b candidatos = nomecandidatos(lista)", "'' for i in lista_por: if i > maior: maior = i index", "nome = 'resultado.txt' with open(nome,'w') as arquivo: arquivo.write('-' * 40) arquivo.write('\\n') txt =", "return lista1 def nomecandidatos(lista): nomes = [] for i in range(len(lista)): nome =", "b.close() except: print('houve um probrema na criaçao do arquivo') else: print(f'arquivo {nome} criado", "nao encontrado, vou criar um para voce =)') criaArquivoResultado(nome) 
print('feito!') else: print(f'arquivo {nome}", "= 'Khan' elif index == 1: ganhou = 'Correy' elif index == 2:", "{li}') print(f'votos pro O\\'Tooley = {oTooley}') #letra e v = vencedor(lista_porcentagens) print(f'O vencedor", "b.close() except: print('arquivo nao encontrado, vou criar um para voce =)') criaArquivoResultado(nome) print('feito!')", "t_votos = len(lista) por_khan = float(khan*100)/t_votos por_correy = float(correy*100)/t_votos por_li = float(li*100)/t_votos por_otooley", "* 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley:", "pro khan = {khan}') print(f'votos pro Correy = {correy}') print(f'votos pra Li =", "= 'resultado.txt' try: b = open(nome, 'rt') b.close() except: print('arquivo nao encontrado, vou", "print(arquivo.read()) #main arq = 'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq) #print(lista[0]) #letra a tam", "and nome != 'Correy': soma += 1 return soma def porcentagens(lista , khan", "if i > maior: maior = i index = cont cont += 1", "= 'Li' elif index == 3: ganhou = 'O\\'tooley' eleito = [] eleito.append(ganhou)", "arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan:", "votosKhan(lista) correy = votosCorrey(lista) li = votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens = porcentagens(lista,", "[] for i in range(len(lista)): nome = lista[i][2] if nome not in nomes:", "= primeiralista(arq) #print(lista[0]) #letra a tam = len(lista) print(f'O número total de votos", "return nomes def votosKhan(lista): soma = 0 for linha in lista: if 'Khan'", "cont = 0 index = int ganhou = '' for i in lista_por:", "{lista_porcentagens}') #letra d print(f'votos pro khan = {khan}') print(f'votos pro Correy = 
{correy}')", "= nomecandidatos(lista) print(f'lista de candidatos: {candidatos}') #letra c porcentagem de cada candidato khan", "index == 3: ganhou = 'O\\'tooley' eleito = [] eleito.append(ganhou) eleito.append(maior) return eleito", "vencedor(lista_por): maior = 0 cont = 0 index = int ganhou = ''", "for linha in lista: if 'Khan' in linha: soma+=1 return soma def votosCorrey(lista):", "= float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def vencedor(lista_por): maior", "* 40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40) with open(nome) as arquivo: print(arquivo.read())", "print(f'votos pra Li = {li}') print(f'votos pro O\\'Tooley = {oTooley}') #letra e v", "= 0 for linha in lista: if 'Correy' in linha: soma += 1", "* 40) arquivo.write('\\n') txt = 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n')", "({khan})') print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}')", "i in lista_por: if i > maior: maior = i index = cont", "'Li' and nome != 'Correy': soma += 1 return soma def porcentagens(lista ,", "candidatos = nomecandidatos(lista) print(f'lista de candidatos: {candidatos}') #letra c porcentagem de cada candidato", "return soma def votosOTooley(lista): soma = 0 for i in range(len(lista)): nome =", "de candidatos: {candidatos}') #letra c porcentagem de cada candidato khan = votosKhan(lista) correy", "arquivo') else: print(f'arquivo {nome} criado com sucesso =)') def escreveResultado(tam , lista_porcentagens ,", "def escreveResultado(tam , lista_porcentagens , khan , correy , li ,oTooley , v):", "por_correy = float(correy*100)/t_votos por_li = float(li*100)/t_votos 
por_otooley = float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy)", "votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}') #letra", "{lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' *", "do arquivo') else: print(f'arquivo {nome} criado com sucesso =)') def escreveResultado(tam , lista_porcentagens", "txt = 'Resultados eleitorais' print(txt.center(40)) print('-'*40) print(f'Total de votos: {tam}') print('-'*40) print(f'Khan: {lista_porcentagens[0].__round__(2)}", "= [] for i in range(len(lista)): nome = lista[i][2] if nome not in", "arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)}", "criar um para voce =)') criaArquivoResultado(nome) print('feito!') else: print(f'arquivo {nome} encontrado com sucesso", "0 cont = 0 index = int ganhou = '' for i in", "{v[0]} com um total de {v[1]} porcento dos votos') #letra f print('-'*40) txt", "= 0 for linha in lista: if 'Li' in linha: soma += 1", "soma def votosLi(lista): soma = 0 for linha in lista: if 'Li' in", "#letra d print(f'votos pro khan = {khan}') print(f'votos pro Correy = {correy}') print(f'votos", "soma = 0 for i in range(len(lista)): nome = lista[i][2] if nome !=", "= float(khan*100)/t_votos por_correy = float(correy*100)/t_votos por_li = float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos lista_por =[]", "com sucesso =)') def criaArquivoResultado(nome): try: b = open(nome, 'wt+') b.close() except: print('houve", "{lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40) 
arquivo.write('\\n') arquivo.write(f'Vencedor: {v[0]}\\n') arquivo.write('-' * 40) with open(nome)", "print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})') print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})') print(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40)", "in range(len(lista)): nome = lista[i][2] if nome not in nomes: nomes.append(nome) return nomes", "index == 0: ganhou = 'Khan' elif index == 1: ganhou = 'Correy'", "== 0: ganhou = 'Khan' elif index == 1: ganhou = 'Correy' elif", "= int ganhou = '' for i in lista_por: if i > maior:", "open(nome,'w') as arquivo: arquivo.write('-' * 40) arquivo.write('\\n') txt = 'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n')", "{lista_porcentagens[3].__round__(2)} ({oTooley})') print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo resultado procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens ,", "otooley): t_votos = len(lista) por_khan = float(khan*100)/t_votos por_correy = float(correy*100)/t_votos por_li = float(li*100)/t_votos", "correy = votosCorrey(lista) li = votosLi(lista) oTooley = votosOTooley(lista) lista_porcentagens = porcentagens(lista, khan=khan,", "lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def nomecandidatos(lista): nomes = [] for i in range(len(lista)):", "{tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\\n') arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)}", "lista: if 'Khan' in linha: soma+=1 return soma def votosCorrey(lista): soma = 0", "soma = 0 for linha in lista: if 'Correy' in linha: soma +=", "2: ganhou = 'Li' elif index == 3: ganhou = 'O\\'tooley' eleito =", "print('-'*40) print(f'Vencedor: {v[0]}') print('-'*40) #exportar arquivo 
resultado procuraArquivoResultado() escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens , khan=khan, correy=correy,", "{lista_porcentagens[1].__round__(2)} ({correy})\\n') arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\\n') arquivo.write(f'O\\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Vencedor:", "otooley=oTooley) print(f'lista de porcentagens: {lista_porcentagens}') #letra d print(f'votos pro khan = {khan}') print(f'votos", "elif index == 2: ganhou = 'Li' elif index == 3: ganhou =", "nome != 'Khan' and nome != 'Li' and nome != 'Correy': soma +=", "<filename>Desafios/Next_Desafio_01/main_Desafio_02.py def abreArquivo(nome): try: a = open(nome, 'rt') a.close() except: print('arquivo nao encontrado')", "{nome} criado com sucesso =)') def escreveResultado(tam , lista_porcentagens , khan , correy", "'rt') b.close() except: print('arquivo nao encontrado, vou criar um para voce =)') criaArquivoResultado(nome)", "a: lista1.append(linha.replace('\\n','').split(',')) lista1.pop(0) return lista1 def nomecandidatos(lista): nomes = [] for i in", "com sucesso =)') def primeiralista(nome): a = open(nome, 'rt') lista1 = [] for", "b = open(nome, 'rt') b.close() except: print('arquivo nao encontrado, vou criar um para", "arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-' * 40) arquivo.write('\\n')", "'Resultados eleitorais' arquivo.write(txt.center(40)) arquivo.write('\\n') arquivo.write('-' * 40) arquivo.write('\\n') arquivo.write(f'Total de votos: {tam}\\n') arquivo.write('-'", "com sucesso =)') def escreveResultado(tam , lista_porcentagens , khan , correy , li", "vencedor(lista_porcentagens) print(f'O vencedor foi {v[0]} com um total de {v[1]} porcento dos votos')", "'dados_elecao.txt' abreArquivo(arq) lista = primeiralista(arq) #print(lista[0]) #letra a tam = len(lista) print(f'O número", 
"ganhou = '' for i in lista_por: if i > maior: maior =", "float(li*100)/t_votos por_otooley = float(otooley*100)/t_votos lista_por =[] lista_por.append(por_khan) lista_por.append(por_correy) lista_por.append(por_li) lista_por.append(por_otooley) return lista_por def", "= 'O\\'tooley' eleito = [] eleito.append(ganhou) eleito.append(maior) return eleito def procuraArquivoResultado(): nome =" ]
[ "download') def check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\" checks if current rows is less", "= pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated securities df; only ticker, gvkey, and iid", "tablename), date_cols=['datadate']) def testing_db(): \"\"\" looks like some code that tests some db", "library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes a really long time... # # df", "dfs = [] for i in tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i))))", "row by row, and can't get it working anyhow # need to figure", "if table == 'idxcst_his': # converts date columns to datetime df['from'] = pd.to_datetime(df['from'],", "a sql ordering issue or something nobs = 10000000 for i, start in", "price changes each year annualized_return = (np.prod([1 + p for p in price_chg_1y.values()])", "const_df.shape[0] - 1 # else: # rows_to_get = nrows # offset = 0", "+ chunk_size > len(remaining_gvs): gvkeys_strings = [\"'\" + gv + \"'\" for gv", "IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes a really long", "smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others not in", "symbol ] df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'),", "'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', 'prchd', 'prcld', 'prcod'] # WARNING: does", "to 0s # TODO: if not latest date; use date of datafile as", ">= start] constituent_companies = OrderedDict() # constituent_tickers = OrderedDict() lengths = [] #", "as annual fundamentals # annual index fundamentals db.get_table('comp', 'idx_ann') # monthly security data", "sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600 index data starts in 1994 years = 
sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year", "# need to figure out where new things are added, but do later", "df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df gc.collect() def download_common_stock_price_history(db, update=True, table='secd', library='comp'):", "df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table, library='comp'): \"\"\" downloads table if", "# volume 'datadate', 'eps', 'prccd', 'prchd', 'prcld', 'prcod', 'tic' # maybe want to", "non-null object dtypes: float64(20), object(21) memory usage: 312.8+ MB so we can ignore", "after it's saved, good to go # TODO: put this in a clean", "last_idx += 1 # make an index for creating dataframe with last price,", "WARNING: does not appear to work properly. probably a sql ordering issue or", "historical security data # goes through all securities and downloads historical price data", "# # df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library,", "or something # for gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey, 'ticker'] =", "nrows elif nrows < current_rows: print('number of available rows is less than number", "\"\"\" # merge historical constituents for sp600 with daily price, eps, and market", "changes, and use last price to get price change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for", "so if it is the 'thru' date, then shouldn't be included # but", "Ltd which I don't know what it is) SP1500: S&P 1500 Super Composite", "datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru']", "'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', 'prchd', 'prcld', 'prcod'] # WARNING:", "# # should be like ('item 1', 'item 
2', 'item 3') # gvkeys_str", "= gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] #", "each day start = single_idx_df['from'].min() # get todays date and reset hour, min,", "downloads anew TODO: if update is True, then will try to find existing", "is not in common_stocks, figure out why full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y", "that tests some db functions and explores them \"\"\" df = db.get_table('comp', 'security',", "tests some db functions and explores them \"\"\" df = db.get_table('comp', 'security', obs=10)", "# seems to be the same as annual fundamentals # annual index fundamentals", "assume others not in here are 0 for now # get the overall", "and remove iid and gvkey -- should just merge or something # for", "dlrsni 01 is acquired, 02 is bankrupt, 03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm #", "index constituents \"\"\" import os import gc import time import datetime from collections", "first_days[5:])): # 2000 onward is [5:] ; market cap not available until 1999", "seems to be the same as annual fundamentals # annual index fundamentals db.get_table('comp',", "if update=True, will get latest date in current df, then get everything after", "= pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys = df['gvkey'].unique() # I think 0 or", "df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep common stocks (tpci = 0 and", "50 -- should take about 20m for 2k # took 1282s for 2127", "parse_dates=['from', 'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') # only need to do", "for now, just use gvkeyx which is # 030824 for sp600 # df2", 
"gets historical constituents from WRDS file common indexes as represented in the idx_ann", "for each const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) /", "1000000 non-null object curcddv 5861 non-null object capgn 29 non-null float64 cheqv 85", "out the db is fast to download because this is a small table...", "smallest_20_1y_chg = OrderedDict() # TODO: get latest price if stopped trading during the", "non-null float64 prcstd 999696 non-null float64 trfd 733884 non-null float64 exchg 1000000 non-null", "fundamentals # annual index fundamentals db.get_table('comp', 'idx_ann') # monthly security data db.get_table('comp', 'secm',", "and reset hour, min, sec to 0s # TODO: if not latest date;", "TODO: figure out why a few hundred are missing in the daily data", "'iid']) # get stocks' gvkeys for sql search -- no longer needed #", "pricing db.get_table('otc', 'endofday', obs=100) # gets acquisition spending; aqcy column df4 = db.raw_sql('select", "should be like ('item 1', 'item 2', 'item 3') # gvkeys_str = '('", "(there's another one with Wed instead of Ltd which I don't know what", "'iid', 'tic']], on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey',", "statement extract daily db.get_table('comp', 'funda') # seems to be the same as annual", "acquired, 02 is bankrupt, 03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing", "within each library you can list tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't", "5780 non-null float64 divd 5694 non-null float64 divdpaydateind 0 non-null object divsp 129", "= {'key': 'data', 'mode': 'a', 
'append': True, 'format': 'table', 'complib': 'blosc', 'complevel': 9}", "# parse dates not working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH", "need currency, all USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc']", "smallest 20 market caps, get close price # get close price a year", "is the 'thru' date, then shouldn't be included # but stocks were added", "updated data and append # used once to write data # df.to_hdf(FILEPATH +", "+ ', '.join([\"'\" + s + \"'\" for s in common_securities['gvkey']]) + ')'", "'endofday', obs=100) # gets acquisition spending; aqcy column df4 = db.raw_sql('select * from", "on=['gvkey', 'iid']) # get adjusted closes for constituents now and 1y in future", "and index name - comes from idx_ann table in compd library -- need", "the db is fast to download because this is a small table... 
#", "20 SP600 stocks yearly returns, annualized return = ' + str(round(annualized_return, 1))) plt.ylabel('%", "2 non-null object anncdate 2776 non-null object capgnpaydate 29 non-null object cheqvpaydate 82", "in compd library -- need to rewrite query idxcst_his has the historical index", "ticker symbol ] df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename +", "999696 non-null float64 cshoc 439670 non-null float64 cshtrd 999677 non-null float64 dvi 379938", "\"\"\" idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx)", "Data” on page 91 and on (chapter 6) 'cshoc', # shares outstanding 'cshtrd',", "# ends up with very slow sql query; avoid securities = pd.read_hdf(FILEPATH +", "library -- need to rewrite query idxcst_his has the historical index constituents \"\"\"", "current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del securities", "= d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] > d)] current_companies =", "idxcst_his .h5 files have same name as table \"\"\" df_filepath = FILEPATH +", "num_iids[num_iids > 1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD", "below # pd.value_counts(lengths) # plt.hist(lengths) # plt.show() # TODO: # need to check", "the Data” on page 91 and on (chapter 6) 'cshoc', # shares outstanding", "if update is True, then will try to find existing dataframe and only", "'iid', 'prccd', 'prchd', 'prcld', 'prcod'] # WARNING: does not appear to work properly.", "elif tablename == 'idxcst_his': download_index_constituents() else: print('not one of predefined tables to download')", "+ 'hdf/secd.hdf' current_df = 
pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for tpci", "query takes -- about 2s # start = time.time() # df = db.raw_sql('select", "concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot as plt import numpy as np import pandas_market_calendars", "to speed up # takes about 10s for nasdaq 100 for d in", "on 'from' date # use dataframe masking date_string = d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from']", "TODO: check if append works with out-of-order columns # TODO: find next stock", "# TODO: get tickers for start, end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward", "0s # TODO: if not latest date; use date of datafile as latest", "= list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys: last_idx += 1 # make an index", "price a year later, calculate overall return # repeat ad nauseum # common_stocks", "'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld', # low 'prcod',", "missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565'] # TODO:", "market caps, get close price # get close price a year later, calculate", "'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld', # low 'prcod'] #", "0: print(\"no data to be found!\") return # convert datadate to datetime64 df['datadate']", "current_stocks['co_tic'] # company tickers constituent_companies[date_string] = current_companies # constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) #", "bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] == 0: # everything was acquired/bankrupt, etc, like in", "too, not sure ] other_cols = ['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd',", "to enter credentials to log in \"\"\" wrds_uname = 
os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password')", "% price change for each const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future']", "index='S&P Smallcap 600 Index'): # adapted from beat_market_analysis constituent_utils.py \"\"\" gets historical constituents", "# get stocks missing from price changes, and use last price to get", "i, start in enumerate(range(0, nrows, nobs), 1): print('on part', str(i)) df = db.get_table(library,", "not \"\"\" if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else: current_rows =", "'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey', 'iid', 'paydate',", "False if not \"\"\" if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else:", "get latest date already downloaded and use sql query to gegt updates; then", "which are stocks you can buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) #", "gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic'] def", "S&P Midcap 400 Index SP500: S&P 500 Comp-Ltd (there's another one with Wed", "= pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df", "per cpu for 8 cores; default is 5 per CPU # seems like", "len(gvkeyx) > 1: print('more than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index]) return gvkeyx", "df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + 
'_min_part_' +", "to go # TODO: put this in a clean function and do when", "'curcdd', 'curcddv', 'cusip', 'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo',", "lengths = [] # TODO: multiprocessing to speed up # takes about 10s", "gv), date_cols=['datadate']) # dfs.append(df) # testing # df = db.raw_sql('select {} from {}.{}", "+ 1, chunk_size)): # first make strings out of gvkeys for SQL query", "with last price, so we can append it to the bottom_20_price_chg df price_chg_dict", "float64 div 5780 non-null float64 divd 5694 non-null float64 divdpaydateind 0 non-null object", "-- not quite gvkey_grp = common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1]", "table has changed; if so, downloads anew TODO: if update is True, then", "obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100) # compustat data #", "to HDF5 for tables like 'security', check if any more rows and grab", "library, tablename, gv), date_cols=['datadate']) # dfs.append(df) # testing # df = db.raw_sql('select {}", "get latest date in current df, then get everything after that and add", "= {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate']) # dfs.append(df) # testing # df =", "so we can append it to the bottom_20_price_chg df price_chg_dict = {} iid", "non-null object conm 1000000 non-null object curcddv 5861 non-null object capgn 29 non-null", "gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate']) # dfs.append(df) # testing # df", "= {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate']) return df def download_all_security_data(): \"\"\" downloads full", "single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get stocks' gvkeys for sql search -- no longer", "sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') 
sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600 index", "439670 non-null float64 cshtrd 999677 non-null float64 dvi 379938 non-null float64 eps 309295", "/ price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append works with", "1 # make an index for creating dataframe with last price, so we", "checks if current rows is less than rows in db; returns True is", "'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600", "print('more than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index]) return gvkeyx = gvkeyx[0] #", "datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap df['market_cap'] = df['cshoc'] * df['prccd']", "ch in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)): # first make strings out of", "a small table... 
# no need to update row by row, and can't", "ch + chunk_size > len(remaining_gvs): gvkeys_strings = [\"'\" + gv + \"'\" for", "# big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set date instead of hard copy big_df.to_hdf(FILEPATH +", "table == 'idxcst_his': # converts date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True)", "# rows_to_get = nrows # offset = 0 df = db.get_table(library=library, table=table, obs=nrows,", "SQL query start = ch if ch + chunk_size > len(remaining_gvs): gvkeys_strings =", "check to make sure only one iid per gvkey -- not quite gvkey_grp", "{}.{} WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes", "os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') # tries to use pgpass file; see here: #", "of datafile as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace", "dvrated 2875 non-null float64 paydateind 2 non-null object anncdate 2776 non-null object capgnpaydate", "in jobs: # print(gv) dfs.append(j.result()) end = time.time() print('took', int(end - start), 'seconds')", "ticker and remove iid and gvkey -- should just merge or something #", "- 1 # else: # rows_to_get = nrows # offset = 0 df", "'aco_indsta') # index prices daily db.get_table('comp', 'idx_mth') # simplified financial statement extract daily", "from tqdm import tqdm import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data',", "start in enumerate(range(0, nrows, nobs), 1): print('on part', str(i)) df = db.get_table(library, tablename,", "+ \"'\" for gv in remaining_gvs[start:ch + chunk_size]] start = time.time() jobs =", "Smallcap 600 Index'): # adapted from beat_market_analysis constituent_utils.py \"\"\" gets historical constituents from", "changed; if so, downloads anew TODO: if 
update is True, then will try", "expect it to be about 100GB in memory cols_to_use = ['ajexdi', 'cshoc', 'cshtrd',", "91 and on (chapter 6) 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate',", "as np import pandas_market_calendars as mcal import pandas as pd from tqdm import", "df.shape[0] > 0: print(\"no data to be found!\") return # convert datadate to", "# only keep common stocks (tpci = 0 and F) common_securities_short = common_securities[['gvkey',", "in the idx_ann table: SP600: S&P Smallcap 600 Index SP400: S&P Midcap 400", "bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append works with out-of-order columns", "exchg 1000000 non-null float64 secstat 1000000 non-null object tpci 1000000 non-null object cik", "where datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str = 'select {} from {}.{} WHERE", "# chunk through remaining gvkeys in 10 chunks chunk_size = len(remaining_gvs) // 10", "not in here are 0 for now # get the overall price changes", "# print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs) # get only common stocks", "# get adjusted closes for constituents now and 1y in future const_current_price =", "gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'): #", "db.get_table('comp', 'secm', obs=100) # index constituents db.get_table('comp', 'idxcst_his') # market cap/price, daily data", "for storing all updated data and append # used once to write data", "'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers =", "with acquisitions: dlrsni 01 is acquired, 02 is bankrupt, 03 is liquidated #", "non-null object divsp 129 non-null float64 dvrated 2875 non-null 
float64 paydateind 2 non-null", "'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])] # # make string for SQL query: WHERE", "appends to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del securities del df del", "these seem to be weird tickers; buyouts or something # ignore stocks on", "date, then shouldn't be included # but stocks were added on 'from' date,", "paydateind 2 non-null object anncdate 2776 non-null object capgnpaydate 29 non-null object cheqvpaydate", "29 non-null object cheqvpaydate 82 non-null object divdpaydate 5691 non-null object divsppaydate 128", "and downloads historical price data # chunk through remaining gvkeys in 10 chunks", "date_cols=['datadate']) def testing_db(): \"\"\" looks like some code that tests some db functions", "string for SQL query: WHERE IN # # should be like ('item 1',", "db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) #", "tablename, sp600_gvkeys_string), date_cols=['datadate']) # see how long one query takes -- about 2s", "on 'thru', so if it is the 'thru' date, then shouldn't be included", "needs updating') return False, nrows def download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete for now;", "**hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep common stocks (tpci = 0", "'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi']", "== 'idxcst_his': # converts date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru']", "axis=1) # get % price change for each const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates()", "sp600_df = df[df['gvkeyx'] == '030824'] sp600_gvkeys = 
np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" + gv", "copy warning but can't get rid of it... single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse", "= current_tickers lengths.append(current_companies.shape[0]) # look at number of constituents as a histogram; mostly", "in future const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi',", "== m]['iid'].values if len(iid) > 1: print('shit, iid length >1') iid = iid[0]", "dvi 379938 non-null float64 eps 309295 non-null float64 epsmo 309295 non-null float64 prccd", "gvkey = \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq 100 constituents then looks", "what returns are on yearly rebalance for 20 smallest marketcap stocks # just", "gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'): # adapted from beat_market_analysis constituent_utils.py \"\"\"", "and iid sec_short = securities[['tic', 'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid']) #", "m in missing_gvkeys: last_idx += 1 # make an index for creating dataframe", "constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]): # first year starts on", "float64 epsmo 309295 non-null float64 prccd 999696 non-null float64 prchd 986959 non-null float64", "ajexdi 999696 non-null float64 cshoc 439670 non-null float64 cshtrd 999677 non-null float64 dvi", "ThreadPoolExecutor import matplotlib.pyplot as plt import numpy as np import pandas_market_calendars as mcal", "= db.raw_sql('select {} from {}.{} WHERE gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename),", "in chunks because it is too huge -- expect it to be about", "del big_df gc.collect() # 30 seconds per 50 -- should take about 20m", "in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)): # first make 
strings out of gvkeys", "'complib': 'blosc', 'complevel': 9} secd_cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD /", "or F for tpci are common or ADR, which are stocks you can", "or something nobs = 10000000 for i, start in enumerate(range(0, nrows, nobs), 1):", "to gegt updates; then save to HDF5 for tables like 'security', check if", "date in current df, then get everything after that and add to current", "match historical data if date_range is None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: #", "last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid'] = iid # TODO: check this isn't more", "= 0 # turns out the db is fast to download because this", "'dvi', 'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic',", "for multiple companies # get unique dates where changes were made unique_dates =", "and ADR, or tpci column is 0 or F) if update=True, will get", "db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves credentials, but not pgpass working # db.create_pgpass_file()", "= missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565'] # TODO: is", "df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(secd_cols, library, tablename, gvkey),", "pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df gc.collect()", "stock in bottom 20 at time the other was put out, and see", "to rewrite query idxcst_his has the historical index constituents \"\"\" import os import", "current_stocks[['gvkey', 'iid']] # company names # current_tickers = current_stocks['co_tic'] # company tickers constituent_companies[date_string]", "table=table, library=library) if up_to_date: return offset = 0 # turns 
out the db", "get smallest 20 market caps, get close price # get close price a", "index gvkeyx and index name - comes from idx_ann table in compd library", "{}.{} WHERE gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def testing_db(): \"\"\"", "= os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') # tries to use pgpass file; see here:", "takes about 10s for nasdaq 100 for d in tqdm(date_range): # if date", "= mcal.date_range(start=start, end=end) # gets only dates valid for NYSE -- doesn't seem", "date; False if not \"\"\" if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows = current_df.shape[0]", "tickers for start, end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is [5:] ;", "= constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end] #", "annualized_return = (np.prod([1 + p for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1)", "bankruptcy or acquisitions, etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey',", "cutoff at earliest date for index date_range = np.array(sorted(date_range)) date_range = date_range[date_range >=", "\"\"\" creates connection to WRDS database need to enter credentials to log in", "{} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate']) # dfs.append(df)", "I think 0 or F for tpci are common or ADR, which are", "pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for tpci 0 or F #", "get latest date and download updates \"\"\" df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df", "already downloaded and use sql query 
to gegt updates; then save to HDF5", "common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to be weird tickers;", "sp600 I think this was actually used to get all historical stock data", "starts on sept year_dates = [d for d in sp600_dates if d.year ==", "= sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600 index data starts in 1994 years =", "+ gv + \"'\" for gv in sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) #", "figure out why a few hundred are missing in the daily data from", "last price, so we can append it to the bottom_20_price_chg df price_chg_dict =", "than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index]) return gvkeyx = gvkeyx[0] # TODO:", "download_entire_table(tablename, library='comp'): \"\"\" downloads an entire table by name; library also required. default", "# see how long one query takes -- about 2s # start =", "return gvkeyx = gvkeyx[0] # TODO: get latest file # parse dates not", "date for index date_range = np.array(sorted(date_range)) date_range = date_range[date_range >= start] constituent_companies =", "historical price data # chunk through remaining gvkeys in 10 chunks chunk_size =", "in the daily data from the constituent list # AIR ('001004') is not", "LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes a really long time... 
#", "FILEPATH + 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath) # last_entry =", "# df['thru'] = pd.to_datetime(df['thru'], utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru'] = df['thru'].dt.tz_convert('US/Eastern')", "up_to_date: return offset = 0 # turns out the db is fast to", "m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg =", "+ gv + \"'\" for gv in remaining_gvs[start:ch + chunk_size]] start = time.time()", "one of predefined tables to download') def check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\" checks", "float64 trfd 733884 non-null float64 exchg 1000000 non-null float64 secstat 1000000 non-null object", "'idxcst_his' # check if any new rows df_filepath = FILEPATH + 'hdf/idxcst_his.hdf' up_to_date,", "'prcld', # low 'prcod', # open 'tic' # ticker symbol ] df =", "* 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks yearly returns,", "= get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]): # first year starts on sept year_dates", "# current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del", "] df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings)", "20 # assume others not in here are 0 for now # get", "to filter by iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price", "for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH + 
'hdf/idxcst_his.hdf') # only need", "common_df.drop('curcdd', inplace=True, axis=1) # drop currency column # write existing data as hdf", "df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table, library='comp'): \"\"\"", "db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100) # compustat data # short", "them \"\"\" df = db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo',", "-- need to rewrite query idxcst_his has the historical index constituents \"\"\" import", "mcal.date_range(start=start, end=end) # gets only dates valid for NYSE -- doesn't seem to", "TODO: get latest date already downloaded and use sql query to gegt updates;", "FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data', 'mode': 'w', 'complib': 'blosc', 'complevel': 9}", "df def download_all_security_data(): \"\"\" downloads full security data history for sp600 I think", "for each day start = single_idx_df['from'].min() # get todays date and reset hour,", "= pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine with securities", "masking date_string = d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] > d)]", "non-null float64 ajexdi 999696 non-null float64 cshoc 439670 non-null float64 cshtrd 999677 non-null", "# check to make sure only one iid per gvkey -- not quite", "100GB in memory cols_to_use = ['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey', 'iid',", "and add to current df \"\"\" # filename from first iteration # secd_filename", "here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, 
wrds_password=wrds_pass) # saves credentials, but not pgpass", "= securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'): # adapted from", "= db.raw_sql('select {} from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate'])", "price data for securities cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD /", "'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH +", "tzinfo=None).tz_localize('US/Eastern') # replace NaT with tomorrow's date # gives copy warning but can't", "offset = 0 # turns out the db is fast to download because", "print('took', int(end - start), 'seconds') big_df = pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate']", "# df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to join up with other dataframe", "1y in future const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey', 'iid',", "instead of hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del dfs del", "db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate']) return", "'iid']) securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO:", "in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is [5:] ; market cap not available", "= OrderedDict() # constituent_tickers = OrderedDict() lengths = [] # TODO: multiprocessing to", "gvkey = {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate']) return df def download_all_security_data(): \"\"\" downloads", "= bottom_20 
bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] == 0: # everything", "took a few seconds even with 1M rows # query_str = 'select count(gvkey)", "def download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete for now; use download_small_table function instead gets", "date is within stock's from and thru, add to list # stocks were", "pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys = df['gvkey'].unique() # I think 0 or F", "to be found!\") return # convert datadate to datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') #", "etc # TODO: get tickers for start, end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000", "get % price change for each const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] =", "1999 for these stocks datestr = start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate']", "inplace=True) # check to make sure only one iid per gvkey -- not", "nauseum # common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap']", "# 30 seconds per 50 -- should take about 20m for 2k #", "you can buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to", "'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', 'prchd', 'prcld', 'prcod'] # WARNING: does not", "# index prices daily db.get_table('comp', 'idx_mth') # simplified financial statement extract daily db.get_table('comp',", "companies # get unique dates where changes were made unique_dates = 
set(single_idx_df['from'].unique()) |", "new stuff, or just grab whole table if cant figure out what new", "if not latest date; use date of datafile as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0,", "= iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price", "inplace=True, axis=1) # get % price change for each const_price_change = const_current_price.merge(const_future_price, on=['gvkey',", "parallel with ThreadPoolExecutor(max_workers=5) as executor: for gv in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs", "/ 20 # assume others not in here are 0 for now #", "for now # get the overall price changes each year annualized_return = (np.prod([1", "= df['cshoc'] * df['prccd'] # TODO: create file for storing all updated data", "db.get_table('comp', 'aco_indstq') # annual db.get_table('comp', 'aco_indsta') # index prices daily db.get_table('comp', 'idx_mth') #", "you can list tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't have permission?? db.list_tables('comp_global_daily')", "to be about 100GB in memory cols_to_use = ['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate',", "'item 3') # gvkeys_str = '(' + ', '.join([\"'\" + s + \"'\"", "actually used to get all historical stock data actually, not just sp600. 
TODO:", "**hdf_settings) # add ticker and remove iid and gvkey -- should just merge", "for nasdaq 100 for d in tqdm(date_range): # if date is within stock's", "return = ' + str(round(annualized_return, 1))) plt.ylabel('% return per year') plt.tight_layout() plt.show() #", "> 1: print('more than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index]) return gvkeyx =", "p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values())", "for sql search -- no longer needed # gvkeys = single_idx_df['gvkey'].values # create", "if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else: current_rows = 0 nrows", "the securities data db -- has historical price data for securities cols_to_use =", "https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing in price changes and check for bankruptcy or", "float64 dvi 379938 non-null float64 eps 309295 non-null float64 epsmo 309295 non-null float64", "get all historical stock data actually, not just sp600. 
TODO: get latest date", "year; figure out mergers/buyouts, etc # TODO: get tickers for start, end in", "'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to be weird tickers; buyouts or something", "as represented in the idx_ann table: SP600: S&P Smallcap 600 Index SP400: S&P", "from {}.{} WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) #", "append it to the bottom_20_price_chg df price_chg_dict = {} iid = bottom_20[bottom_20['gvkey'] ==", "(single_idx_df['thru'] > d)] current_companies = current_stocks[['gvkey', 'iid']] # company names # current_tickers =", "1))) plt.ylabel('% return per year') plt.tight_layout() plt.show() # to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid',", "-- first time only # current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to hdf store common_df.to_hdf(secd_filename,", "start = time.time() # df = db.raw_sql('select {} from {}.{} WHERE gvkey =", "'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd',", "can't get rid of it... single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE') #", "even with 1M rows # query_str = 'select count(gvkey) from comp.secd where datadate", "'append': True, 'format': 'table', 'complib': 'blosc', 'complevel': 9} secd_cols_to_use = ['ajexdi', # Adjusted", "# # make string for SQL query: WHERE IN # # should be", "get_stock_hist_df(gvkey, library='comp', tablename='secd'): df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(secd_cols,", "= wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves credentials, but not pgpass working # db.create_pgpass_file() return", "this. 
# full data query only took a few seconds even with 1M", "sp600_gvkeys_strings = [\"'\" + gv + \"'\" for gv in sp600_gvkeys] sp600_gvkeys_string =", "312.8+ MB so we can ignore most of those middle columns cols_to_use =", "'thru' date, then shouldn't be included # but stocks were added on 'from'", "float64 cshtrd 999677 non-null float64 dvi 379938 non-null float64 eps 309295 non-null float64", "constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get adjusted closes for constituents now and 1y in", "data for securities cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI", "LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def testing_db(): \"\"\" looks like some code that", "1000000 non-null float64 secstat 1000000 non-null object tpci 1000000 non-null object cik 922655", "columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] =", "sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600 index data starts in 1994 years", "needed # gvkeys = single_idx_df['gvkey'].values # create dataframe with list of constituents for", "TODO: get latest date and download updates \"\"\" df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf')", "float64 divd 5694 non-null float64 divdpaydateind 0 non-null object divsp 129 non-null float64", "but do later # if update: # cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf' #", "securities['gvkey'].values remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to get historical security data #", "bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if", "'sec_shortint', obs=100) # quarterly fundamentals db.get_table('comp', 'fundq') # annual db.get_table('comp', 
'funda') # industry", "very slow sql query; avoid securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0',", "# testing # df = db.raw_sql('select {} from {}.{} WHERE gvkey = \\'001004\\'", "start), 'seconds') big_df = pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate']) #", "# don't have permission?? db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\" downloads an entire", "use last price to get price change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in", "# offset = 0 df = db.get_table(library=library, table=table, obs=nrows, offset=offset) # converts date", "wrds_uname = os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') # tries to use pgpass file; see", "all updated data and append # used once to write data # df.to_hdf(FILEPATH", "of constituents as a histogram; mostly 600 but a few above and below", "# query_str = 'select count(gvkey) from comp.secd where datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str)", "stuff, or just grab whole table if cant figure out what new stuff", "'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi']", "# get new rows plus the last one to check it's the same", "1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks yearly", "in db; returns True is up to date; False if not \"\"\" if", "# 030824 for sp600 # df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx']", "or acquisitions, etc 
missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid',", "WRDS database need to enter credentials to log in \"\"\" wrds_uname = os.environ.get('wrds_username')", "don't know what it is) SP1500: S&P 1500 Super Composite NASDAQ 100: Nasdaq", "object iid 1000000 non-null object datadate 1000000 non-null object tic 1000000 non-null object", "Nasdaq 100 \"\"\" idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values", "986959 non-null float64 prcld 985637 non-null float64 prcod 224624 non-null float64 prcstd 999696", "with daily price, eps, and market cap data # see what returns are", "daily data from the constituent list # AIR ('001004') is not in common_stocks,", "df['market_cap'] = df['cshoc'] * df['prccd'] # TODO: create file for storing all updated", "# reads in all securities securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values", "start = ch if ch + chunk_size > len(remaining_gvs): gvkeys_strings = [\"'\" +", "'tic', 'tpci', 'trfd'] \"\"\" pass def test_sql_queries(): pass # with limit for testing", "things are added, but do later # if update: # cst_filepath = FILEPATH", "SP600: S&P Smallcap 600 Index SP400: S&P Midcap 400 Index SP500: S&P 500", "dataframe and only update it \"\"\" library = 'comp' table = 'idxcst_his' #", "pd.value_counts(lengths) # plt.hist(lengths) # plt.show() # TODO: # need to check that no", "= 'select count(gvkey) from comp.secd where datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str =", "maybe want to get iid too, not sure ] other_cols = ['adrrc', 'anncdate',", "now; use download_small_table function instead gets historical index constituents from compustat table checks", "999696 non-null float64 prchd 986959 non-null float64 prcld 985637 non-null float64 prcod 
224624", "'030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" + gv + \"'\" for gv", "smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH", "gv in tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select {} from {}.{} WHERE gvkey =", "ones for compustat (comp) are: security names_ix idxcst_his .h5 files have same name", "buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to be weird", "= common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) # no", "dfs = [] # for gv in tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select {}", "prices daily db.get_table('comp', 'idx_mth') # simplified financial statement extract daily db.get_table('comp', 'funda') #", "+ 'hdf/idxcst_his.hdf') # only need to do this once, then after it's saved,", "second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace NaT with tomorrow's date # gives copy warning", "gv in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs = [] for gv, j in", "as executor: for gv in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs = [] for", "-- should take about 20m for 2k # took 1282s for 2127 gvkeys", "object datadate 1000000 non-null object tic 1000000 non-null object cusip 1000000 non-null object", "== current_rows: print('up to date') return True, nrows elif nrows < current_rows: print('number", "simultaneous queries is max -- run in parallel with ThreadPoolExecutor(max_workers=5) as executor: for", "latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, 
tzinfo=None).tz_localize('US/Eastern') # replace NaT with tomorrow's", "20 market caps, get close price # get close price a year later,", "'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600 index data starts in 1994", "constituent list # AIR ('001004') is not in common_stocks, figure out why full_const", "import time import datetime from collections import OrderedDict from concurrent.futures import ThreadPoolExecutor import", "1000000 non-null object datadate 1000000 non-null object tic 1000000 non-null object cusip 1000000", "table in compd library -- need to rewrite query idxcst_his has the historical", "divsppaydate 128 non-null object paydate 5772 non-null object recorddate 2906 non-null object curcdd", "const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] =", "non-null float64 prchd 986959 non-null float64 prcld 985637 non-null float64 prcod 224624 non-null", "'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi',", "\"\"\" looks like some code that tests some db functions and explores them", "prccd 999696 non-null float64 prchd 986959 non-null float64 prcld 985637 non-null float64 prcod", "+ \"'\" for s in common_securities['gvkey']]) + ')' # if you want to", "'prcld', 'prcod'] # WARNING: does not appear to work properly. 
probably a sql", "= pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace NaT with tomorrow's date #", "+ 'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return offset", "for all common stocks (US and ADR, or tpci column is 0 or", "trading during the year; figure out mergers/buyouts, etc # TODO: get tickers for", "colculate market cap df['market_cap'] = df['cshoc'] * df['prccd'] # TODO: create file for", "query; avoid securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])] # #", "from comp.secd where datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str = 'select {} from", "common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD stocks common_df.drop(common_df[common_df['curcdd'] ==", "# dfs.append(df) # testing # df = db.raw_sql('select {} from {}.{} WHERE gvkey", "and thru, add to list # stocks were removed on 'thru', so if", "const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20 = full_const.sort_values(by='market_cap',", "gvkeys = df['gvkey'].unique() # I think 0 or F for tpci are common", "'mode': 'w', 'complib': 'blosc', 'complevel': 9} hdf_settings_table = {'key': 'data', 'mode': 'a', 'append':", "# big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!! # big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set", "it different/better to rebalance on a certain day/month? 
def secd_info(): \"\"\" info of", "on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to make sure only one iid", "add to list # stocks were removed on 'thru', so if it is", "data and append # used once to write data # df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf',", "at number of constituents as a histogram; mostly 600 but a few above", "gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't", "find next stock in bottom 20 at time the other was put out,", "prcld 985637 non-null float64 prcod 224624 non-null float64 prcstd 999696 non-null float64 trfd", "etc, like in 2006 and 07 I think last_idx = 0 else: last_idx", "'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565'] # TODO: is it different/better", "# low 'prcod', # open 'tic' # ticker symbol ] df = db.get_table(library,", "= ch if ch + chunk_size > len(remaining_gvs): gvkeys_strings = [\"'\" + gv", "full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] ==", "const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20", "+ 'hdf/security.hdf') # abbreviated securities df; only ticker, gvkey, and iid sec_short =", "pgpass working # db.create_pgpass_file() return db def list_libs_tables(): \"\"\" some exploration of the", "for sp600 with daily price, eps, and market cap data # see what", "object capgn 29 non-null 
float64 cheqv 85 non-null float64 div 5780 non-null float64", "are 0 for now # get the overall price changes each year annualized_return", "for y in tqdm(years[1:]): # first year starts on sept year_dates = [d", "inplace=True) # no longer need currency, all USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] =", "if current rows is less than rows in db; returns True is up", "'epsmo', 'exchg', 'fic', 'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic', 'tpci', 'trfd']", "import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data', 'mode': 'w', 'complib': 'blosc',", "gvkeyx which is # 030824 for sp600 # df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf')", "columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif tablename == 'sec_dprc': #", "date_cols=['datadate']) # dfs.append(df) # testing # df = db.raw_sql('select {} from {}.{} WHERE", "**hdf_settings) elif tablename == 'sec_dprc': # need to dl in chunks because it", "most of those middle columns cols_to_use = ['ajexdi', 'cshoc', # shares outstanding 'cshtrd',", "df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df gc.collect() def download_common_stock_price_history(db,", "{} iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid) > 1: print('shit, iid length", "be found!\") return # convert datadate to datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate", "non-null object paydate 5772 non-null object recorddate 2906 non-null object curcdd 999696 non-null", "stocks yearly returns, annualized return = ' + str(round(annualized_return, 1))) plt.ylabel('% return per", "df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string),", 
"single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get stocks' gvkeys for sql search --", "nrows, nobs), 1): print('on part', str(i)) df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start)", "check if append works with out-of-order columns # TODO: find next stock in", "'cusip', 'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg', 'fic',", "prcstd 999696 non-null float64 trfd 733884 non-null float64 exchg 1000000 non-null float64 secstat", "annual db.get_table('comp', 'funda') # industry quarterly db.get_table('comp', 'aco_indstq') # annual db.get_table('comp', 'aco_indsta') #", "daily data db.get_table('comp', 'secd', obs=100) # OTC pricing db.get_table('otc', 'endofday', obs=100) # gets", "current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for tpci 0 or F # ends up with", "pd.to_datetime(df['from'], utc=True) # df['thru'] = pd.to_datetime(df['thru'], utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru']", "offset = 0 df = db.get_table(library=library, table=table, obs=nrows, offset=offset) # converts date columns", "file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves credentials, but", "just sp600. 
TODO: get latest date and download updates \"\"\" df = pd.read_hdf(FILEPATH", "9} secd_cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding", "'iid']) # TODO: deal with acquisitions: dlrsni 01 is acquired, 02 is bankrupt,", "= len(remaining_gvs) // 10 for i, ch in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)):", "clean function and do when saving the file # df['from'] = pd.to_datetime(df['from'], utc=True)", "historical index constituents \"\"\" import os import gc import time import datetime from", "print('up to date') return True, nrows elif nrows < current_rows: print('number of available", "functions and explores them \"\"\" df = db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip',", "gvkeys_strings = [\"'\" + gv + \"'\" for gv in remaining_gvs[start:ch + chunk_size]]", "current db;') print('something is wrong...') return True, nrows else: print('db needs updating') return", "= [\"'\" + gv + \"'\" for gv in remaining_gvs[start:]] else: gvkeys_strings =", "del jobs del dfs del big_df gc.collect() # 30 seconds per 50 --", "per gvkey -- not quite gvkey_grp = common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids", "in tqdm(years[1:]): # first year starts on sept year_dates = [d for d", "'tic' # maybe want to get iid too, not sure ] other_cols =", "rows # query_str = 'select count(gvkey) from comp.secd where datadate > \\'{}\\';'.format(latest_date) #", "iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid'] = iid", "'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')", "test_sql_queries(): pass # with limit for testing # df = db.raw_sql('select {} from", "+ 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to join up with other dataframe 
maybe, for", "nobs), 1): print('on part', str(i)) df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH", "start), 'seconds') # takes about 2h linearly # dfs = [] # for", "if date is within stock's from and thru, add to list # stocks", "and gvkey -- should just merge or something # for gvkey in tqdm(common_df['gvkey'].unique()):", "last_price price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) #", "for start, end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is [5:] ; market", "mergers/buyouts, etc # TODO: get tickers for start, end in tqdm(zip(first_days[4:-1], first_days[5:])): #", "= single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get stocks' gvkeys for sql search -- no", "CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) # no longer need currency, all USD", "+ 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx) > 1: print('more than", "= pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add", "# TODO: create file for storing all updated data and append # used", "to count how many rows are there, use this. 
# full data query", "columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)), **hdf_settings) del df", "= (np.prod([1 + p for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) *", "TODO: get latest file # parse dates not working for hdf, parse_dates=['from', 'thru'],", "be weird tickers; buyouts or something # ignore stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index,", "41 columns): gvkey 1000000 non-null object iid 1000000 non-null object datadate 1000000 non-null", "daily db.get_table('comp', 'idx_mth') # simplified financial statement extract daily db.get_table('comp', 'funda') # seems", "df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif", "gvkeys def load_and_combine_sec_dprc(): \"\"\" loads all security data from sec_dprc table \"\"\" dfs", "enter credentials to log in \"\"\" wrds_uname = os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') #", "# get close price a year later, calculate overall return # repeat ad", "WHERE datadate > \\'{}\\';'# and gvkey IN {};' df = db.raw_sql(query_str.format(secd_cols, library, table,", "= df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df gc.collect() def download_common_stock_price_history(db, update=True,", "= 0 df = db.get_table(library=library, table=table, obs=nrows, offset=offset) # converts date columns to", "pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd']", "current_tickers lengths.append(current_companies.shape[0]) # look at number of constituents as a histogram; mostly 600", "0: # everything 
was acquired/bankrupt, etc, like in 2006 and 07 I think", "for gv in remaining_gvs[start:]] else: gvkeys_strings = [\"'\" + gv + \"'\" for", "stocks missing from price changes, and use last price to get price change", "hdf file # rows_to_get = nrows - const_df.shape[0] + 1 # offset =", "db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate']) #", "OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg = OrderedDict() # TODO: get latest price if", "for m in missing_gvkeys: last_idx += 1 # make an index for creating", "\\'{}\\';'# and gvkey IN {};' df = db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate']) #", "is a small table... # no need to update row by row, and", "make string for SQL query: WHERE IN # # should be like ('item", "lib download entire table e.g. tablename='sec_shortint' tables downloaded 9-12: sec_shortint security secd secd", "combine with securities for ticker symbol securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated", "security data from sec_dprc table \"\"\" dfs = [] for i in tqdm(range(1,", "& (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] = m", "in price changes and check for bankruptcy or acquisitions, etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey'])))", "float64 dvrated 2875 non-null float64 paydateind 2 non-null object anncdate 2776 non-null object", "object capgnpaydate 29 non-null object cheqvpaydate 82 non-null object divdpaydate 5691 non-null object", "'trfd'] \"\"\" pass def test_sql_queries(): pass # with limit for testing # df", "for 20 smallest marketcap stocks # just get first of year dates, then", "def list_libs_tables(): \"\"\" some exploration of the db lists libraries, and within each", "date') return True, nrows elif nrows < current_rows: 
print('number of available rows is", "db.raw_sql('select {} from {}.{} WHERE gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate'])", "don't have permission?? db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\" downloads an entire table", "db.get_table('comp', 'funda') # industry quarterly db.get_table('comp', 'aco_indstq') # annual db.get_table('comp', 'aco_indsta') # index", "out of gvkeys for SQL query start = ch if ch + chunk_size", "common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker and remove iid and gvkey --", "securities data db -- has historical price data for securities cols_to_use = ['ajexdi',", "# market cap/price, daily data db.get_table('comp', 'secd', obs=100) # OTC pricing db.get_table('otc', 'endofday',", "at time the other was put out, and see how it does smallest_20_1y_chg[datestr]", "predefined tables to download') def check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\" checks if current", "properly. 
probably a sql ordering issue or something nobs = 10000000 for i,", "seconds even with 1M rows # query_str = 'select count(gvkey) from comp.secd where", "update=True, will get latest date in current df, then get everything after that", "# everything was acquired/bankrupt, etc, like in 2006 and 07 I think last_idx", "from the constituent list # AIR ('001004') is not in common_stocks, figure out", "stocks on 'from' date # use dataframe masking date_string = d.strftime('%Y-%m-%d') current_stocks =", "log in \"\"\" wrds_uname = os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') # tries to use", "acquisitions: dlrsni 01 is acquired, 02 is bankrupt, 03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm", "table='secd', library='comp'): \"\"\" downloads data for all common stocks (US and ADR, or", "stock's from and thru, add to list # stocks were removed on 'thru',", "some db functions and explores them \"\"\" df = db.get_table('comp', 'security', obs=10) db.get_table('crsp',", "= single_idx_df['from'].min() # get todays date and reset hour, min, sec to 0s", "d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] > d)] current_companies = current_stocks[['gvkey',", "use pgpass file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves", "ADR, or tpci column is 0 or F) if update=True, will get latest", "S&P Smallcap 600 Index SP400: S&P Midcap 400 Index SP500: S&P 500 Comp-Ltd", "if cant figure out what new stuff is \"\"\" nrows = db.get_row_count(library, tablename)", "write data # df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', 
**hdf_settings_table) # only", "date_range is None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at earliest date", "-- expect it to be about 100GB in memory cols_to_use = ['ajexdi', 'cshoc',", "'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20 =", "date_cols=['datadate']) # see how long one query takes -- about 2s # start", "# drop columns which seem to have weird dates df.drop(df['prccd'].apply(lambda x: x is", "999696 non-null object adrrc 4202 non-null float64 ajexdi 999696 non-null float64 cshoc 439670", "get company market caps # get smallest 20 market caps, get close price", "to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid'])", "= pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else: current_rows = 0 nrows = db.get_row_count(library=library, table=table)", "these stocks datestr = start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start]", "chunk through remaining gvkeys in 10 chunks chunk_size = len(remaining_gvs) // 10 for", "# index constituents db.get_table('comp', 'idxcst_his') # market cap/price, daily data db.get_table('comp', 'secd', obs=100)", "saving the file # df['from'] = pd.to_datetime(df['from'], utc=True) # df['thru'] = pd.to_datetime(df['thru'], utc=True)", "download_all_security_data(): \"\"\" downloads full security data history for sp600 I think this was", "\"\"\" library = 'comp' table = 'idxcst_his' # check if any new rows", "'blosc', 'complevel': 9} secd_cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI", "'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = 
pd.concat(dfs) # get only common stocks securities = pd.read_hdf(FILEPATH +", "one to check it's the same as the # # last one currently", "to check that no tickers are used for multiple companies # get unique", "'permno', 'date', 'bidlo', 'askhi'], obs=100) # compustat data # short data db.get_table('comp', 'sec_shortint',", "tpci are common or ADR, which are stocks you can buy common_stocks =", "IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # see how long one query takes", "not pgpass working # db.create_pgpass_file() return db def list_libs_tables(): \"\"\" some exploration of", "this once, then after it's saved, good to go # TODO: put this", "look at number of constituents as a histogram; mostly 600 but a few", "TODO: dynamically set date instead of hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del", "else: last_idx = bottom_20_price_chg.index[-1] # get stocks missing from price changes, and use", "pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else: current_rows = 0 nrows = db.get_row_count(library=library, table=table) if", "in sp600_dates if d.year == y] first_days.append(min(year_dates)) # '1998-01-02' giving key error in", "missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys: last_idx += 1 # make an", "obs=100) # OTC pricing db.get_table('otc', 'endofday', obs=100) # gets acquisition spending; aqcy column", "= os.environ.get('wrds_password') # tries to use pgpass file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db", "company names # current_tickers = current_stocks['co_tic'] # company tickers constituent_companies[date_string] = current_companies #", "common_df = df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1) # drop 
currency column #", "chunk_size]] start = time.time() jobs = [] # 10 threads per cpu for", "nrows - const_df.shape[0] + 1 # offset = const_df.shape[0] - 1 # else:", "use CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) # no longer need currency, all", "'data', 'mode': 'w', 'complib': 'blosc', 'complevel': 9} hdf_settings_table = {'key': 'data', 'mode': 'a',", "common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1) # drop currency", "security data history for sp600 I think this was actually used to get", "unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]): # first year starts on sept", "999999 Data columns (total 41 columns): gvkey 1000000 non-null object iid 1000000 non-null", "lists libraries, and within each library you can list tables \"\"\" db.list_libraries() db.list_tables('zacks')", "nobs = 10000000 for i, start in enumerate(range(0, nrows, nobs), 1): print('on part',", "plt.ylabel('% return per year') plt.tight_layout() plt.show() # to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']],", "securities and downloads historical price data # chunk through remaining gvkeys in 10", "implement 20 smallest SPY strategy from paper (see beat_market_analysis github repo) \"\"\" #", "big_df = pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!!", "# df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep common", "# get % price change for each const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg']", "index]['gvkeyx'].values if len(gvkeyx) > 1: 
print('more than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index])", "plus the last one to check it's the same as the # #", "float64 secstat 1000000 non-null object tpci 1000000 non-null object cik 922655 non-null object", "= ' + str(round(annualized_return, 1))) plt.ylabel('% return per year') plt.tight_layout() plt.show() # to", "find existing dataframe and only update it \"\"\" library = 'comp' table =", "the library; common ones for compustat (comp) are: security names_ix idxcst_his .h5 files", "historical stock data actually, not just sp600. TODO: get latest date and download", "else: current_rows = 0 nrows = db.get_row_count(library=library, table=table) if nrows == current_rows: print('up", "= bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid) > 1: print('shit, iid length >1') iid", "F # ends up with very slow sql query; avoid securities = pd.read_hdf(FILEPATH", "\"\"\" names_ix has index gvkeyx and index name - comes from idx_ann table", "# if os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1] # #", "in bottom 20 at time the other was put out, and see how", "as the # # last one currently in the hdf file # rows_to_get", "= OrderedDict() smallest_20_1y_chg = OrderedDict() # TODO: get latest price if stopped trading", "# check if any new rows df_filepath = FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows", "financial statement extract daily db.get_table('comp', 'funda') # seems to be the same as", "== '010565'] # TODO: is it different/better to rebalance on a certain day/month?", "or F # ends up with very slow sql query; avoid securities =", "# common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap", "del common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df = db.raw_sql('select {} from {}.{}", "tic 1000000 non-null object 
cusip 1000000 non-null object conm 1000000 non-null object curcddv", "df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True)", "= sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]): # first year", "4202 non-null float64 ajexdi 999696 non-null float64 cshoc 439670 non-null float64 cshtrd 999677", "as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace NaT with", "columns): gvkey 1000000 non-null object iid 1000000 non-null object datadate 1000000 non-null object", "to rebalance on a certain day/month? def secd_info(): \"\"\" info of first 1M", "# full data query only took a few seconds even with 1M rows", "get close price a year later, calculate overall return # repeat ad nauseum", "cik 922655 non-null object fic 1000000 non-null object dtypes: float64(20), object(21) memory usage:", "-- should just merge or something # for gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey']", "cap/price, daily data db.get_table('comp', 'secd', obs=100) # OTC pricing db.get_table('otc', 'endofday', obs=100) #", "default library is the compstat lib download entire table e.g. 
tablename='sec_shortint' tables downloaded", "quarterly fundamentals db.get_table('comp', 'fundq') # annual db.get_table('comp', 'funda') # industry quarterly db.get_table('comp', 'aco_indstq')", "0 non-null object divsp 129 non-null float64 dvrated 2875 non-null float64 paydateind 2", "if tablename == 'secd': # this is the securities data db -- has", "# first year starts on sept year_dates = [d for d in sp600_dates", "any new rows df_filepath = FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db, df_filepath,", "sp600_gvkeys_strings[0]), date_cols=['datadate']) # end = time.time() # print('took', int(end - start), 'seconds') #", "and append # used once to write data # df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings)", "use dataframe masking date_string = d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru']", "pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600 index data starts", "epsmo 309295 non-null float64 prccd 999696 non-null float64 prchd 986959 non-null float64 prcld", "conm 1000000 non-null object curcddv 5861 non-null object capgn 29 non-null float64 cheqv", "{'key': 'data', 'mode': 'a', 'append': True, 'format': 'table', 'complib': 'blosc', 'complevel': 9} secd_cols_to_use", "gvkeys_strings = [\"'\" + gv + \"'\" for gv in remaining_gvs[start:]] else: gvkeys_strings", "library='comp', tablename='secd'): df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(secd_cols, library,", "stocks were added on 'from' date, so include stocks on 'from' date #", "working anyhow # need to figure out where new things are added, but", "= gvkeyx[0] # TODO: get latest file # parse dates not working for", "gvkey -- not quite gvkey_grp = common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids >", "# I think 0 
or F for tpci are common or ADR, which", "'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get stocks' gvkeys for sql", "= db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100) #", "Data columns (total 41 columns): gvkey 1000000 non-null object iid 1000000 non-null object", "d) & (single_idx_df['thru'] > d)] current_companies = current_stocks[['gvkey', 'iid']] # company names #", "(comp) are: security names_ix idxcst_his .h5 files have same name as table \"\"\"", "name; library also required. default library is the compstat lib download entire table", "market caps # get smallest 20 market caps, get close price # get", "= 10000000 for i, start in enumerate(range(0, nrows, nobs), 1): print('on part', str(i))", "too huge -- expect it to be about 100GB in memory cols_to_use =", "const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) # get % price", "want to get iid too, not sure ] other_cols = ['adrrc', 'anncdate', 'capgn',", "= [] # 10 threads per cpu for 8 cores; default is 5", "single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine with securities for ticker symbol securities", "less than number in current db;') print('something is wrong...') return True, nrows else:", "overall return # repeat ad nauseum # common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks", "df = db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate']) # drop columns which seem to", "and 07 I think last_idx = 0 else: last_idx = bottom_20_price_chg.index[-1] # get", "df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def", "733884 non-null 
float64 exchg 1000000 non-null float64 secstat 1000000 non-null object tpci 1000000", "security names_ix idxcst_his .h5 files have same name as table \"\"\" df_filepath =", "x: x is None).index, inplace=True) if not df.shape[0] > 0: print(\"no data to", "dates where changes were made unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates", "data to be found!\") return # convert datadate to datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern')", "rows_to_get = nrows - const_df.shape[0] + 1 # offset = const_df.shape[0] - 1", "[] for i in tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df =", "gets all dates # date_range = mcal.date_range(start=start, end=end) # gets only dates valid", "do this once, then after it's saved, good to go # TODO: put", "s + \"'\" for s in common_securities['gvkey']]) + ')' # if you want", "columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100) # compustat data # short data db.get_table('comp',", "m price_chg_dict['iid'] = iid # TODO: check this isn't more than one result,", "- start), 'seconds') # takes about 2h linearly # dfs = [] #", "= time.time() print('took', int(end - start), 'seconds') big_df = pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern')", "up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return df = db.get_table(library=library,", "# https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing in price changes and check for bankruptcy", "\"\"\" wrds_uname = os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') # tries to use pgpass file;", "'complib': 'blosc', 'complevel': 9} hdf_settings_table = 
{'key': 'data', 'mode': 'a', 'append': True, 'format':", "TODO: if not latest date; use date of datafile as latest end =", "{'key': 'data', 'mode': 'w', 'complib': 'blosc', 'complevel': 9} hdf_settings_table = {'key': 'data', 'mode':", "0 to 999999 Data columns (total 41 columns): gvkey 1000000 non-null object iid", "last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid'] = iid # TODO: check this", "capgnpaydate 29 non-null object cheqvpaydate 82 non-null object divdpaydate 5691 non-null object divsppaydate", "download_small_table(db, table, library='comp'): \"\"\" downloads table if needs updating table can be a", "np import pandas_market_calendars as mcal import pandas as pd from tqdm import tqdm", "downloads an entire table by name; library also required. default library is the", "mcal.get_calendar('NYSE') # gets all dates # date_range = mcal.date_range(start=start, end=end) # gets only", "columns cols_to_use = ['ajexdi', 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps',", "in the library; common ones for compustat (comp) are: security names_ix idxcst_his .h5", "in memory cols_to_use = ['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd',", "df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table, library='comp'): \"\"\" downloads table if needs updating", "downloads table if needs updating table can be a tablename in the library;", "\"\"\" info of first 1M rows of secd: RangeIndex: 1000000 entries, 0 to", "utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table, library='comp'):", "\"\"\" loads all security data from sec_dprc table \"\"\" dfs = [] for", "up with very slow sql query; avoid securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities", "{};' df = 
db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate']) # drop columns which seem", "'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif tablename == 'sec_dprc': # need to dl in", "library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # see how long one query takes -- about", "# convert datadate to datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap df['market_cap']", "check if any more rows and grab new stuff, or just grab whole", "20m for 2k # took 1282s for 2127 gvkeys def load_and_combine_sec_dprc(): \"\"\" loads", "= const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] == 0: # everything was acquired/bankrupt, etc,", "import os import gc import time import datetime from collections import OrderedDict from", "# abbreviated securities df; only ticker, gvkey, and iid sec_short = securities[['tic', 'gvkey',", "# gets only dates valid for NYSE -- doesn't seem to match historical", "was put out, and see how it does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] =", "to update row by row, and can't get it working anyhow # need", "know what it is) SP1500: S&P 1500 Super Composite NASDAQ 100: Nasdaq 100", "cols_to_use = ['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', 'prchd', 'prcld',", "working # db.create_pgpass_file() return db def list_libs_tables(): \"\"\" some exploration of the db", "TODO: put this in a clean function and do when saving the file", "bankrupt, 03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing in price changes", "to download') def check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\" checks if 
current rows is", "10 for i, ch in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)): # first make", "of the db lists libraries, and within each library you can list tables", "thru, add to list # stocks were removed on 'thru', so if it", "gvkeyx = gvkeyx[0] # TODO: get latest file # parse dates not working", "anyhow # need to figure out where new things are added, but do", "dataframe masking date_string = d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] >", "table can be a tablename in the library; common ones for compustat (comp)", "do later # if update: # cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf' # if", "file # df['from'] = pd.to_datetime(df['from'], utc=True) # df['thru'] = pd.to_datetime(df['thru'], utc=True) # df['from']", "[] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]): #", "up to date; False if not \"\"\" if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows", "non-null object recorddate 2906 non-null object curcdd 999696 non-null object adrrc 4202 non-null", "table, library='comp'): \"\"\" downloads table if needs updating table can be a tablename", "is max -- run in parallel with ThreadPoolExecutor(max_workers=5) as executor: for gv in", "file common indexes as represented in the idx_ann table: SP600: S&P Smallcap 600", "df['prccd'] # TODO: create file for storing all updated data and append #", "only common stocks securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys = df['gvkey'].unique() #", "year starts on sept year_dates = [d for d in sp600_dates if d.year", "return offset = 0 # turns out the db is fast to download", "current_tickers = current_stocks['co_tic'] # company tickers constituent_companies[date_string] = current_companies # constituent_tickers[date_string] = current_tickers", "convert datadate to datetime64 
df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap df['market_cap'] =", "jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs = [] for gv, j in jobs: # print(gv)", "table: SP600: S&P Smallcap 600 Index SP400: S&P Midcap 400 Index SP500: S&P", "use download_small_table function instead gets historical index constituents from compustat table checks if", "and can't get it working anyhow # need to figure out where new", "hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del securities del df del common_df del", "nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return df = db.get_table(library=library, table=table,", "all_gvkeys = securities['gvkey'].values remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to get historical security", "gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs = [] for gv, j in jobs: #", "library='comp'): \"\"\" downloads data for all common stocks (US and ADR, or tpci", "up with other dataframe maybe, for now, just use gvkeyx which is #", "constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) # look at number of constituents as a histogram;", "# get unique dates where changes were made unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique())", "| set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def spy_20_smallest(): \"\"\" tries to implement 20 smallest", "obs=100) # compustat data # short data db.get_table('comp', 'sec_shortint', obs=100) # quarterly fundamentals", "start] constituent_companies = OrderedDict() # constituent_tickers = OrderedDict() lengths = [] # TODO:", "stocks (US and ADR, or tpci column is 0 or F) if update=True,", "else: # cutoff at earliest date for index date_range = np.array(sorted(date_range)) date_range =", "stock data actually, not just 
sp600. TODO: get latest date and download updates", "= list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']]", "object divsppaydate 128 non-null object paydate 5772 non-null object recorddate 2906 non-null object", "new stuff is \"\"\" nrows = db.get_row_count(library, tablename) print('number of rows:', nrows) #db.describe_table(library,", "fic 1000000 non-null object dtypes: float64(20), object(21) memory usage: 312.8+ MB so we", "to find existing dataframe and only update it \"\"\" library = 'comp' table", "how long one query takes -- about 2s # start = time.time() #", "df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] == '030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values)", "date # use dataframe masking date_string = d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <= d)", "'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'prccd', 'prchd', 'prcld', 'prcod',", "only dates valid for NYSE -- doesn't seem to match historical data if", "chunk_size = len(remaining_gvs) // 10 for i, ch in enumerate(range(0, len(remaining_gvs) + 1,", "part', str(i)) df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename +", "(tpci = 0 and F) common_securities_short = common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short, on=['gvkey',", "utc=True) # df['thru'] = pd.to_datetime(df['thru'], utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru'] =", "get rid of it... 
single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE') # gets", "rebalance for 20 smallest marketcap stocks # just get first of year dates,", "etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']])", "'prchd', 'prcld', 'prcod', 'tic' # maybe want to get iid too, not sure", "missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] ==", "+ str(i)), **hdf_settings) del df gc.collect() elif tablename == 'idxcst_his': download_index_constituents() else: print('not", "'cshoc', # shares outstanding 'cshtrd', # volume 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd',", "is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing in price changes and check", "library='comp'): \"\"\" checks if current rows is less than rows in db; returns", "+ 1 # offset = const_df.shape[0] - 1 # else: # rows_to_get =", "1, chunk_size)): # first make strings out of gvkeys for SQL query start", "# company tickers constituent_companies[date_string] = current_companies # constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) # look", "object divsp 129 non-null float64 dvrated 2875 non-null float64 paydateind 2 non-null object", "to get historical security data # goes through all securities and downloads historical", "be included # but stocks were added on 'from' date, so include stocks", "repeat ad nauseum # common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH +", "middle 
columns cols_to_use = ['ajexdi', 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate',", "a tablename in the library; common ones for compustat (comp) are: security names_ix", "testing # df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({}) LIMIT", "<= d) & (single_idx_df['thru'] > d)] current_companies = current_stocks[['gvkey', 'iid']] # company names", "iid per gvkey -- not quite gvkey_grp = common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique() num_iids.mean()", "else: # rows_to_get = nrows # offset = 0 df = db.get_table(library=library, table=table,", "'datadate', 'eps', 'gvkey', 'iid', 'prccd', 'prchd', 'prcld', 'prcod'] # WARNING: does not appear", "append # used once to write data # df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH", "plt.hist(lengths) # plt.show() # TODO: # need to check that no tickers are", "see how long one query takes -- about 2s # start = time.time()", "figure out what new stuff is \"\"\" nrows = db.get_row_count(library, tablename) print('number of", "inplace=True) if not df.shape[0] > 0: print(\"no data to be found!\") return #", "it's saved, good to go # TODO: put this in a clean function", "open 'tic' # ticker symbol ] df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH", "= sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days = [] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates =", "row, and can't get it working anyhow # need to figure out where", "sec to 0s # TODO: if not latest date; use date of datafile", "can't get it working anyhow # need to figure out where new things", "have permission?? 
db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\" downloads an entire table by", "offset=offset) # converts date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] =", "constituents for each day start = single_idx_df['from'].min() # get todays date and reset", "to download because this is a small table... # no need to update", "sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end] # TODO: figure out why", "figure out where new things are added, but do later # if update:", "\"\"\" gets historical constituents from WRDS file common indexes as represented in the", "high 'prcld', # low 'prcod'] # open secd_cols = ','.join(secd_cols_to_use) def make_db_connection(): \"\"\"", "rows and grab new stuff, or just grab whole table if cant figure", "'prcod'] # open secd_cols = ','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates connection to WRDS", "True, nrows else: print('db needs updating') return False, nrows def download_index_constituents(db, nrows=None, update=False):", "USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH", "latest price if stopped trading during the year; figure out mergers/buyouts, etc #", "# get smallest 20 market caps, get close price # get close price", "historical index constituents from compustat table checks if size of table has changed;", "'prccd', # close 'prchd', # high 'prcld', # low 'prcod', # open 'tic'", "remaining_gvs[start:ch + chunk_size]] start = time.time() jobs = [] # 10 threads per", "later, calculate overall return # repeat ad nauseum # common_stocks = pd.read_hdf(FILEPATH +", "10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def testing_db(): 
\"\"\" looks like some code that tests", "latest date and download updates \"\"\" df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df =", "first iteration # secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH + 'hdf/secd.hdf'", "in remaining_gvs[start:]] else: gvkeys_strings = [\"'\" + gv + \"'\" for gv in", "don't use CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) # no longer need currency,", "os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1] # # get new", "TODO: check this isn't more than one result, may need to filter by", "to implement 20 smallest SPY strategy from paper (see beat_market_analysis github repo) \"\"\"", "+ 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs) # get only common stocks securities = pd.read_hdf(FILEPATH", "df_filepath, table=table, library=library) if up_to_date: return offset = 0 # turns out the", "from WRDS file common indexes as represented in the idx_ann table: SP600: S&P", "0 and F) common_securities_short = common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd',", "# open 'tic' # ticker symbol ] df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows)", "all USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] * common_df['prccd']", "day start = single_idx_df['from'].min() # get todays date and reset hour, min, sec", "but stocks were added on 'from' date, so include stocks on 'from' date", "None).index, inplace=True) if not df.shape[0] > 0: print(\"no data to be found!\") return", "sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]): # first year starts", 
"constituents db.get_table('comp', 'idxcst_his') # market cap/price, daily data db.get_table('comp', 'secd', obs=100) # OTC", "# df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename,", "y] first_days.append(min(year_dates)) # '1998-01-02' giving key error in constituent_companies price_chg_1y = OrderedDict() smallest_20", "onward is [5:] ; market cap not available until 1999 for these stocks", "','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates connection to WRDS database need to enter credentials", "datafile as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace NaT", "'idxcst_his') # market cap/price, daily data db.get_table('comp', 'secd', obs=100) # OTC pricing db.get_table('otc',", "030824 for sp600 # df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] ==", "in parallel with ThreadPoolExecutor(max_workers=5) as executor: for gv in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv)))", "sp600_stocks[sp600_stocks['datadate'] == end] # TODO: figure out why a few hundred are missing", "rows plus the last one to check it's the same as the #", "# if you want to count how many rows are there, use this.", "and explores them \"\"\" df = db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno',", "# only need to do this once, then after it's saved, good to", "gv + \"'\" for gv in sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) # reads", "index constituents from compustat table checks if size of table has changed; if", "= mcal.get_calendar('NYSE') # gets all dates # date_range = mcal.date_range(start=start, end=end) # gets", "# used once to write data # df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH +", "remaining_gvs[start:]] else: 
gvkeys_strings = [\"'\" + gv + \"'\" for gv in remaining_gvs[start:ch", "longer needed # gvkeys = single_idx_df['gvkey'].values # create dataframe with list of constituents", "missing in price changes and check for bankruptcy or acquisitions, etc missing_gvkeys =", "// 10 for i, ch in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)): # first", "table by name; library also required. default library is the compstat lib download", "securities[securities['tpci'].isin(['0', 'F'])] # # make string for SQL query: WHERE IN # #", "think this was actually used to get all historical stock data actually, not", "run in parallel with ThreadPoolExecutor(max_workers=5) as executor: for gv in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df,", "gc.collect() def download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\" downloads data for all common stocks", "# offset = const_df.shape[0] - 1 # else: # rows_to_get = nrows #", "-- run in parallel with ThreadPoolExecutor(max_workers=5) as executor: for gv in gvkeys_strings: jobs.append((gv,", "smallest marketcap stocks # just get first of year dates, then get company", "is bankrupt, 03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing in price", "in \"\"\" wrds_uname = os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') # tries to use pgpass", "# https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves credentials, but not pgpass working", "', '.join([\"'\" + s + \"'\" for s in common_securities['gvkey']]) + ')' #", "for tpci are common or ADR, which are stocks you can buy common_stocks", "# TODO: multiprocessing to speed up # takes about 10s for nasdaq 100", "'ajexdi', 
'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd',", "by name; library also required. default library is the compstat lib download entire", "this is the securities data db -- has historical price data for securities", "non-null object cusip 1000000 non-null object conm 1000000 non-null object curcddv 5861 non-null", "db.get_table(library=library, table=table, obs=nrows, offset=offset) # converts date columns to datetime df['from'] = pd.to_datetime(df['from'],", "where changes were made unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def", "should just merge or something # for gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] ==", "seem to be weird tickers; buyouts or something # ignore stocks on canadian", "82 non-null object divdpaydate 5691 non-null object divsppaydate 128 non-null object paydate 5772", "matplotlib.pyplot as plt import numpy as np import pandas_market_calendars as mcal import pandas", "gv in remaining_gvs[start:ch + chunk_size]] start = time.time() jobs = [] # 10", "object cik 922655 non-null object fic 1000000 non-null object dtypes: float64(20), object(21) memory", "security data # goes through all securities and downloads historical price data #", "pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace NaT with tomorrow's date # gives", "= 'comp' table = 'idxcst_his' # check if any new rows df_filepath =", "wrong...') return True, nrows else: print('db needs updating') return False, nrows def download_index_constituents(db,", "WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end = time.time() 
#", "# gvkeys_str = '(' + ', '.join([\"'\" + s + \"'\" for s", "smallest SPY strategy from paper (see beat_market_analysis github repo) \"\"\" # merge historical", "securities[securities['gvkey'] == '010565'] # TODO: is it different/better to rebalance on a certain", "'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic', 'tpci', 'trfd'] \"\"\" pass def test_sql_queries(): pass #", "securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw", "with very slow sql query; avoid securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities =", "seems like 5 simultaneous queries is max -- run in parallel with ThreadPoolExecutor(max_workers=5)", "for 2k # took 1282s for 2127 gvkeys def load_and_combine_sec_dprc(): \"\"\" loads all", "at earliest date for index date_range = np.array(sorted(date_range)) date_range = date_range[date_range >= start]", "NaT with tomorrow's date # gives copy warning but can't get rid of", "really long time... # # df = db.raw_sql('select {} from {}.{} WHERE gvkey", "table -- first time only # current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to hdf store", "= current_stocks[['gvkey', 'iid']] # company names # current_tickers = current_stocks['co_tic'] # company tickers", "use this. 
# full data query only took a few seconds even with", "float64 exchg 1000000 non-null float64 secstat 1000000 non-null object tpci 1000000 non-null object", "divd 5694 non-null float64 divdpaydateind 0 non-null object divsp 129 non-null float64 dvrated", "{}.{} WHERE gvkey = {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate']) return df def download_all_security_data():", "# repeat ad nauseum # common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH", "= sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end] # TODO: figure out", "8 cores; default is 5 per CPU # seems like 5 simultaneous queries", "print('took', int(end - start), 'seconds') # takes about 2h linearly # dfs =", "if update: # cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): # const_df", "current_companies # constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) # look at number of constituents as", "+ 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600 index data starts in", "5 per CPU # seems like 5 simultaneous queries is max -- run", "= sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = last_data['prccd'] /", "per CPU # seems like 5 simultaneous queries is max -- run in", "historical price data for securities cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD", "memory cols_to_use = ['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', 'prchd',", "for compustat (comp) are: security names_ix idxcst_his .h5 files have same name as", "# company names # current_tickers = current_stocks['co_tic'] # company tickers constituent_companies[date_string] = current_companies", "(see beat_market_analysis github repo) 
\"\"\" # merge historical constituents for sp600 with daily", "is fast to download because this is a small table... # no need", "search -- no longer needed # gvkeys = single_idx_df['gvkey'].values # create dataframe with", "about 2h linearly # dfs = [] # for gv in tqdm(sp600_gvkeys_strings): #", "date already downloaded and use sql query to gegt updates; then save to", "short data db.get_table('comp', 'sec_shortint', obs=100) # quarterly fundamentals db.get_table('comp', 'fundq') # annual db.get_table('comp',", "# takes about 2h linearly # dfs = [] # for gv in", "non-null object adrrc 4202 non-null float64 ajexdi 999696 non-null float64 cshoc 439670 non-null", "= pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1] # # get new rows plus the", "df = db.get_table(library=library, table=table, obs=nrows) if table == 'idxcst_his': # converts date columns", "= np.array(sorted(date_range)) date_range = date_range[date_range >= start] constituent_companies = OrderedDict() # constituent_tickers =", "works with out-of-order columns # TODO: find next stock in bottom 20 at", "df['from'] = pd.to_datetime(df['from'], utc=True) # df['thru'] = pd.to_datetime(df['thru'], utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern')", "bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO: deal with acquisitions: dlrsni 01 is", "(chapter 6) 'cshoc', # shares outstanding 'cshtrd', # volume 'curcdd', 'datadate', 'eps', 'gvkey',", "SQL query: WHERE IN # # should be like ('item 1', 'item 2',", "cap data # see what returns are on yearly rebalance for 20 smallest", "low 'prcod'] # open secd_cols = ','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates connection to", "const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd',", "hour, min, 
sec to 0s # TODO: if not latest date; use date", "1000000 entries, 0 to 999999 Data columns (total 41 columns): gvkey 1000000 non-null", "tables downloaded 9-12: sec_shortint security secd secd is about 39GB in a pandas", "currently in the hdf file # rows_to_get = nrows - const_df.shape[0] + 1", "pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated securities df; only ticker, gvkey, and iid sec_short", "+ 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del dfs del big_df gc.collect() # 30 seconds", "object conm 1000000 non-null object curcddv 5861 non-null object capgn 29 non-null float64", "unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def spy_20_smallest(): \"\"\" tries to", "WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes a", "df_filepath, table=table, library=library) if up_to_date: return df = db.get_table(library=library, table=table, obs=nrows) if table", "smallest_20[datestr] = bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] == 0: #", "security data db.get_table('comp', 'secm', obs=100) # index constituents db.get_table('comp', 'idxcst_his') # market cap/price,", "hdf_settings = {'key': 'data', 'mode': 'w', 'complib': 'blosc', 'complevel': 9} hdf_settings_table = {'key':", "securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to be weird tickers; buyouts or", "= const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine with securities for ticker symbol securities =", "paydate 5772 non-null object recorddate 2906 non-null object curcdd 999696 non-null object adrrc", "about 20m for 2k # took 1282s for 
2127 gvkeys def load_and_combine_sec_dprc(): \"\"\"", "idxcst_his has the historical index constituents \"\"\" import os import gc import time", "with Wed instead of Ltd which I don't know what it is) SP1500:", "# merge historical constituents for sp600 with daily price, eps, and market cap", "if date_range is None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at earliest", "= \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq 100 constituents then looks at", "full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get adjusted", "the # # last one currently in the hdf file # rows_to_get =", "I think last_idx = 0 else: last_idx = bottom_20_price_chg.index[-1] # get stocks missing", "constituent_tickers = OrderedDict() lengths = [] # TODO: multiprocessing to speed up #", "= common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df", "price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom", "full data query only took a few seconds even with 1M rows #", "OrderedDict() smallest_20_1y_chg = OrderedDict() # TODO: get latest price if stopped trading during", "price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks yearly returns, annualized return =", "'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565'] # TODO: is it different/better to rebalance", "= db.raw_sql('select * from comp.fundq WHERE gvkey = \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets", "of rows:', 
nrows) #db.describe_table(library, tablename) # nrows = 1000000 if tablename == 'secd':", "# last one currently in the hdf file # rows_to_get = nrows -", "tablename, gv), date_cols=['datadate']) # dfs.append(df) # testing # df = db.raw_sql('select {} from", "# to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey',", "'iid']] # company names # current_tickers = current_stocks['co_tic'] # company tickers constituent_companies[date_string] =", "0 else: last_idx = bottom_20_price_chg.index[-1] # get stocks missing from price changes, and", "float64 prccd 999696 non-null float64 prchd 986959 non-null float64 prcld 985637 non-null float64", "table=table) if nrows == current_rows: print('up to date') return True, nrows elif nrows", "= time.time() # df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use),", "seem to have weird dates df.drop(df['prccd'].apply(lambda x: x is None).index, inplace=True) if not", "above and below # pd.value_counts(lengths) # plt.hist(lengths) # plt.show() # TODO: # need", "SP500: S&P 500 Comp-Ltd (there's another one with Wed instead of Ltd which", "* from comp.fundq WHERE gvkey = \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq", "last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = last_data['prccd']", "def check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\" checks if current rows is less than", "**hdf_settings_table) del current_df del securities del df del common_df del common_securities gc.collect() def", "current_rows = 0 nrows = db.get_row_count(library=library, table=table) if nrows == current_rows: print('up to", "common_securities['gvkey']]) + ')' # if you want to count how many rows are", "returns, annualized return = ' + str(round(annualized_return, 
1))) plt.ylabel('% return per year') plt.tight_layout()", "to use pgpass file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) #", "because it is too huge -- expect it to be about 100GB in", "dl in chunks because it is too huge -- expect it to be", "get latest price if stopped trading during the year; figure out mergers/buyouts, etc", "about 10s for nasdaq 100 for d in tqdm(date_range): # if date is", "* df['prccd'] # TODO: create file for storing all updated data and append", "each const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close']", "stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to make sure only one", "# cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath)", "CPU # seems like 5 simultaneous queries is max -- run in parallel", "= single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] > d)] current_companies = current_stocks[['gvkey', 'iid']] #", "'secd': # this is the securities data db -- has historical price data", "so we can ignore most of those middle columns cols_to_use = ['ajexdi', 'cshoc',", "plt.show() # to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']],", "price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO:", "= const_df.shape[0] - 1 # else: # 
rows_to_get = nrows # offset =", "cshoc 439670 non-null float64 cshtrd 999677 non-null float64 dvi 379938 non-null float64 eps", "**hdf_settings) # need to join up with other dataframe maybe, for now, just", "('item 1', 'item 2', 'item 3') # gvkeys_str = '(' + ', '.join([\"'\"", "p for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values())", "obs=100) # gets acquisition spending; aqcy column df4 = db.raw_sql('select * from comp.fundq", "is # 030824 for sp600 # df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df =", "20 smallest marketcap stocks # just get first of year dates, then get", "time... # # df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use),", "in constituent_companies price_chg_1y = OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg = OrderedDict() # TODO:", "db is fast to download because this is a small table... # no", "= pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])] # # make string for", "strings out of gvkeys for SQL query start = ch if ch +", "stocks datestr = start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data", "bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] == 0: # everything was acquired/bankrupt,", "enumerate(range(0, len(remaining_gvs) + 1, chunk_size)): # first make strings out of gvkeys for", "it does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others", "3') # gvkeys_str = '(' + ', '.join([\"'\" + s + \"'\" for", "float64 cheqv 85 non-null float64 div 5780 non-null float64 divd 5694 non-null float64", "# gives copy 
warning but can't get rid of it... single_idx_df['thru'].fillna(end + pd.DateOffset(days=1),", "names_ix idxcst_his .h5 files have same name as table \"\"\" df_filepath = FILEPATH", "last_idx = bottom_20_price_chg.index[-1] # get stocks missing from price changes, and use last", "const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) # get % price change", "df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap df['market_cap'] = df['cshoc'] * df['prccd'] #", "# these seem to be weird tickers; buyouts or something # ignore stocks", "of it... single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE') # gets all dates", "from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # see how", "be about 100GB in memory cols_to_use = ['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps',", "tables like 'security', check if any more rows and grab new stuff, or", "db.raw_sql('select {} from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) #", "for SQL query start = ch if ch + chunk_size > len(remaining_gvs): gvkeys_strings", "last_price = last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid'] = iid # TODO:", "# write existing data as hdf table -- first time only # current_df.to_hdf(secd_filename,", "df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to join up with other", "-- has historical price data for securities cols_to_use = ['ajexdi', # Adjusted Price", "import gc import time import datetime from collections import OrderedDict from concurrent.futures import", "get todays date and reset hour, min, sec to 0s # TODO: if", "obs=100) # index 
constituents db.get_table('comp', 'idxcst_his') # market cap/price, daily data db.get_table('comp', 'secd',", "is 0 or F) if update=True, will get latest date in current df,", "current_df del securities del df del common_df del common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp',", "on yearly rebalance for 20 smallest marketcap stocks # just get first of", "one with Wed instead of Ltd which I don't know what it is)", "gegt updates; then save to HDF5 for tables like 'security', check if any", "copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del dfs del big_df gc.collect() #", "({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes a really long time...", "with tomorrow's date # gives copy warning but can't get rid of it...", "and do when saving the file # df['from'] = pd.to_datetime(df['from'], utc=True) # df['thru']", "gvkey IN {};' df = db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate']) # drop columns", "caps # get smallest 20 market caps, get close price # get close", "db.get_table('comp', 'aco_indsta') # index prices daily db.get_table('comp', 'idx_mth') # simplified financial statement extract", "only # current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df", "“Understanding the Data” on page 91 and on (chapter 6) 'cshoc', # shares", "dfs del big_df gc.collect() # 30 seconds per 50 -- should take about", "for i in tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs)", "== 'secd': # this is the securities data db -- has historical price", "gvkeys for SQL query start = ch if ch + chunk_size > len(remaining_gvs):", "remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to 
get historical security data # goes", "common or ADR, which are stocks you can buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])]", "update: # cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): # const_df =", "for bankruptcy or acquisitions, etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged =", "iteration # secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH + 'hdf/secd.hdf' current_df", "# get gvkeys for tpci 0 or F # ends up with very", "for s in common_securities['gvkey']]) + ')' # if you want to count how", "return df = db.get_table(library=library, table=table, obs=nrows) if table == 'idxcst_his': # converts date", "= db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif tablename", "df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]),", "more than one result, may need to filter by iid too price_chg_dict['adj_close'] =", "check that no tickers are used for multiple companies # get unique dates", "see what returns are on yearly rebalance for 20 smallest marketcap stocks #", "found!\") return # convert datadate to datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market", "get price change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys: last_idx += 1", "non-null object cik 922655 non-null object fic 1000000 non-null object dtypes: float64(20), object(21)", "data query only took a few seconds even with 1M rows # query_str", "db.get_row_count(library=library, table=table) if nrows == current_rows: print('up to date') return True, nrows elif", "isn't more than one 
result, may need to filter by iid too price_chg_dict['adj_close']", "acquisitions, etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni',", "+ 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1]", "by row, and can't get it working anyhow # need to figure out", "a few above and below # pd.value_counts(lengths) # plt.hist(lengths) # plt.show() # TODO:", "price_chg_dict['iid'] = iid # TODO: check this isn't more than one result, may", "and only update it \"\"\" library = 'comp' table = 'idxcst_his' # check", "# converts date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'],", "download_index_constituents() else: print('not one of predefined tables to download') def check_if_up_to_date(db, df_filepath, table,", "number in current db;') print('something is wrong...') return True, nrows else: print('db needs", "smallest_20 = OrderedDict() smallest_20_1y_chg = OrderedDict() # TODO: get latest price if stopped", "do when saving the file # df['from'] = pd.to_datetime(df['from'], utc=True) # df['thru'] =", "added, but do later # if update: # cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf'", "pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine with securities for", "= nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at earliest date for index date_range =", "turns out the db is fast to download because this is a small", "= pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!! 
#", "309295 non-null float64 prccd 999696 non-null float64 prchd 986959 non-null float64 prcld 985637", "obs=nrows, offset=offset) # converts date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru']", "= bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others not in here", ".h5 files have same name as table \"\"\" df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table)", "del df gc.collect() elif tablename == 'idxcst_his': download_index_constituents() else: print('not one of predefined", "were removed on 'thru', so if it is the 'thru' date, then shouldn't", "+ gv + \"'\" for gv in remaining_gvs[start:]] else: gvkeys_strings = [\"'\" +", "# compustat data # short data db.get_table('comp', 'sec_shortint', obs=100) # quarterly fundamentals db.get_table('comp',", "pd.Timestamp(big_df['datadate']) # doesn't work!! # big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set date instead of", "table, library='comp'): \"\"\" checks if current rows is less than rows in db;", "tablename) # nrows = 1000000 if tablename == 'secd': # this is the", "\"\"\" downloads full security data history for sp600 I think this was actually", "not just sp600. 
TODO: get latest date and download updates \"\"\" df =", "prchd 986959 non-null float64 prcld 985637 non-null float64 prcod 224624 non-null float64 prcstd", "1M rows # query_str = 'select count(gvkey) from comp.secd where datadate > \\'{}\\';'.format(latest_date)", "= df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table, library='comp'): \"\"\" downloads table if needs", "import pandas_market_calendars as mcal import pandas as pd from tqdm import tqdm import", "df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif tablename == 'sec_dprc': # need to", "df['gvkey'].unique() # I think 0 or F for tpci are common or ADR,", "tickers; buyouts or something # ignore stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) #", "common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) # no longer need currency, all USD common_df.drop('curcdd', axis=1,", "index date_range = np.array(sorted(date_range)) date_range = date_range[date_range >= start] constituent_companies = OrderedDict() #", "'secd', obs=100) # OTC pricing db.get_table('otc', 'endofday', obs=100) # gets acquisition spending; aqcy", "up_to_date: return df = db.get_table(library=library, table=table, obs=nrows) if table == 'idxcst_his': # converts", "currency column # write existing data as hdf table -- first time only", "the file # df['from'] = pd.to_datetime(df['from'], utc=True) # df['thru'] = pd.to_datetime(df['thru'], utc=True) #", "'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1] #", "years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days = [] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates", "= {'key': 'data', 'mode': 'w', 'complib': 'blosc', 'complevel': 9} 
hdf_settings_table = {'key': 'data',", "seconds per 50 -- should take about 20m for 2k # took 1282s", "= db.get_table(library=library, table=table, obs=nrows) if table == 'idxcst_his': # converts date columns to", "curcddv 5861 non-null object capgn 29 non-null float64 cheqv 85 non-null float64 div", "print('number of rows:', nrows) #db.describe_table(library, tablename) # nrows = 1000000 if tablename ==", "common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) # no longer", "few hundred are missing in the daily data from the constituent list #", "# quarterly fundamentals db.get_table('comp', 'fundq') # annual db.get_table('comp', 'funda') # industry quarterly db.get_table('comp',", "# this is the securities data db -- has historical price data for", "100: Nasdaq 100 \"\"\" idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] ==", "# combine with securities for ticker symbol securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') #", "new things are added, but do later # if update: # cst_filepath =", "object dtypes: float64(20), object(21) memory usage: 312.8+ MB so we can ignore most", "return # repeat ad nauseum # common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks =", "strategy from paper (see beat_market_analysis github repo) \"\"\" # merge historical constituents for", "# close 'prchd', # high 'prcld', # low 'prcod', # open 'tic' #", "a clean function and do when saving the file # df['from'] = pd.to_datetime(df['from'],", "table = 'idxcst_his' # check if any new rows df_filepath = FILEPATH +", "see how it does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 #", "gvkeys for sql search -- no longer needed # gvkeys = single_idx_df['gvkey'].values #", "cshtrd 999677 non-null float64 dvi 379938 non-null float64 
eps 309295 non-null float64 epsmo", "jobs = [] # 10 threads per cpu for 8 cores; default is", "start, end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is [5:] ; market cap", "library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end = time.time() # print('took', int(end - start),", "current_df = pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else: current_rows = 0 nrows = db.get_row_count(library=library,", "tpci column is 0 or F) if update=True, will get latest date in", "whole table if cant figure out what new stuff is \"\"\" nrows =", "stocks (tpci = 0 and F) common_securities_short = common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short,", "> d)] current_companies = current_stocks[['gvkey', 'iid']] # company names # current_tickers = current_stocks['co_tic']", "the overall price changes each year annualized_return = (np.prod([1 + p for p", "current rows is less than rows in db; returns True is up to", "'iid']) # get adjusted closes for constituents now and 1y in future const_current_price", "time only # current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del", "market cap df['market_cap'] = df['cshoc'] * df['prccd'] # TODO: create file for storing", "const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') # only need to do this once, then", "ends up with very slow sql query; avoid securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')", "updates; then save to HDF5 for tables like 'security', check if any more", "'eps', 'gvkey', 'iid', 'prccd', 'prchd', 'prcld', 'prcod'] # WARNING: does not appear to", "grab whole table if cant figure out what new stuff is \"\"\" nrows", "not sure ] other_cols = ['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv',", "index fundamentals db.get_table('comp', 'idx_ann') # monthly security data db.get_table('comp', 'secm', obs=100) # 
index", "for 8 cores; default is 5 per CPU # seems like 5 simultaneous", "idx_ann table in compd library -- need to rewrite query idxcst_his has the", "update=True, table='secd', library='comp'): \"\"\" downloads data for all common stocks (US and ADR,", "common stocks (tpci = 0 and F) common_securities_short = common_securities[['gvkey', 'iid']] common_df =", "year later, calculate overall return # repeat ad nauseum # common_stocks = pd.read_hdf(FILEPATH", "are missing in the daily data from the constituent list # AIR ('001004')", "= securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to be weird tickers; buyouts", "# colculate market cap df['market_cap'] = df['cshoc'] * df['prccd'] # TODO: create file", "pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!! # big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically", "iid and gvkey -- should just merge or something # for gvkey in", "common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to make sure only one iid per gvkey --", "price changes and check for bankruptcy or acquisitions, etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing", "# gvkeys = df['gvkey'].unique() # I think 0 or F for tpci are", "exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to make sure only one iid per gvkey", "constituent_utils.py \"\"\" gets historical constituents from WRDS file common indexes as represented in", "'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp',", "'hdf/security.hdf') # gvkeys = df['gvkey'].unique() # I think 0 or F for tpci", "+ 'hdf/{}.hdf'.format(tablename + '_min_part_' 
+ str(i)), **hdf_settings) del df gc.collect() elif tablename ==", "checks if size of table has changed; if so, downloads anew TODO: if", "'thru', so if it is the 'thru' date, then shouldn't be included #", "{} from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # see", "= OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg = OrderedDict() # TODO: get latest price", "+ '_min_part_' + str(i)), **hdf_settings) del df gc.collect() elif tablename == 'idxcst_his': download_index_constituents()", "# need to dl in chunks because it is too huge -- expect", "include stocks on 'from' date # use dataframe masking date_string = d.strftime('%Y-%m-%d') current_stocks", "tickers are used for multiple companies # get unique dates where changes were", "key error in constituent_companies price_chg_1y = OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg = OrderedDict()", "this in a clean function and do when saving the file # df['from']", "float64 prcld 985637 non-null float64 prcod 224624 non-null float64 prcstd 999696 non-null float64", "get stocks' gvkeys for sql search -- no longer needed # gvkeys =", "'tic' # ticker symbol ] df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH +", "as pd from tqdm import tqdm import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings =", "len(remaining_gvs) // 10 for i, ch in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)): #", "in missing_gvkeys: last_idx += 1 # make an index for creating dataframe with", "# assume others not in here are 0 for now # get the", "save to HDF5 for tables like 'security', check if any more rows and", "'datadate', 'eps', 'prccd', 'prchd', 'prcld', 'prcod', 'tic' # maybe want to get iid", "= iid # TODO: check this isn't more than one result, may need", "== '030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" + 
gv + \"'\" for", "bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others not in here are 0 for now", "missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565'] # TODO: is it", "['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding the Data” on", "rows:', nrows) #db.describe_table(library, tablename) # nrows = 1000000 if tablename == 'secd': #", "if up_to_date: return offset = 0 # turns out the db is fast", "'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO: deal with acquisitions: dlrsni 01", "db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\" downloads an entire table by name; library also", "\"\"\" df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] == '030824'] sp600_gvkeys =", "cpu for 8 cores; default is 5 per CPU # seems like 5", "securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO: deal", "non-null float64 epsmo 309295 non-null float64 prccd 999696 non-null float64 prchd 986959 non-null", "has index gvkeyx and index name - comes from idx_ann table in compd", "(const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr]", "current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end] # TODO: figure", "# volume 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld',", "downloads historical price data # chunk through remaining gvkeys in 10 chunks chunk_size", "longer need currency, all USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] = 
pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] =", "# db.create_pgpass_file() return db def list_libs_tables(): \"\"\" some exploration of the db lists", "closes for constituents now and 1y in future const_current_price = full_const[['gvkey', 'iid', 'ajexdi',", "1000000 non-null object tpci 1000000 non-null object cik 922655 non-null object fic 1000000", "object anncdate 2776 non-null object capgnpaydate 29 non-null object cheqvpaydate 82 non-null object", "for gv in remaining_gvs[start:ch + chunk_size]] start = time.time() jobs = [] #", "{};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate']) return df def download_all_security_data(): \"\"\" downloads full security", "= df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index,", "updating') return False, nrows def download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete for now; use", "df_filepath = FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if", "those middle columns cols_to_use = ['ajexdi', 'cshoc', # shares outstanding 'cshtrd', # volume", "to get iid too, not sure ] other_cols = ['adrrc', 'anncdate', 'capgn', 'capgnpaydate',", "pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker", "in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs = [] for gv, j in jobs:", "cores; default is 5 per CPU # seems like 5 simultaneous queries is", "# print('took', int(end - start), 'seconds') # takes about 2h linearly # dfs", "not quite gvkey_grp = common_stocks.groupby('gvkey') num_iids = 
gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1] common_df", "['ajexdi', 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'prccd', 'prchd', 'prcld',", "# takes a really long time... # # df = db.raw_sql('select {} from", "I think this was actually used to get all historical stock data actually,", "after that and add to current df \"\"\" # filename from first iteration", "out where new things are added, but do later # if update: #", "'iid']] common_df = df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1) # drop currency column", "= [] for gv, j in jobs: # print(gv) dfs.append(j.result()) end = time.time()", "== gvkeyx].copy() # combine with securities for ticker symbol securities = pd.read_hdf(FILEPATH +", "dates df.drop(df['prccd'].apply(lambda x: x is None).index, inplace=True) if not df.shape[0] > 0: print(\"no", "were made unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def spy_20_smallest(): \"\"\"", "for testing # df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({})", "/ const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg", "downloaded and use sql query to gegt updates; then save to HDF5 for", "= m price_chg_dict['iid'] = iid # TODO: check this isn't more than one", "IN # # should be like ('item 1', 'item 2', 'item 3') #", "= bottom_20_price_chg.index[-1] # get stocks missing from price changes, and use last price", "update=False): \"\"\" obsolete for now; use download_small_table function instead gets historical index constituents", "'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to get historical", "for index date_range = 
np.array(sorted(date_range)) date_range = date_range[date_range >= start] constituent_companies = OrderedDict()", "nrows = db.get_row_count(library, tablename) print('number of rows:', nrows) #db.describe_table(library, tablename) # nrows =", "price if stopped trading during the year; figure out mergers/buyouts, etc # TODO:", "to log in \"\"\" wrds_uname = os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') # tries to", "idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx) > 1: print('more than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm']", "'exchg', 'fic', 'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic', 'tpci', 'trfd'] \"\"\"", "= {} iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid) > 1: print('shit, iid", "'_min_part_' + str(i)), **hdf_settings) del df gc.collect() elif tablename == 'idxcst_his': download_index_constituents() else:", "for gv in tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select {} from {}.{} WHERE gvkey", "current_rows: print('up to date') return True, nrows elif nrows < current_rows: print('number of", "WHERE gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def testing_db(): \"\"\" looks", "# get the overall price changes each year annualized_return = (np.prod([1 + p", "# goes through all securities and downloads historical price data # chunk through", "and grab new stuff, or just grab whole table if cant figure out", "sure only one iid per gvkey -- not quite gvkey_grp = common_stocks.groupby('gvkey') num_iids", "= pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc'] *", "libraries, and within each library you can list tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq')", "= securities['gvkey'].values remaining_gvs = 
list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to get historical security data", "can buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to be", "and use sql query to gegt updates; then save to HDF5 for tables", "df = db.raw_sql('select {} from {}.{} WHERE gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library,", "df4 = db.raw_sql('select * from comp.fundq WHERE gvkey = \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\"", "need to update row by row, and can't get it working anyhow #", "good to go # TODO: put this in a clean function and do", "divdpaydateind 0 non-null object divsp 129 non-null float64 dvrated 2875 non-null float64 paydateind", "should take about 20m for 2k # took 1282s for 2127 gvkeys def", "+ \"'\" for gv in remaining_gvs[start:]] else: gvkeys_strings = [\"'\" + gv +", "'idx_ann') # monthly security data db.get_table('comp', 'secm', obs=100) # index constituents db.get_table('comp', 'idxcst_his')", "None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at earliest date for index", "# drop currency column # write existing data as hdf table -- first", "= FILEPATH + 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath) # last_entry", "[\"'\" + gv + \"'\" for gv in sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings)", "TODO: deal with acquisitions: dlrsni 01 is acquired, 02 is bankrupt, 03 is", "is acquired, 02 is bankrupt, 03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys", "in a clean function and do when saving the file # df['from'] =", "with limit for testing # df = db.raw_sql('select {} from {}.{} WHERE gvkey", "> 1: print('shit, 
iid length >1') iid = iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey'] ==", "plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks yearly returns, annualized return", "list tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't have permission?? db.list_tables('comp_global_daily') db.list_tables('comp') def", "# close 'prchd', # high 'prcld', # low 'prcod'] # open secd_cols =", "dataframe maybe, for now, just use gvkeyx which is # 030824 for sp600", "is it different/better to rebalance on a certain day/month? def secd_info(): \"\"\" info", "pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table,", "'a', 'append': True, 'format': 'table', 'complib': 'blosc', 'complevel': 9} secd_cols_to_use = ['ajexdi', #", "a few seconds even with 1M rows # query_str = 'select count(gvkey) from", "{} from {}.{} WHERE datadate > \\'{}\\';'# and gvkey IN {};' df =", "curcdd 999696 non-null object adrrc 4202 non-null float64 ajexdi 999696 non-null float64 cshoc", "cant figure out what new stuff is \"\"\" nrows = db.get_row_count(library, tablename) print('number", "# volume 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high", "bottom_20_price_chg.index[-1] # get stocks missing from price changes, and use last price to", "to do this once, then after it's saved, good to go # TODO:", "/ const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) # get %", "what it is) SP1500: S&P 1500 Super Composite NASDAQ 100: Nasdaq 100 \"\"\"", "volume 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld', #", "too price_chg_dict['adj_close'] = 
const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] = (last_price -", "index data starts in 1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days = []", "[] # 10 threads per cpu for 8 cores; default is 5 per", "non-null float64 exchg 1000000 non-null float64 secstat 1000000 non-null object tpci 1000000 non-null", "# get todays date and reset hour, min, sec to 0s # TODO:", "# shares outstanding 'cshtrd', # volume 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', #", "in tqdm(date_range): # if date is within stock's from and thru, add to", "max -- run in parallel with ThreadPoolExecutor(max_workers=5) as executor: for gv in gvkeys_strings:", "big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del dfs del big_df gc.collect() # 30", "rows in db; returns True is up to date; False if not \"\"\"", "# # get new rows plus the last one to check it's the", "AJEXDI ); “Understanding the Data” on page 91 and on (chapter 6) 'cshoc',", "get historical security data # goes through all securities and downloads historical price", "stocks you can buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem", "FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return", "with ThreadPoolExecutor(max_workers=5) as executor: for gv in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs =", "for 2127 gvkeys def load_and_combine_sec_dprc(): \"\"\" loads all security data from sec_dprc table", "jobs: # print(gv) dfs.append(j.result()) end = time.time() print('took', int(end - start), 'seconds') big_df", "= FILEPATH + 
'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH + 'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename) latest_date", "tablename == 'idxcst_his': download_index_constituents() else: print('not one of predefined tables to download') def", "tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't have permission?? db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename,", "outstanding 'cshtrd', # volume 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', #", "last_entry = const_df.iloc[-1] # # get new rows plus the last one to", "= db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate']) # drop columns which seem to have", "and check for bankruptcy or acquisitions, etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)]", "info of first 1M rows of secd: RangeIndex: 1000000 entries, 0 to 999999", "price # get close price a year later, calculate overall return # repeat", "in 2006 and 07 I think last_idx = 0 else: last_idx = bottom_20_price_chg.index[-1]", "table checks if size of table has changed; if so, downloads anew TODO:", "02 is bankrupt, 03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing in", "out, and see how it does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() /", "df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del", "x is None).index, inplace=True) if not df.shape[0] > 0: print(\"no data to be", "# cutoff at earliest date for index date_range = 
np.array(sorted(date_range)) date_range = date_range[date_range", "comp.fundq WHERE gvkey = \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq 100 constituents", "table \"\"\" df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table,", "(np.prod([1 + p for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) * 100", "have weird dates df.drop(df['prccd'].apply(lambda x: x is None).index, inplace=True) if not df.shape[0] >", "0 or F for tpci are common or ADR, which are stocks you", "the constituent list # AIR ('001004') is not in common_stocks, figure out why", "HDF5 for tables like 'security', check if any more rows and grab new", "# last_entry = const_df.iloc[-1] # # get new rows plus the last one", "merge historical constituents for sp600 with daily price, eps, and market cap data", "this is a small table... # no need to update row by row,", "pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx) > 1: print('more", "seem to match historical data if date_range is None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern')", "# short data db.get_table('comp', 'sec_shortint', obs=100) # quarterly fundamentals db.get_table('comp', 'fundq') # annual", "'iid', 'prccd', # close 'prchd', # high 'prcld', # low 'prcod', # open", "will try to find existing dataframe and only update it \"\"\" library =", "'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine with securities for ticker symbol", "= db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate'])", "(chapter 6) 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'gvkey', 'iid',", "securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys = df['gvkey'].unique() # I think 0", 
"appear to work properly. probably a sql ordering issue or something nobs =", "1282s for 2127 gvkeys def load_and_combine_sec_dprc(): \"\"\" loads all security data from sec_dprc", "df, then get everything after that and add to current df \"\"\" #", "= [] # for gv in tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select {} from", "df['thru'] = df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to join up", "date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at earliest date for index date_range", "compustat table checks if size of table has changed; if so, downloads anew", "6) 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'gvkey', 'iid', 'prccd',", "tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)), **hdf_settings) del", "constituents as a histogram; mostly 600 but a few above and below #", "rows is less than rows in db; returns True is up to date;", "return True, nrows else: print('db needs updating') return False, nrows def download_index_constituents(db, nrows=None,", "plt import numpy as np import pandas_market_calendars as mcal import pandas as pd", "is the securities data db -- has historical price data for securities cols_to_use", "new rows df_filepath = FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table,", "repo) \"\"\" # merge historical constituents for sp600 with daily price, eps, and", "are common or ADR, which are stocks you can buy common_stocks = securities[securities['tpci'].isin(['0',", "num_iids.mean() num_iids[num_iids > 1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use", "was actually used to get all historical stock data actually, not just 
sp600.", "saved, good to go # TODO: put this in a clean function and", "end=end) # gets only dates valid for NYSE -- doesn't seem to match", "True, 'format': 'table', 'complib': 'blosc', 'complevel': 9} secd_cols_to_use = ['ajexdi', # Adjusted Price", "secstat 1000000 non-null object tpci 1000000 non-null object cik 922655 non-null object fic", "tqdm(years[1:]): # first year starts on sept year_dates = [d for d in", "= pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db,", "OTC pricing db.get_table('otc', 'endofday', obs=100) # gets acquisition spending; aqcy column df4 =", "= const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) /", "securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'): # adapted from beat_market_analysis", "# high 'prcld', # low 'prcod'] # open secd_cols = ','.join(secd_cols_to_use) def make_db_connection():", "del common_df del common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df = db.raw_sql('select {}", "or ADR, which are stocks you can buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index,", "number of constituents as a histogram; mostly 600 but a few above and", "price change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys: last_idx += 1 #", "ch if ch + chunk_size > len(remaining_gvs): gvkeys_strings = [\"'\" + gv +", "# common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH + 
'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] =", "may need to filter by iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future']", "== m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg", "03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing in price changes and", "axis=1) # drop currency column # write existing data as hdf table --", "shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'prccd', 'prchd', 'prcld', 'prcod', 'tic' #", "get everything after that and add to current df \"\"\" # filename from", "RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 41 columns): gvkey 1000000", "WHERE IN # # should be like ('item 1', 'item 2', 'item 3')", "the same as annual fundamentals # annual index fundamentals db.get_table('comp', 'idx_ann') # monthly", "10 chunks chunk_size = len(remaining_gvs) // 10 for i, ch in enumerate(range(0, len(remaining_gvs)", "Midcap 400 Index SP500: S&P 500 Comp-Ltd (there's another one with Wed instead", "= constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get adjusted closes for constituents now and 1y", "download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\" downloads data for all common stocks (US and", "const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr]", "bottom_20_price_chg.shape[0] == 0: # everything was acquired/bankrupt, etc, like 
in 2006 and 07", "no tickers are used for multiple companies # get unique dates where changes", "current_df.shape[0] else: current_rows = 0 nrows = db.get_row_count(library=library, table=table) if nrows == current_rows:", "query: WHERE IN # # should be like ('item 1', 'item 2', 'item", "overall price changes each year annualized_return = (np.prod([1 + p for p in", "[\"'\" + gv + \"'\" for gv in remaining_gvs[start:ch + chunk_size]] start =", "to WRDS database need to enter credentials to log in \"\"\" wrds_uname =", "df['cshoc'] * df['prccd'] # TODO: create file for storing all updated data and", "return # convert datadate to datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap", "'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate',", "table \"\"\" dfs = [] for i in tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH", "make an index for creating dataframe with last price, so we can append", "# sp600 index data starts in 1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days", "all securities securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys)))", "'from' date, so include stocks on 'from' date # use dataframe masking date_string", "table=table, obs=nrows) if table == 'idxcst_his': # converts date columns to datetime df['from']", "represented in the idx_ann table: SP600: S&P Smallcap 600 Index SP400: S&P Midcap", "== gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'):", "all common stocks (US and ADR, or tpci column is 0 or F)", "is \"\"\" nrows = db.get_row_count(library, tablename) print('number of rows:', nrows) #db.describe_table(library, tablename) 
#", "gc import time import datetime from collections import OrderedDict from concurrent.futures import ThreadPoolExecutor", "= [\"'\" + gv + \"'\" for gv in sp600_gvkeys] sp600_gvkeys_string = ',", "\\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq 100 constituents then looks at \"\"\"", "+ 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif tablename == 'sec_dprc': # need to dl", "as mcal import pandas as pd from tqdm import tqdm import wrds FILEPATH", "WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate']) # dfs.append(df) # testing #", "# make string for SQL query: WHERE IN # # should be like", "db.get_table('comp', 'secd', obs=100) # OTC pricing db.get_table('otc', 'endofday', obs=100) # gets acquisition spending;", "stocks securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys = df['gvkey'].unique() # I think", "to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del securities del df del common_df", "common stocks securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys = df['gvkey'].unique() # I", "change for each const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close'])", "on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid']) #", "del df gc.collect() def download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\" downloads data for all", "of predefined tables to download') def check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\" checks if", "'ajexdi']].dropna().iloc[-1] last_price = last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid'] = iid #", "next stock in bottom 20 at time the other was put 
out, and", "object curcdd 999696 non-null object adrrc 4202 non-null float64 ajexdi 999696 non-null float64", "nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return offset = 0 #", "lengths.append(current_companies.shape[0]) # look at number of constituents as a histogram; mostly 600 but", "if ch + chunk_size > len(remaining_gvs): gvkeys_strings = [\"'\" + gv + \"'\"", "# ticker symbol ] df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename", "100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks yearly returns, annualized", "see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves credentials, but not", "data for all common stocks (US and ADR, or tpci column is 0", "= current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for tpci 0 or F # ends up", "securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])] # # make string", "tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes a really long time... 
# # df =", "pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1] # # get new rows plus the last", "index]) return gvkeyx = gvkeyx[0] # TODO: get latest file # parse dates", "df.to_hdf(df_filepath, **hdf_settings) del df gc.collect() def download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\" downloads data", "# '1998-01-02' giving key error in constituent_companies price_chg_1y = OrderedDict() smallest_20 = OrderedDict()", "'prcod', # open 'tic' # ticker symbol ] df = db.get_table(library, tablename, columns=cols_to_use,", "quarterly db.get_table('comp', 'aco_indstq') # annual db.get_table('comp', 'aco_indsta') # index prices daily db.get_table('comp', 'idx_mth')", "is True, then will try to find existing dataframe and only update it", "1): print('on part', str(i)) df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH +", "9-12: sec_shortint security secd secd is about 39GB in a pandas df... TODO:", "= df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table, library='comp'): \"\"\" downloads", "list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys: last_idx += 1 # make an index for", "'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return offset =", "100 \"\"\" idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values if", "= [] for i in tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df", "a year later, calculate overall return # repeat ad nauseum # common_stocks =", "wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data', 'mode': 'w', 'complib': 'blosc', 'complevel':", "executor: for gv in 
gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs = [] for gv,", "os.environ.get('wrds_password') # tries to use pgpass file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db =", "object tpci 1000000 non-null object cik 922655 non-null object fic 1000000 non-null object", "close price # get close price a year later, calculate overall return #", "df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table, library='comp'): \"\"\" downloads table", "one result, may need to filter by iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] ==", "\"\"\" df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library)", "bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities,", "chunks chunk_size = len(remaining_gvs) // 10 for i, ch in enumerate(range(0, len(remaining_gvs) +", "elif nrows < current_rows: print('number of available rows is less than number in", "is None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at earliest date for", "'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey', 'iid', 'paydate', 'paydateind',", "# print(gv) dfs.append(j.result()) end = time.time() print('took', int(end - start), 'seconds') big_df =", "== 'CAD'].index, inplace=True) # no longer need currency, all USD common_df.drop('curcdd', axis=1, inplace=True)", "db.get_table('comp', 'idxcst_his') # market cap/price, daily data db.get_table('comp', 'secd', obs=100) # OTC pricing", "only took a few seconds even with 1M rows # query_str = 
'select", "df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep common stocks", "so include stocks on 'from' date # use dataframe masking date_string = d.strftime('%Y-%m-%d')", "time the other was put out, and see how it does smallest_20_1y_chg[datestr] =", "few above and below # pd.value_counts(lengths) # plt.hist(lengths) # plt.show() # TODO: #", "for gv in sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) # reads in all securities", "tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end = time.time() # print('took', int(end - start), 'seconds')", "def spy_20_smallest(): \"\"\" tries to implement 20 smallest SPY strategy from paper (see", "other dataframe maybe, for now, just use gvkeyx which is # 030824 for", "credentials, but not pgpass working # db.create_pgpass_file() return db def list_libs_tables(): \"\"\" some", "S&P 1500 Super Composite NASDAQ 100: Nasdaq 100 \"\"\" idx_df = pd.read_hdf(FILEPATH +", "others not in here are 0 for now # get the overall price", "https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves credentials, but not pgpass working #", "del dfs del big_df gc.collect() # 30 seconds per 50 -- should take", "= bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append works with out-of-order columns #", "latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for tpci 0 or F # ends", "del df del common_df del common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df =", "missing from price changes, and use last price to get price change missing_gvkeys", "trfd 733884 non-null float64 exchg 1000000 non-null float64 secstat 
1000000 non-null object tpci", "{};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end = time.time() # print('took', int(end -", "return df def download_all_security_data(): \"\"\" downloads full security data history for sp600 I", "index for creating dataframe with last price, so we can append it to", "of constituents for each day start = single_idx_df['from'].min() # get todays date and", "as plt import numpy as np import pandas_market_calendars as mcal import pandas as", "library=library) if up_to_date: return offset = 0 # turns out the db is", "column df4 = db.raw_sql('select * from comp.fundq WHERE gvkey = \\'010519\\';') def get_nasdaq_100_constituents():", "= db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)),", "nasdaq 100 for d in tqdm(date_range): # if date is within stock's from", "constituent_companies, unique_dates def spy_20_smallest(): \"\"\" tries to implement 20 smallest SPY strategy from", "df; only ticker, gvkey, and iid sec_short = securities[['tic', 'gvkey', 'iid']] single_idx_df =", "grab new stuff, or just grab whole table if cant figure out what", "time.time() jobs = [] # 10 threads per cpu for 8 cores; default", "is about 39GB in a pandas df... 
TODO: get latest date already downloaded", "table, latest_date), date_cols=['datadate']) # drop columns which seem to have weird dates df.drop(df['prccd'].apply(lambda", "on page 91 and on (chapter 6) 'cshoc', # shares outstanding 'cshtrd', #", "a histogram; mostly 600 but a few above and below # pd.value_counts(lengths) #", "start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] ==", "10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes a really long time... # #", "get latest file # parse dates not working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True)", "== 'idxcst_his': download_index_constituents() else: print('not one of predefined tables to download') def check_if_up_to_date(db,", "store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del securities del df del common_df del common_securities", "# df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use),", "& (single_idx_df['thru'] > d)] current_companies = current_stocks[['gvkey', 'iid']] # company names # current_tickers", ">1') iid = iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] == iid)][['prccd',", "need to rewrite query idxcst_his has the historical index constituents \"\"\" import os", "nrows def download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete for now; use download_small_table function instead", "columns which seem to have weird dates df.drop(df['prccd'].apply(lambda x: x is None).index, inplace=True)", "long one query takes -- about 2s # start = time.time() # df", "are added, but do later # if update: # cst_filepath = FILEPATH +", "# TODO: if not latest date; use date of datafile as latest end", "about 2s # start = time.time() # df = db.raw_sql('select {} from 
{}.{}", "like ('item 1', 'item 2', 'item 3') # gvkeys_str = '(' + ',", "print(idx_df[idx_df['conm'] == index]) return gvkeyx = gvkeyx[0] # TODO: get latest file #", "= 0 and F) common_securities_short = common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short, on=['gvkey', 'iid'])", "on sept year_dates = [d for d in sp600_dates if d.year == y]", "1000000 non-null object cusip 1000000 non-null object conm 1000000 non-null object curcddv 5861", "(PRCCD / AJEXDI ); “Understanding the Data” on page 91 and on (chapter", "required. default library is the compstat lib download entire table e.g. tablename='sec_shortint' tables", "if size of table has changed; if so, downloads anew TODO: if update", "+ chunk_size]] start = time.time() jobs = [] # 10 threads per cpu", "# TODO: get latest file # parse dates not working for hdf, parse_dates=['from',", "= df['from'].dt.tz_convert('US/Eastern') # df['thru'] = df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need", "); “Understanding the Data” on page 91 and on (chapter 6) 'cshoc', #", "600 but a few above and below # pd.value_counts(lengths) # plt.hist(lengths) # plt.show()", "\\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def testing_db(): \"\"\" looks like some code", "idx_ann table: SP600: S&P Smallcap 600 Index SP400: S&P Midcap 400 Index SP500:", "to write data # df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) #", "columns (total 41 columns): gvkey 1000000 non-null object iid 1000000 non-null object datadate", "cheqvpaydate 82 non-null object divdpaydate 5691 non-null object divsppaydate 128 non-null object paydate", "in 10 chunks chunk_size = len(remaining_gvs) // 10 for i, ch in enumerate(range(0,", "'sec_dprc': # need to dl in chunks because it is too huge --", 
"common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH +", "df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine with", "an index for creating dataframe with last price, so we can append it", "iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price =", "gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # see how long one query", "credentials to log in \"\"\" wrds_uname = os.environ.get('wrds_username') wrds_pass = os.environ.get('wrds_password') # tries", "NASDAQ 100: Nasdaq 100 \"\"\" idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm']", "comp.secd where datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str = 'select {} from {}.{}", "the other was put out, and see how it does smallest_20_1y_chg[datestr] = bottom_20_price_chg", "2776 non-null object capgnpaydate 29 non-null object cheqvpaydate 82 non-null object divdpaydate 5691", "-- about 2s # start = time.time() # df = db.raw_sql('select {} from", "from {}.{} WHERE gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def testing_db():", "on 'from' date, so include stocks on 'from' date # use dataframe masking", "db.get_table('comp', 'idx_ann') # monthly security data db.get_table('comp', 'secm', obs=100) # index constituents db.get_table('comp',", "in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None,", "gets historical index constituents from compustat table checks if size of 
table has", "big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set date instead of hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)),", "= db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate'])", "it's the same as the # # last one currently in the hdf", "10s for nasdaq 100 for d in tqdm(date_range): # if date is within", "I don't know what it is) SP1500: S&P 1500 Super Composite NASDAQ 100:", "data from sec_dprc table \"\"\" dfs = [] for i in tqdm(range(1, 13)):", "'aco_indstq') # annual db.get_table('comp', 'aco_indsta') # index prices daily db.get_table('comp', 'idx_mth') # simplified", "table if cant figure out what new stuff is \"\"\" nrows = db.get_row_count(library,", "nrows = 1000000 if tablename == 'secd': # this is the securities data", "= set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def spy_20_smallest(): \"\"\" tries to implement", "file for storing all updated data and append # used once to write", "nasdaq 100 constituents then looks at \"\"\" constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(date_range=None, index='Nasdaq 100')", "rebalance on a certain day/month? 
def secd_info(): \"\"\" info of first 1M rows", "'comp' table = 'idxcst_his' # check if any new rows df_filepath = FILEPATH", "as hdf table -- first time only # current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to", "it is the 'thru' date, then shouldn't be included # but stocks were", "check if any new rows df_filepath = FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows =", "query only took a few seconds even with 1M rows # query_str =", "non-null float64 eps 309295 non-null float64 epsmo 309295 non-null float64 prccd 999696 non-null", "# see what returns are on yearly rebalance for 20 smallest marketcap stocks", "= single_idx_df['gvkey'].values # create dataframe with list of constituents for each day start", "later # if update: # cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath):", "in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90)", "999696 non-null float64 trfd 733884 non-null float64 exchg 1000000 non-null float64 secstat 1000000", "{} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end", "= pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') # only need to do this once, then after", "remaining gvkeys in 10 chunks chunk_size = len(remaining_gvs) // 10 for i, ch", "obs=100) # quarterly fundamentals db.get_table('comp', 'fundq') # annual db.get_table('comp', 'funda') # industry quarterly", "True is up to date; False if not \"\"\" if os.path.exists(df_filepath): current_df =", "500 Comp-Ltd (there's another one with Wed instead of Ltd which I don't", "rows_to_get = nrows # offset = 0 df = db.get_table(library=library, table=table, obs=nrows, offset=offset)", "tpci 0 or F # ends up with very slow sql query; avoid", "non-null float64 prccd 999696 non-null float64 prchd 986959 non-null 
float64 prcld 985637 non-null", "**hdf_settings) del jobs del dfs del big_df gc.collect() # 30 seconds per 50", "# plt.show() # TODO: # need to check that no tickers are used", "also required. default library is the compstat lib download entire table e.g. tablename='sec_shortint'", "of Ltd which I don't know what it is) SP1500: S&P 1500 Super", "the hdf file # rows_to_get = nrows - const_df.shape[0] + 1 # offset", "== gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'): # adapted from beat_market_analysis constituent_utils.py", "# no longer need currency, all USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern')", "+ ')' # if you want to count how many rows are there,", "make_db_connection(): \"\"\" creates connection to WRDS database need to enter credentials to log", "common_df del common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df = db.raw_sql('select {} from", "const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close']", "for i, start in enumerate(range(0, nrows, nobs), 1): print('on part', str(i)) df =", "\"\"\" obsolete for now; use download_small_table function instead gets historical index constituents from", "price changes, and use last price to get price change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey'])))", "= (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20]", "has changed; if so, downloads anew TODO: if update is True, then will", "price to get price change missing_gvkeys 
= list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys: last_idx", "iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] = (last_price", "time import datetime from collections import OrderedDict from concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot", "= df[df['gvkeyx'] == '030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" + gv +", "price change for each const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] -", "'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del dfs del big_df gc.collect() # 30 seconds per", "df price_chg_dict = {} iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid) > 1:", "added on 'from' date, so include stocks on 'from' date # use dataframe", "tries to implement 20 smallest SPY strategy from paper (see beat_market_analysis github repo)", "db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't have permission?? db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\"", "secd is about 39GB in a pandas df... 
TODO: get latest date already", "'prccd', 'prchd', 'prcld', 'prcod', 'tic' # maybe want to get iid too, not", "to join up with other dataframe maybe, for now, just use gvkeyx which", "new rows plus the last one to check it's the same as the", "update row by row, and can't get it working anyhow # need to", "= const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) #", "float64 cshoc 439670 non-null float64 cshtrd 999677 non-null float64 dvi 379938 non-null float64", "an entire table by name; library also required. default library is the compstat", "from and thru, add to list # stocks were removed on 'thru', so", "**hdf_settings) del df gc.collect() def download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\" downloads data for", "yearly rebalance for 20 smallest marketcap stocks # just get first of year", "const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) # get", "can ignore most of those middle columns cols_to_use = ['ajexdi', 'cshoc', # shares", "os import gc import time import datetime from collections import OrderedDict from concurrent.futures", "no longer needed # gvkeys = single_idx_df['gvkey'].values # create dataframe with list of", "\"\"\" df = db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'],", "if os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1] # # get", "we can ignore most of those middle columns cols_to_use = ['ajexdi', 'cshoc', #", "historical nasdaq 100 constituents then looks at \"\"\" constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(date_range=None, index='Nasdaq", "= const_price_change 
bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True,", "== 0: # everything was acquired/bankrupt, etc, like in 2006 and 07 I", "full security data history for sp600 I think this was actually used to", "non-null object fic 1000000 non-null object dtypes: float64(20), object(21) memory usage: 312.8+ MB", "> 1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD stocks", "= [d for d in sp600_dates if d.year == y] first_days.append(min(year_dates)) # '1998-01-02'", "does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others not", "close price a year later, calculate overall return # repeat ad nauseum #", "# df = db.raw_sql('select {} from {}.{} WHERE gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use),", "# annual db.get_table('comp', 'aco_indsta') # index prices daily db.get_table('comp', 'idx_mth') # simplified financial", "for SQL query: WHERE IN # # should be like ('item 1', 'item", "i in tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs) #", "the bottom_20_price_chg df price_chg_dict = {} iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid)", "for these stocks datestr = start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] ==", "float64 prchd 986959 non-null float64 prcld 985637 non-null float64 prcod 224624 non-null float64", "+ 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])] # # make string for SQL query:", "10000000 for i, start in 
enumerate(range(0, nrows, nobs), 1): print('on part', str(i)) df", "constituent_companies price_chg_1y = OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg = OrderedDict() # TODO: get", "on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20", "size of table has changed; if so, downloads anew TODO: if update is", "nrows < current_rows: print('number of available rows is less than number in current", "like 'security', check if any more rows and grab new stuff, or just", "for securities cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI );", "= list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to get historical security data # goes through", "import matplotlib.pyplot as plt import numpy as np import pandas_market_calendars as mcal import", "df gc.collect() elif tablename == 'idxcst_his': download_index_constituents() else: print('not one of predefined tables", "'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind',", "of year dates, then get company market caps # get smallest 20 market", "'funda') # seems to be the same as annual fundamentals # annual index", "library=library) if up_to_date: return df = db.get_table(library=library, table=table, obs=nrows) if table == 'idxcst_his':", "5861 non-null object capgn 29 non-null float64 cheqv 85 non-null float64 div 5780", "common indexes as represented in the idx_ann table: SP600: S&P Smallcap 600 Index", "dataframe with last price, so we can append it to the bottom_20_price_chg df", "file # parse dates not working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df =", "db.raw_sql('select {} from {}.{} WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, 
sp600_gvkeys_string),", "to work properly. probably a sql ordering issue or something nobs = 10000000", "one currently in the hdf file # rows_to_get = nrows - const_df.shape[0] +", "# TODO: deal with acquisitions: dlrsni 01 is acquired, 02 is bankrupt, 03", "single_idx_df['gvkey'].values # create dataframe with list of constituents for each day start =", "everything was acquired/bankrupt, etc, like in 2006 and 07 I think last_idx =", "import numpy as np import pandas_market_calendars as mcal import pandas as pd from", "6) 'cshoc', # shares outstanding 'cshtrd', # volume 'curcdd', 'datadate', 'eps', 'gvkey', 'iid',", "inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) # get % price change for each", "gv in remaining_gvs[start:]] else: gvkeys_strings = [\"'\" + gv + \"'\" for gv", "like in 2006 and 07 I think last_idx = 0 else: last_idx =", "this isn't more than one result, may need to filter by iid too", "object cusip 1000000 non-null object conm 1000000 non-null object curcddv 5861 non-null object", "FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH + 'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename) latest_date =", "'seconds') big_df = pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't", "1', 'item 2', 'item 3') # gvkeys_str = '(' + ', '.join([\"'\" +", "prcod 224624 non-null float64 prcstd 999696 non-null float64 trfd 733884 non-null float64 exchg", "- start), 'seconds') big_df = pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate'])", "sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) # reads in all securities securities = pd.read_hdf(FILEPATH", "create dataframe with list of constituents for each day start = single_idx_df['from'].min() #", "usage: 312.8+ MB so 
we can ignore most of those middle columns cols_to_use", "gv in sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) # reads in all securities securities", "# end = time.time() # print('took', int(end - start), 'seconds') # takes about", "and below # pd.value_counts(lengths) # plt.hist(lengths) # plt.show() # TODO: # need to", "db.get_table('otc', 'endofday', obs=100) # gets acquisition spending; aqcy column df4 = db.raw_sql('select *", "weird tickers; buyouts or something # ignore stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True)", "does not appear to work properly. probably a sql ordering issue or something", "data db.get_table('comp', 'sec_shortint', obs=100) # quarterly fundamentals db.get_table('comp', 'fundq') # annual db.get_table('comp', 'funda')", "'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic', 'tpci',", "compd library -- need to rewrite query idxcst_his has the historical index constituents", "first of year dates, then get company market caps # get smallest 20", "dates valid for NYSE -- doesn't seem to match historical data if date_range", "10 threads per cpu for 8 cores; default is 5 per CPU #", "current_stocks = single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] > d)] current_companies = current_stocks[['gvkey', 'iid']]", "TODO: create file for storing all updated data and append # used once", "and within each library you can list tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') #", "for i, ch in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)): # first make strings", "permission?? 
db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\" downloads an entire table by name;", "chunks because it is too huge -- expect it to be about 100GB", "stopped trading during the year; figure out mergers/buyouts, etc # TODO: get tickers", "names # current_tickers = current_stocks['co_tic'] # company tickers constituent_companies[date_string] = current_companies # constituent_tickers[date_string]", "make strings out of gvkeys for SQL query start = ch if ch", "# if update: # cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): #", "== start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end] # TODO: figure out why a", "1M rows of secd: RangeIndex: 1000000 entries, 0 to 999999 Data columns (total", "changes each year annualized_return = (np.prod([1 + p for p in price_chg_1y.values()]) **", "warning but can't get rid of it... single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse =", "= nrows # offset = 0 df = db.get_table(library=library, table=table, obs=nrows, offset=offset) #", "OrderedDict() lengths = [] # TODO: multiprocessing to speed up # takes about", "get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]): # first year starts on sept year_dates =", "'.join(sp600_gvkeys_strings) # reads in all securities securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys =", "ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] == 0:", "not available until 1999 for these stocks datestr = start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr]", "nrows == current_rows: print('up to date') return True, nrows elif nrows < current_rows:", "379938 non-null float64 eps 309295 non-null float64 epsmo 
309295 non-null float64 prccd 999696", "takes -- about 2s # start = time.time() # df = db.raw_sql('select {}", "\"\"\" some exploration of the db lists libraries, and within each library you", "daily price, eps, and market cap data # see what returns are on", "object fic 1000000 non-null object dtypes: float64(20), object(21) memory usage: 312.8+ MB so", "function and do when saving the file # df['from'] = pd.to_datetime(df['from'], utc=True) #", "= pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap df['market_cap'] = df['cshoc'] * df['prccd'] # TODO:", "security secd secd is about 39GB in a pandas df... TODO: get latest", "instead of Ltd which I don't know what it is) SP1500: S&P 1500", "600 Index'): # adapted from beat_market_analysis constituent_utils.py \"\"\" gets historical constituents from WRDS", "of table has changed; if so, downloads anew TODO: if update is True,", "storing all updated data and append # used once to write data #", "100 for d in tqdm(date_range): # if date is within stock's from and", "securities securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys))) #", "'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for tpci 0", "securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated securities df; only ticker, gvkey, and", "price_chg_dict = {} iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid) > 1: print('shit,", "df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df gc.collect() def download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\" downloads", "price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks yearly returns, annualized return = ' +", "existing dataframe and only update it \"\"\" library = 'comp' 
table = 'idxcst_his'", "set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def spy_20_smallest(): \"\"\" tries to implement 20 smallest SPY", "= FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date:", "const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) # get % price change for each const_price_change =", "price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict,", "Price = (PRCCD / AJEXDI ); “Understanding the Data” on page 91 and", "999677 non-null float64 dvi 379938 non-null float64 eps 309295 non-null float64 epsmo 309295", "gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # takes a really", "date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from']", "# no need to update row by row, and can't get it working", "securities cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding", "= db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate'])", "if you want to count how many rows are there, use this. 
#", "+ 'hdf/security.hdf') # gvkeys = df['gvkey'].unique() # I think 0 or F for", "'idxcst_his': # converts date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] =", "# for gv in tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select {} from {}.{} WHERE", "removed on 'thru', so if it is the 'thru' date, then shouldn't be", "return db def list_libs_tables(): \"\"\" some exploration of the db lists libraries, and", "'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat',", "dfs.append(df) # testing # df = db.raw_sql('select {} from {}.{} WHERE gvkey =", "add ticker and remove iid and gvkey -- should just merge or something", "= pd.Timestamp(big_df['datadate']) # doesn't work!! # big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set date instead", "float64 divdpaydateind 0 non-null object divsp 129 non-null float64 dvrated 2875 non-null float64", "# seems like 5 simultaneous queries is max -- run in parallel with", "inplace=True) # these seem to be weird tickers; buyouts or something # ignore", "list of constituents for each day start = single_idx_df['from'].min() # get todays date", "# TODO: find next stock in bottom 20 at time the other was", "2s # start = time.time() # df = db.raw_sql('select {} from {}.{} WHERE", "if len(gvkeyx) > 1: print('more than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index]) return", "= OrderedDict() lengths = [] # TODO: multiprocessing to speed up # takes", "just merge or something # for gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey,", "anew TODO: if update is True, then will try to find existing dataframe", "data db.get_table('comp', 'secd', obs=100) # OTC pricing db.get_table('otc', 'endofday', obs=100) # gets acquisition", "db.list_tables('ciq') # don't have permission?? 
db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\" downloads an", "def make_db_connection(): \"\"\" creates connection to WRDS database need to enter credentials to", "# get stocks' gvkeys for sql search -- no longer needed # gvkeys", "bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append works with out-of-order columns # TODO:", "tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library,", "(US and ADR, or tpci column is 0 or F) if update=True, will", "check it's the same as the # # last one currently in the", "missing in the daily data from the constituent list # AIR ('001004') is", "'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld', # low", "changes were made unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def spy_20_smallest():", "def testing_db(): \"\"\" looks like some code that tests some db functions and", "d.year == y] first_days.append(min(year_dates)) # '1998-01-02' giving key error in constituent_companies price_chg_1y =", "end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is [5:] ; market cap not", "the year; figure out mergers/buyouts, etc # TODO: get tickers for start, end", "print('something is wrong...') return True, nrows else: print('db needs updating') return False, nrows", "db.list_tables('zacks') db.list_tables('ciq') # don't have permission?? 
db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\" downloads", "or just grab whole table if cant figure out what new stuff is", "latest date in current df, then get everything after that and add to", "get new rows plus the last one to check it's the same as", "1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index]) return gvkeyx = gvkeyx[0] # TODO: get", "non-null float64 cshtrd 999677 non-null float64 dvi 379938 non-null float64 eps 309295 non-null", "testing_db(): \"\"\" looks like some code that tests some db functions and explores", "company tickers constituent_companies[date_string] = current_companies # constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) # look at", "inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings)", "filename from first iteration # secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH", "\"\"\" downloads table if needs updating table can be a tablename in the", "# filename from first iteration # secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename =", "datadate to datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap df['market_cap'] = df['cshoc']", "int(end - start), 'seconds') # takes about 2h linearly # dfs = []", "sql search -- no longer needed # gvkeys = single_idx_df['gvkey'].values # create dataframe", "'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld', #", "price_chg_dict['gvkey'] = m price_chg_dict['iid'] = iid # TODO: check this isn't more than", "= pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs = 
list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql", "a certain day/month? def secd_info(): \"\"\" info of first 1M rows of secd:", "pandas as pd from tqdm import tqdm import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings", "IN {};' df = db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate']) # drop columns which", "= last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid'] = iid # TODO: check", "print('number of available rows is less than number in current db;') print('something is", "sp600 with daily price, eps, and market cap data # see what returns", "df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df gc.collect() def download_common_stock_price_history(db, update=True, table='secd',", "not working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') #", "date_cols=['datadate']) # drop columns which seem to have weird dates df.drop(df['prccd'].apply(lambda x: x", "compustat data # short data db.get_table('comp', 'sec_shortint', obs=100) # quarterly fundamentals db.get_table('comp', 'fundq')", "in sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) # reads in all securities securities =", "of secd: RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 41 columns):", "creates connection to WRDS database need to enter credentials to log in \"\"\"", "0 nrows = db.get_row_count(library=library, table=table) if nrows == current_rows: print('up to date') return", "sql query; avoid securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])] #", "giving key error in constituent_companies price_chg_1y = OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg =", "used once to write data # df.to_hdf(FILEPATH + 
'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf',", "by iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] =", "600 Index SP400: S&P Midcap 400 Index SP500: S&P 500 Comp-Ltd (there's another", "adjusted closes for constituents now and 1y in future const_current_price = full_const[['gvkey', 'iid',", "object cheqvpaydate 82 non-null object divdpaydate 5691 non-null object divsppaydate 128 non-null object", "and use last price to get price change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m", "common ones for compustat (comp) are: security names_ix idxcst_his .h5 files have same", "common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df =", "can append it to the bottom_20_price_chg df price_chg_dict = {} iid = bottom_20[bottom_20['gvkey']", "same as the # # last one currently in the hdf file #", "all security data from sec_dprc table \"\"\" dfs = [] for i in", "date, so include stocks on 'from' date # use dataframe masking date_string =", "issue or something nobs = 10000000 for i, start in enumerate(range(0, nrows, nobs),", "it working anyhow # need to figure out where new things are added,", "WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # see how long one", "industry quarterly db.get_table('comp', 'aco_indstq') # annual db.get_table('comp', 'aco_indsta') # index prices daily db.get_table('comp',", "function instead gets historical index constituents from compustat table checks if size of", "= ', '.join(sp600_gvkeys_strings) # reads in all securities securities = pd.read_hdf(FILEPATH + 
'hdf/security.hdf')", "# just get first of year dates, then get company market caps #", "bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] == 0: # everything was", "224624 non-null float64 prcstd 999696 non-null float64 trfd 733884 non-null float64 exchg 1000000", "if it is the 'thru' date, then shouldn't be included # but stocks", "= time.time() # print('took', int(end - start), 'seconds') # takes about 2h linearly", "hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del dfs del big_df gc.collect()", "secd_cols = ','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates connection to WRDS database need to", "for tpci 0 or F # ends up with very slow sql query;", "non-null float64 cshoc 439670 non-null float64 cshtrd 999677 non-null float64 dvi 379938 non-null", "it to the bottom_20_price_chg df price_chg_dict = {} iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values", "on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1) # drop currency column # write existing data", "to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern')", "'/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data', 'mode': 'w', 'complib': 'blosc', 'complevel': 9} hdf_settings_table =", "float64 eps 309295 non-null float64 epsmo 309295 non-null float64 prccd 999696 non-null float64", "library, table, latest_date), date_cols=['datadate']) # drop columns which seem to have weird dates", "other_cols = ['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate', 'div',", "def download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\" 
downloads data for all common stocks (US", "1 # offset = const_df.shape[0] - 1 # else: # rows_to_get = nrows", "something # for gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey']", "constituents now and 1y in future const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price", "date_cols=['datadate']) # end = time.time() # print('took', int(end - start), 'seconds') # takes", "= df['gvkey'].unique() # I think 0 or F for tpci are common or", "# df['thru'] = df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to join", "39GB in a pandas df... TODO: get latest date already downloaded and use", "# secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH + 'hdf/secd.hdf' current_df =", "'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) # get % price change for", "'mode': 'a', 'append': True, 'format': 'table', 'complib': 'blosc', 'complevel': 9} secd_cols_to_use = ['ajexdi',", "query start = ch if ch + chunk_size > len(remaining_gvs): gvkeys_strings = [\"'\"", "'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey', 'iid',", "drop=True) if bottom_20_price_chg.shape[0] == 0: # everything was acquired/bankrupt, etc, like in 2006", "Index'): # adapted from beat_market_analysis constituent_utils.py \"\"\" gets historical constituents from WRDS file", "gives copy warning but can't get rid of it... 
single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True)", "is less than rows in db; returns True is up to date; False", "names_ix has index gvkeyx and index name - comes from idx_ann table in", "\\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str = 'select {} from {}.{} WHERE datadate > \\'{}\\';'#", "F for tpci are common or ADR, which are stocks you can buy", "def get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq 100 constituents then looks at \"\"\" constituent_companies,", "+ str(round(annualized_return, 1))) plt.ylabel('% return per year') plt.tight_layout() plt.show() # to get tickers", "but not pgpass working # db.create_pgpass_file() return db def list_libs_tables(): \"\"\" some exploration", "# gvkeys = single_idx_df['gvkey'].values # create dataframe with list of constituents for each", "to 999999 Data columns (total 41 columns): gvkey 1000000 non-null object iid 1000000", "plt.show() # TODO: # need to check that no tickers are used for", "columns # TODO: find next stock in bottom 20 at time the other", "def secd_info(): \"\"\" info of first 1M rows of secd: RangeIndex: 1000000 entries,", "import tqdm import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data', 'mode': 'w',", "slow sql query; avoid securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])]", "{}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end = time.time()", "table... 
# no need to update row by row, and can't get it", "more rows and grab new stuff, or just grab whole table if cant", "query_str = 'select {} from {}.{} WHERE datadate > \\'{}\\';'# and gvkey IN", "sql ordering issue or something nobs = 10000000 for i, start in enumerate(range(0,", "db.get_table('comp', 'funda') # seems to be the same as annual fundamentals # annual", "if not \"\"\" if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else: current_rows", "of gvkeys for SQL query start = ch if ch + chunk_size >", "object(21) memory usage: 312.8+ MB so we can ignore most of those middle", "'funda') # industry quarterly db.get_table('comp', 'aco_indstq') # annual db.get_table('comp', 'aco_indsta') # index prices", "only need to do this once, then after it's saved, good to go", "doesn't seem to match historical data if date_range is None: date_range = nyse.valid_days(start_date=start.date(),", "per year') plt.tight_layout() plt.show() # to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid'])", "os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else: current_rows = 0 nrows =", "are on yearly rebalance for 20 smallest marketcap stocks # just get first", "not df.shape[0] > 0: print(\"no data to be found!\") return # convert datadate", "low 'prcod', # open 'tic' # ticker symbol ] df = db.get_table(library, tablename,", "pd.concat(dfs) # get only common stocks securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys", "# get only common stocks securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys =", "= ['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', 'prchd', 'prcld', 'prcod']", "from {}.{} WHERE gvkey = {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate']) return df def", "end = time.time() # print('took', int(end - start), 'seconds') # 
takes about 2h", "WHERE gvkey = \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq 100 constituents then", "+ s + \"'\" for s in common_securities['gvkey']]) + ')' # if you", "+ 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine with securities for ticker", "change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys: last_idx += 1 # make", "comes from idx_ann table in compd library -- need to rewrite query idxcst_his", "df \"\"\" # filename from first iteration # secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf'", "= 'idxcst_his' # check if any new rows df_filepath = FILEPATH + 'hdf/idxcst_his.hdf'", "during the year; figure out mergers/buyouts, etc # TODO: get tickers for start,", "< current_rows: print('number of available rows is less than number in current db;')", "from {}.{} WHERE datadate > \\'{}\\';'# and gvkey IN {};' df = db.raw_sql(query_str.format(secd_cols,", "'tic']], on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid'])", "to match historical data if date_range is None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else:", "is the compstat lib download entire table e.g. tablename='sec_shortint' tables downloaded 9-12: sec_shortint", "= const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] =", "abbreviated securities df; only ticker, gvkey, and iid sec_short = securities[['tic', 'gvkey', 'iid']]", "big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!! 
# big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set date", "doesn't work!! # big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set date instead of hard copy", "infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') # only need to do this once,", "= start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate']", "memory usage: 312.8+ MB so we can ignore most of those middle columns", "get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq 100 constituents then looks at \"\"\" constituent_companies, unique_dates", "[] for gv, j in jobs: # print(gv) dfs.append(j.result()) end = time.time() print('took',", "column is 0 or F) if update=True, will get latest date in current", "# TODO: check this isn't more than one result, may need to filter", "is less than number in current db;') print('something is wrong...') return True, nrows", "date; use date of datafile as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0,", "future const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']]", "date # gives copy warning but can't get rid of it... 
single_idx_df['thru'].fillna(end +", "to get price change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys: last_idx +=", "simplified financial statement extract daily db.get_table('comp', 'funda') # seems to be the same", "float64 prcstd 999696 non-null float64 trfd 733884 non-null float64 exchg 1000000 non-null float64", "ThreadPoolExecutor(max_workers=5) as executor: for gv in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs = []", "iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid) > 1: print('shit, iid length >1')", "from concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot as plt import numpy as np import", "download_small_table function instead gets historical index constituents from compustat table checks if size", "if stopped trading during the year; figure out mergers/buyouts, etc # TODO: get", "to date; False if not \"\"\" if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows =", "hdf table -- first time only # current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to hdf", "sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" + gv + \"'\" for gv in", "0 df = db.get_table(library=library, table=table, obs=nrows, offset=offset) # converts date columns to datetime", "currency, all USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] *", "in current db;') print('something is wrong...') return True, nrows else: print('db needs updating')", "tqdm import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data', 'mode': 'w', 'complib':", "sp600_dates if d.year == y] first_days.append(min(year_dates)) # '1998-01-02' giving key error in constituent_companies", "df.merge(common_securities_short, 
on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1) # drop currency column # write existing", "stuff is \"\"\" nrows = db.get_row_count(library, tablename) print('number of rows:', nrows) #db.describe_table(library, tablename)", "'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to join up with other dataframe maybe, for now,", "TODO: find next stock in bottom 20 at time the other was put", "= pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!! # big_df['datadate'].dt.tz_localize('US/Eastern') # TODO:", "'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld', # low 'prcod']", "y in tqdm(years[1:]): # first year starts on sept year_dates = [d for", "single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] > d)] current_companies = current_stocks[['gvkey', 'iid']] # company", "else: print('db needs updating') return False, nrows def download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete", "pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath,", "want to count how many rows are there, use this. 
# full data", "valid for NYSE -- doesn't seem to match historical data if date_range is", "tablename, gvkey), date_cols=['datadate']) return df def download_all_security_data(): \"\"\" downloads full security data history", "SP600 stocks yearly returns, annualized return = ' + str(round(annualized_return, 1))) plt.ylabel('% return", "check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\" checks if current rows is less than rows", "bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO: deal with acquisitions: dlrsni 01 is acquired, 02", "date_string = d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] > d)] current_companies", "hundred are missing in the daily data from the constituent list # AIR", "in current df, then get everything after that and add to current df", "'format': 'table', 'complib': 'blosc', 'complevel': 9} secd_cols_to_use = ['ajexdi', # Adjusted Price =", "this was actually used to get all historical stock data actually, not just", "const_df = pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1] # # get new rows plus", "# df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine", "volume 'datadate', 'eps', 'prccd', 'prchd', 'prcld', 'prcod', 'tic' # maybe want to get", "you want to count how many rows are there, use this. 
# full", "non-null object iid 1000000 non-null object datadate 1000000 non-null object tic 1000000 non-null", "dataframe with list of constituents for each day start = single_idx_df['from'].min() # get", "cusip 1000000 non-null object conm 1000000 non-null object curcddv 5861 non-null object capgn", "0 or F) if update=True, will get latest date in current df, then", "= idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx) > 1: print('more than 1 gvkeyx, exiting:')", "price, eps, and market cap data # see what returns are on yearly", "= db.get_row_count(library=library, table=table) if nrows == current_rows: print('up to date') return True, nrows", "only update it \"\"\" library = 'comp' table = 'idxcst_his' # check if", "within stock's from and thru, add to list # stocks were removed on", "in remaining_gvs[start:ch + chunk_size]] start = time.time() jobs = [] # 10 threads", "'cshtrd', # volume 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', #", "# else: # rows_to_get = nrows # offset = 0 df = db.get_table(library=library,", "NYSE -- doesn't seem to match historical data if date_range is None: date_range", "first_days = [] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y in", "wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves credentials, but not pgpass working # db.create_pgpass_file() return db", "creating dataframe with last price, so we can append it to the bottom_20_price_chg", "dates, then get company market caps # get smallest 20 market caps, get", "for tables like 'security', check if any more rows and grab new stuff,", "once to write data # df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table)", "= sp600_stocks[sp600_stocks['datadate'] == end] # TODO: figure out why a few hundred are", "object 
curcddv 5861 non-null object capgn 29 non-null float64 cheqv 85 non-null float64", "sure ] other_cols = ['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip',", "numpy as np import pandas_market_calendars as mcal import pandas as pd from tqdm", "# look at number of constituents as a histogram; mostly 600 but a", "historical data if date_range is None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff", "gvkey), date_cols=['datadate']) return df def download_all_security_data(): \"\"\" downloads full security data history for", "end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace NaT with tomorrow's date", "non-null float64 dvrated 2875 non-null float64 paydateind 2 non-null object anncdate 2776 non-null", "entire table by name; library also required. default library is the compstat lib", "/ AJEXDI ); “Understanding the Data” on page 91 and on (chapter 6)", "= '(' + ', '.join([\"'\" + s + \"'\" for s in common_securities['gvkey']])", "was acquired/bankrupt, etc, like in 2006 and 07 I think last_idx = 0", "sp600. TODO: get latest date and download updates \"\"\" df = pd.read_hdf(FILEPATH +", "] other_cols = ['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate',", "marketcap stocks # just get first of year dates, then get company market", "work properly. 
probably a sql ordering issue or something nobs = 10000000 for", "# pd.value_counts(lengths) # plt.hist(lengths) # plt.show() # TODO: # need to check that", "non-null object tpci 1000000 non-null object cik 922655 non-null object fic 1000000 non-null", "'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate',", "'prccd', # close 'prchd', # high 'prcld', # low 'prcod'] # open secd_cols", "del current_df del securities del df del common_df del common_securities gc.collect() def get_stock_hist_df(gvkey,", "get iid too, not sure ] other_cols = ['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv',", "updating table can be a tablename in the library; common ones for compustat", "* common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker and remove iid and", "'cshtrd', # volume 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high", "non-null object cheqvpaydate 82 non-null object divdpaydate 5691 non-null object divsppaydate 128 non-null", "data # df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep", "need to join up with other dataframe maybe, for now, just use gvkeyx", "# 10 threads per cpu for 8 cores; default is 5 per CPU", "as table \"\"\" df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db, df_filepath,", "then after it's saved, good to go # TODO: put this in a", "datadate 1000000 non-null object tic 1000000 non-null object cusip 1000000 non-null object conm", "non-null float64 divdpaydateind 0 non-null object divsp 129 non-null float64 dvrated 2875 non-null", "= pd.concat(dfs) # get only common stocks securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') #", "dtypes: float64(20), object(21) memory usage: 312.8+ MB so we can 
ignore most of", "why a few hundred are missing in the daily data from the constituent", "def download_small_table(db, table, library='comp'): \"\"\" downloads table if needs updating table can be", "maybe, for now, just use gvkeyx which is # 030824 for sp600 #", "we can append it to the bottom_20_price_chg df price_chg_dict = {} iid =", "-- no longer needed # gvkeys = single_idx_df['gvkey'].values # create dataframe with list", "= current_stocks['co_tic'] # company tickers constituent_companies[date_string] = current_companies # constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0])", "fast to download because this is a small table... # no need to", "now and 1y in future const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price =", "connection to WRDS database need to enter credentials to log in \"\"\" wrds_uname", "table=table, obs=nrows, offset=offset) # converts date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True)", "make sure only one iid per gvkey -- not quite gvkey_grp = common_stocks.groupby('gvkey')", "sp600 index data starts in 1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days =", "downloads data for all common stocks (US and ADR, or tpci column is", "> \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str = 'select {} from {}.{} WHERE datadate >", "sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days = [] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates)", ">= 1994].dt.year.unique() first_days = [] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for", "gvkey 1000000 non-null object iid 1000000 non-null object datadate 1000000 non-null object tic", "acquisition 
spending; aqcy column df4 = db.raw_sql('select * from comp.fundq WHERE gvkey =", "# turns out the db is fast to download because this is a", "which I don't know what it is) SP1500: S&P 1500 Super Composite NASDAQ", "# first make strings out of gvkeys for SQL query start = ch", "common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del securities del df del common_df del common_securities gc.collect()", "minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace NaT with tomorrow's date # gives copy", "figure out why full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid'])", "testing # df = db.raw_sql('select {} from {}.{} WHERE gvkey = \\'001004\\' LIMIT", "tablename == 'secd': # this is the securities data db -- has historical", "constituent_companies = OrderedDict() # constituent_tickers = OrderedDict() lengths = [] # TODO: multiprocessing", "files have same name as table \"\"\" df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date,", "only one iid per gvkey -- not quite gvkey_grp = common_stocks.groupby('gvkey') num_iids =", "True, nrows elif nrows < current_rows: print('number of available rows is less than", "monthly security data db.get_table('comp', 'secm', obs=100) # index constituents db.get_table('comp', 'idxcst_his') # market", "use date of datafile as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern')", "'security', check if any more rows and grab new stuff, or just grab", "def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'): # adapted from beat_market_analysis constituent_utils.py \"\"\" gets", "tickers constituent_companies[date_string] = current_companies # constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) # look at number", "# TODO: get latest 
price if stopped trading during the year; figure out", "# tries to use pgpass file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname,", "in common_stocks, figure out why full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data,", "# date_range = mcal.date_range(start=start, end=end) # gets only dates valid for NYSE --", "common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df = db.raw_sql('select {} from {}.{} WHERE", "shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd',", "for gv, j in jobs: # print(gv) dfs.append(j.result()) end = time.time() print('took', int(end", "'askhi'], obs=100) # compustat data # short data db.get_table('comp', 'sec_shortint', obs=100) # quarterly", "\"'\" for gv in remaining_gvs[start:]] else: gvkeys_strings = [\"'\" + gv + \"'\"", "offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)), **hdf_settings) del df gc.collect() elif", "index prices daily db.get_table('comp', 'idx_mth') # simplified financial statement extract daily db.get_table('comp', 'funda')", "utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings)", "gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx) > 1: print('more than 1 gvkeyx,", "full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get adjusted closes for constituents now and", "data as hdf table -- first time only # current_df.to_hdf(secd_filename, **hdf_settings_table) # appends", "sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = 
last_data['prccd'] / last_data['ajexdi']", "on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers", "TODO: is it different/better to rebalance on a certain day/month? def secd_info(): \"\"\"", "if up_to_date: return df = db.get_table(library=library, table=table, obs=nrows) if table == 'idxcst_his': #", "returns are on yearly rebalance for 20 smallest marketcap stocks # just get", "with out-of-order columns # TODO: find next stock in bottom 20 at time", "less than rows in db; returns True is up to date; False if", "non-null float64 dvi 379938 non-null float64 eps 309295 non-null float64 epsmo 309295 non-null", "unique_dates def spy_20_smallest(): \"\"\" tries to implement 20 smallest SPY strategy from paper", "time.time() print('took', int(end - start), 'seconds') big_df = pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') #", "# raw sql to get historical security data # goes through all securities", "some code that tests some db functions and explores them \"\"\" df =", "# open secd_cols = ','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates connection to WRDS database", "def download_all_security_data(): \"\"\" downloads full security data history for sp600 I think this", "get close price # get close price a year later, calculate overall return", "data starts in 1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days = [] sp600_dates", "+= 1 # make an index for creating dataframe with last price, so", "starts in 1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days = [] sp600_dates =", "non-null object anncdate 2776 non-null object capgnpaydate 29 non-null object cheqvpaydate 82 non-null", "to figure out where new things are added, but do later # if", "plt.title('bottom 20 SP600 
stocks yearly returns, annualized return = ' + str(round(annualized_return, 1)))", "annual fundamentals # annual index fundamentals db.get_table('comp', 'idx_ann') # monthly security data db.get_table('comp',", "spy_20_smallest(): \"\"\" tries to implement 20 smallest SPY strategy from paper (see beat_market_analysis", "the same as the # # last one currently in the hdf file", "df['thru'] = pd.to_datetime(df['thru'], utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru'] = df['thru'].dt.tz_convert('US/Eastern') #", "-- doesn't seem to match historical data if date_range is None: date_range =", "= pd.to_datetime(df['thru'], utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru'] = df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH", "to be weird tickers; buyouts or something # ignore stocks on canadian exchanges", "has the historical index constituents \"\"\" import os import gc import time import", "then shouldn't be included # but stocks were added on 'from' date, so", "Index SP400: S&P Midcap 400 Index SP500: S&P 500 Comp-Ltd (there's another one", "loads all security data from sec_dprc table \"\"\" dfs = [] for i", "missing_gvkeys: last_idx += 1 # make an index for creating dataframe with last", "** (1/len(price_chg_1y.values())) - 1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20", "fundamentals db.get_table('comp', 'fundq') # annual db.get_table('comp', 'funda') # industry quarterly db.get_table('comp', 'aco_indstq') #", "entries, 0 to 999999 Data columns (total 41 columns): gvkey 1000000 non-null object", "each library you can list tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't have", "sec_shortint security secd secd is about 39GB in a pandas df... 
TODO: get", "have same name as table \"\"\" df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows", "inplace=True, axis=1) # drop currency column # write existing data as hdf table", "return constituent_companies, unique_dates def spy_20_smallest(): \"\"\" tries to implement 20 smallest SPY strategy", "microsecond=0, tzinfo=None).tz_localize('US/Eastern') # replace NaT with tomorrow's date # gives copy warning but", "gvkeys = single_idx_df['gvkey'].values # create dataframe with list of constituents for each day", "= time.time() jobs = [] # 10 threads per cpu for 8 cores;", "common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600", "are stocks you can buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these", "not in common_stocks, figure out why full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y =", "{}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # see how long", "df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to join up with other dataframe maybe,", "current df, then get everything after that and add to current df \"\"\"", "1000000 non-null object cik 922655 non-null object fic 1000000 non-null object dtypes: float64(20),", "float64 paydateind 2 non-null object anncdate 2776 non-null object capgnpaydate 29 non-null object", "and download updates \"\"\" df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] ==", "= full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd']", 
"1994].dt.year.unique() first_days = [] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y", "library you can list tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't have permission??", "per 50 -- should take about 20m for 2k # took 1282s for", "list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey']", "(total 41 columns): gvkey 1000000 non-null object iid 1000000 non-null object datadate 1000000", "= pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd'] # sp600 index data", "= last_price price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()])", "'cshtrd', # volume 'datadate', 'eps', 'prccd', 'prchd', 'prcld', 'prcod', 'tic' # maybe want", "= securities[securities['tpci'].isin(['0', 'F'])] # # make string for SQL query: WHERE IN #", "utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df gc.collect() def", "tablename) print('number of rows:', nrows) #db.describe_table(library, tablename) # nrows = 1000000 if tablename", "gvkeys missing in price changes and check for bankruptcy or acquisitions, etc missing_gvkeys", "histogram; mostly 600 but a few above and below # pd.value_counts(lengths) # plt.hist(lengths)", "# shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'prccd', 'prchd', 'prcld', 'prcod', 'tic'", "then save to HDF5 for 
tables like 'security', check if any more rows", "= FILEPATH + 'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys", "data # short data db.get_table('comp', 'sec_shortint', obs=100) # quarterly fundamentals db.get_table('comp', 'fundq') #", "+ 'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return df", "tomorrow's date # gives copy warning but can't get rid of it... single_idx_df['thru'].fillna(end", "'data', 'mode': 'a', 'append': True, 'format': 'table', 'complib': 'blosc', 'complevel': 9} secd_cols_to_use =", "constituent_companies[date_string] = current_companies # constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) # look at number of", "tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P", "cols_to_use = ['ajexdi', 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'prccd',", "go # TODO: put this in a clean function and do when saving", "1000000 if tablename == 'secd': # this is the securities data db --", "than one result, may need to filter by iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey']", "drop currency column # write existing data as hdf table -- first time", "from first iteration # secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH +", "gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end = time.time() # print('took',", "join up with other dataframe maybe, for now, just use gvkeyx which is", "hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') # only need to", "# 
rows_to_get = nrows - const_df.shape[0] + 1 # offset = const_df.shape[0] -", "just grab whole table if cant figure out what new stuff is \"\"\"", "nrows = db.get_row_count(library=library, table=table) if nrows == current_rows: print('up to date') return True,", "the idx_ann table: SP600: S&P Smallcap 600 Index SP400: S&P Midcap 400 Index", "bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid) > 1: print('shit, iid length >1') iid =", "full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] = const_current_price['prccd']", "TODO: multiprocessing to speed up # takes about 10s for nasdaq 100 for", "list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to get historical security data # goes through all", "{};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate']) # dfs.append(df) # testing # df = db.raw_sql('select", "'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic', 'tpci', 'trfd'] \"\"\" pass def test_sql_queries():", "idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx) >", "it \"\"\" library = 'comp' table = 'idxcst_his' # check if any new", "axis=1, inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf',", "07 I think last_idx = 0 else: last_idx = bottom_20_price_chg.index[-1] # get stocks", "- 1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks", "be like ('item 1', 'item 2', 'item 3') # gvkeys_str = '(' +", "# gets all dates # date_range = mcal.date_range(start=start, end=end) # gets only dates", "# const_df = 
pd.read_hdf(cst_filepath) # last_entry = const_df.iloc[-1] # # get new rows", "multiprocessing to speed up # takes about 10s for nasdaq 100 for d", "data from the constituent list # AIR ('001004') is not in common_stocks, figure", "0 for now # get the overall price changes each year annualized_return =", "Super Composite NASDAQ 100: Nasdaq 100 \"\"\" idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx", "0 or F # ends up with very slow sql query; avoid securities", "if any new rows df_filepath = FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db,", "index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append works with out-of-order columns # TODO: find", "str(i)), **hdf_settings) del df gc.collect() elif tablename == 'idxcst_his': download_index_constituents() else: print('not one", "plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks yearly returns, annualized return = ' + str(round(annualized_return,", "# doesn't work!! # big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set date instead of hard", "1: print('shit, iid length >1') iid = iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey'] == m)", "= FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date:", "volume 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld',", "iid too, not sure ] other_cols = ['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate',", "market cap/price, daily data db.get_table('comp', 'secd', obs=100) # OTC pricing db.get_table('otc', 'endofday', obs=100)", "# gets acquisition spending; aqcy column df4 = db.raw_sql('select * from comp.fundq WHERE", "it is too huge -- expect it to be about 100GB in memory", "rows df_filepath = FILEPATH + 'hdf/idxcst_his.hdf' up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library)", "'complevel': 9} hdf_settings_table = 
{'key': 'data', 'mode': 'a', 'append': True, 'format': 'table', 'complib':", "constituents for sp600 with daily price, eps, and market cap data # see", "each year annualized_return = (np.prod([1 + p for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values()))", "table if needs updating table can be a tablename in the library; common", "check for bankruptcy or acquisitions, etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged", "import pandas as pd from tqdm import tqdm import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/'", "can be a tablename in the library; common ones for compustat (comp) are:", "constituents \"\"\" import os import gc import time import datetime from collections import", "pandas df... TODO: get latest date already downloaded and use sql query to", "ordering issue or something nobs = 10000000 for i, start in enumerate(range(0, nrows,", "about 39GB in a pandas df... 
TODO: get latest date already downloaded and", "market cap data # see what returns are on yearly rebalance for 20", "1000000 non-null object iid 1000000 non-null object datadate 1000000 non-null object tic 1000000", "# constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) # look at number of constituents as a", "({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate']) # see how long one query takes --", "one iid per gvkey -- not quite gvkey_grp = common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique()", "from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end =", "+ 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker and remove iid and gvkey -- should", "collections import OrderedDict from concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot as plt import numpy", "database need to enter credentials to log in \"\"\" wrds_uname = os.environ.get('wrds_username') wrds_pass", "is wrong...') return True, nrows else: print('db needs updating') return False, nrows def", "13)): # print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs) # get only common", "F) if update=True, will get latest date in current df, then get everything", "all dates # date_range = mcal.date_range(start=start, end=end) # gets only dates valid for", "non-null float64 cheqv 85 non-null float64 div 5780 non-null float64 divd 5694 non-null", "db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate']) # drop columns which seem to have weird", "for creating dataframe with last price, so we can append it to the", "= bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO: deal with acquisitions: dlrsni 01 is acquired,", "= 0 nrows = db.get_row_count(library=library, table=table) if nrows == current_rows: 
print('up to date')", "j in jobs: # print(gv) dfs.append(j.result()) end = time.time() print('took', int(end - start),", "get adjusted closes for constituents now and 1y in future const_current_price = full_const[['gvkey',", "needs updating table can be a tablename in the library; common ones for", "\"\"\" # filename from first iteration # secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename", "filter by iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg']", "exiting:') print(idx_df[idx_df['conm'] == index]) return gvkeyx = gvkeyx[0] # TODO: get latest file", "'dlrsni']] securities[securities['gvkey'] == '010565'] # TODO: is it different/better to rebalance on a", "chunk_size > len(remaining_gvs): gvkeys_strings = [\"'\" + gv + \"'\" for gv in", "'seconds') # takes about 2h linearly # dfs = [] # for gv", "out why a few hundred are missing in the daily data from the", "else: gvkeys_strings = [\"'\" + gv + \"'\" for gv in remaining_gvs[start:ch +", "iid length >1') iid = iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid']", "1000000 non-null object conm 1000000 non-null object curcddv 5861 non-null object capgn 29", "01 is acquired, 02 is bankrupt, 03 is liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get", "df[df['gvkeyx'] == '030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" + gv + \"'\"", "missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic',", "('001004') is not in common_stocks, figure out why 
full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid'])", "= (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check", "= common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1) # drop", "'prcld', 'prcod', 'tic' # maybe want to get iid too, not sure ]", "2000 onward is [5:] ; market cap not available until 1999 for these", "tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is [5:] ; market cap not available until", "i, ch in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)): # first make strings out", "included # but stocks were added on 'from' date, so include stocks on", "need to figure out where new things are added, but do later #", "through all securities and downloads historical price data # chunk through remaining gvkeys", "in all securities securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs =", "non-null float64 paydateind 2 non-null object anncdate 2776 non-null object capgnpaydate 29 non-null", "= '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data', 'mode': 'w', 'complib': 'blosc', 'complevel': 9} hdf_settings_table", "sp600_gvkeys_string), date_cols=['datadate']) # takes a really long time... 
# # df = db.raw_sql('select", "{} from {}.{} WHERE gvkey = {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate']) return df", "need to do this once, then after it's saved, good to go #", "db.get_table(library, tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif tablename ==", "explores them \"\"\" df = db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date',", "1000000 non-null object tic 1000000 non-null object cusip 1000000 non-null object conm 1000000", "library, tablename, gvkey), date_cols=['datadate']) return df def download_all_security_data(): \"\"\" downloads full security data", "for d in sp600_dates if d.year == y] first_days.append(min(year_dates)) # '1998-01-02' giving key", "bottom 20 at time the other was put out, and see how it", "from comp.fundq WHERE gvkey = \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets historical nasdaq 100", "def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df = db.raw_sql('select {} from {}.{} WHERE gvkey =", "pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to", "import ThreadPoolExecutor import matplotlib.pyplot as plt import numpy as np import pandas_market_calendars as", "keep common stocks (tpci = 0 and F) common_securities_short = common_securities[['gvkey', 'iid']] common_df", "sql to get historical security data # goes through all securities and downloads", "need to check that no tickers are used for multiple companies # get", "'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg', 'fic', 'gvkey',", "a really long time... 
# # df = db.raw_sql('select {} from {}.{} WHERE", "and on (chapter 6) 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps',", "to check it's the same as the # # last one currently in", "price_chg_1y = OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg = OrderedDict() # TODO: get latest", "the historical index constituents \"\"\" import os import gc import time import datetime", "num_iids = gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))]", "pass # with limit for testing # df = db.raw_sql('select {} from {}.{}", "iid 1000000 non-null object datadate 1000000 non-null object tic 1000000 non-null object cusip", "= ['ajexdi', 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'prccd', 'prchd',", "data # chunk through remaining gvkeys in 10 chunks chunk_size = len(remaining_gvs) //", "'hdf/idxcst_his.hdf') # only need to do this once, then after it's saved, good", "downloaded 9-12: sec_shortint security secd secd is about 39GB in a pandas df...", "rows is less than number in current db;') print('something is wrong...') return True,", "np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" + gv + \"'\" for gv in sp600_gvkeys] sp600_gvkeys_string", "put this in a clean function and do when saving the file #", "end = time.time() print('took', int(end - start), 'seconds') big_df = pd.concat(dfs) big_df['datadate'] =", "Index SP500: S&P 500 Comp-Ltd (there's another one with Wed instead of Ltd", "update is True, then will try to find existing dataframe and only update", "'gvkey', 'iid', 'prccd', # close 'prchd', # high 'prcld', # low 'prcod', #", "tablename, columns=cols_to_use, obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif tablename == 'sec_dprc':", "the compstat lib download entire table e.g. 
tablename='sec_shortint' tables downloaded 9-12: sec_shortint security", "df = db.get_table(library=library, table=table, obs=nrows, offset=offset) # converts date columns to datetime df['from']", "SP1500: S&P 1500 Super Composite NASDAQ 100: Nasdaq 100 \"\"\" idx_df = pd.read_hdf(FILEPATH", "'bidlo', 'askhi'], obs=100) # compustat data # short data db.get_table('comp', 'sec_shortint', obs=100) #", "print('on part', str(i)) df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename", "+ 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep common stocks (tpci", "time.time() # print('took', int(end - start), 'seconds') # takes about 2h linearly #", "data # goes through all securities and downloads historical price data # chunk", "'iid', 'prccd', # close 'prchd', # high 'prcld', # low 'prcod'] # open", "constituents from compustat table checks if size of table has changed; if so,", "used for multiple companies # get unique dates where changes were made unique_dates", "many rows are there, use this. # full data query only took a", "ticker, gvkey, and iid sec_short = securities[['tic', 'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey',", "= securities[['tic', 'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get stocks' gvkeys", "= nrows - const_df.shape[0] + 1 # offset = const_df.shape[0] - 1 #", "single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE') # gets all dates # date_range", "get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'): # adapted from beat_market_analysis constituent_utils.py \"\"\" gets historical", "actually, not just sp600. 
TODO: get latest date and download updates \"\"\" df", "df['from'].dt.tz_convert('US/Eastern') # df['thru'] = df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to", "price data # chunk through remaining gvkeys in 10 chunks chunk_size = len(remaining_gvs)", "price_chg_1y[datestr] = const_price_change bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))]", "sp600_stocks['prccd'] # sp600 index data starts in 1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique()", "len(remaining_gvs) + 1, chunk_size)): # first make strings out of gvkeys for SQL", "import datetime from collections import OrderedDict from concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot as", "spending; aqcy column df4 = db.raw_sql('select * from comp.fundq WHERE gvkey = \\'010519\\';')", "chunk_size)): # first make strings out of gvkeys for SQL query start =", "df... 
TODO: get latest date already downloaded and use sql query to gegt", "shares outstanding 'cshtrd', # volume 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close", "count(gvkey) from comp.secd where datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str = 'select {}", "# annual db.get_table('comp', 'funda') # industry quarterly db.get_table('comp', 'aco_indstq') # annual db.get_table('comp', 'aco_indsta')", "takes about 2h linearly # dfs = [] # for gv in tqdm(sp600_gvkeys_strings):", "is too huge -- expect it to be about 100GB in memory cols_to_use", "it to be about 100GB in memory cols_to_use = ['ajexdi', 'cshoc', 'cshtrd', 'curcdd',", "\"'\" for gv in remaining_gvs[start:ch + chunk_size]] start = time.time() jobs = []", "common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to be weird tickers; buyouts or something #", "# df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru'] = df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings)", "TODO: get tickers for start, end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is", "5772 non-null object recorddate 2906 non-null object curcdd 999696 non-null object adrrc 4202", "drop columns which seem to have weird dates df.drop(df['prccd'].apply(lambda x: x is None).index,", "db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100) # compustat", "common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker and remove iid and gvkey", "const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1)", "0 # turns out the db is fast to download because this is", 
"cheqv 85 non-null float64 div 5780 non-null float64 divd 5694 non-null float64 divdpaydateind", "divsp 129 non-null float64 dvrated 2875 non-null float64 paydateind 2 non-null object anncdate", "historical constituents from WRDS file common indexes as represented in the idx_ann table:", "= pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for tpci 0 or F", "utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru'] = df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf',", "pass def test_sql_queries(): pass # with limit for testing # df = db.raw_sql('select", "# low 'prcod'] # open secd_cols = ','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates connection", "400 Index SP500: S&P 500 Comp-Ltd (there's another one with Wed instead of", "from collections import OrderedDict from concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot as plt import", "not appear to work properly. 
probably a sql ordering issue or something nobs", "gvkey, and iid sec_short = securities[['tic', 'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid'])", "first 1M rows of secd: RangeIndex: 1000000 entries, 0 to 999999 Data columns", "is None).index, inplace=True) if not df.shape[0] > 0: print(\"no data to be found!\")", "github repo) \"\"\" # merge historical constituents for sp600 with daily price, eps,", "non-null object capgnpaydate 29 non-null object cheqvpaydate 82 non-null object divdpaydate 5691 non-null", "in enumerate(range(0, nrows, nobs), 1): print('on part', str(i)) df = db.get_table(library, tablename, columns=cols_to_use,", "reset hour, min, sec to 0s # TODO: if not latest date; use", "year_dates = [d for d in sp600_dates if d.year == y] first_days.append(min(year_dates)) #", "constituents = constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end]", "long time... 
# # df = db.raw_sql('select {} from {}.{} WHERE gvkey IN", "= np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" + gv + \"'\" for gv in sp600_gvkeys]", "try to find existing dataframe and only update it \"\"\" library = 'comp'", "year dates, then get company market caps # get smallest 20 market caps,", "def load_and_combine_sec_dprc(): \"\"\" loads all security data from sec_dprc table \"\"\" dfs =", "for NYSE -- doesn't seem to match historical data if date_range is None:", "up # takes about 10s for nasdaq 100 for d in tqdm(date_range): #", "are: security names_ix idxcst_his .h5 files have same name as table \"\"\" df_filepath", "== end] # TODO: figure out why a few hundred are missing in", "'ajexdi'], inplace=True, axis=1) # get % price change for each const_price_change = const_current_price.merge(const_future_price,", "stocks # just get first of year dates, then get company market caps", "= df['thru'].dt.tz_convert('US/Eastern') df.to_hdf(df_filepath, **hdf_settings) del df gc.collect() def download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\"", "all securities and downloads historical price data # chunk through remaining gvkeys in", "\"\"\" downloads data for all common stocks (US and ADR, or tpci column", "get tickers for start, end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is [5:]", "# simplified financial statement extract daily db.get_table('comp', 'funda') # seems to be the", "gv + \"'\" for gv in remaining_gvs[start:]] else: gvkeys_strings = [\"'\" + gv", "something nobs = 10000000 for i, start in enumerate(range(0, nrows, nobs), 1): print('on", "AIR ('001004') is not in common_stocks, figure out why full_const = constituents.merge(current_daily_data, on=['gvkey',", "and gvkey IN {};' df = db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate']) # drop", "== m) & (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = 
last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey']", "data # see what returns are on yearly rebalance for 20 smallest marketcap", "= \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def testing_db(): \"\"\" looks like some", "non-null float64 divd 5694 non-null float64 divdpaydateind 0 non-null object divsp 129 non-null", "one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end] # TODO: figure out why a few hundred", "compstat lib download entire table e.g. tablename='sec_shortint' tables downloaded 9-12: sec_shortint security secd", "available rows is less than number in current db;') print('something is wrong...') return", "\"'\" for s in common_securities['gvkey']]) + ')' # if you want to count", "current_rows = current_df.shape[0] else: current_rows = 0 nrows = db.get_row_count(library=library, table=table) if nrows", "goes through all securities and downloads historical price data # chunk through remaining", "= constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get adjusted closes", "figure out mergers/buyouts, etc # TODO: get tickers for start, end in tqdm(zip(first_days[4:-1],", "or something # ignore stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to", "const_df.iloc[-1] # # get new rows plus the last one to check it's", "['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate', 'div', 'divd', 'divdpaydate',", "name - comes from idx_ann table in compd library -- need to rewrite", "price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append works with out-of-order", "gvkey -- should just merge or something # for gvkey in 
tqdm(common_df['gvkey'].unique()): #", "that and add to current df \"\"\" # filename from first iteration #", "last_idx = 0 else: last_idx = bottom_20_price_chg.index[-1] # get stocks missing from price", "datestr = start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data =", "\"\"\" downloads an entire table by name; library also required. default library is", "page 91 and on (chapter 6) 'cshoc', # shares outstanding 'cshtrd', # volume", "where new things are added, but do later # if update: # cst_filepath", "df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] =", "+ '_min'), **hdf_settings) elif tablename == 'sec_dprc': # need to dl in chunks", "common_securities_short = common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1) #", "/ last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid'] = iid # TODO: check this isn't", "downloads full security data history for sp600 I think this was actually used", "'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return df =", "bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565'] #", "like some code that tests some db functions and explores them \"\"\" df", "d)] current_companies = current_stocks[['gvkey', 'iid']] # company names # current_tickers = current_stocks['co_tic'] #", "', '.join(sp600_gvkeys_strings) # reads in all securities securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys", "else: print('not one of predefined tables to download') def 
check_if_up_to_date(db, df_filepath, table, library='comp'):", "gvkeyx and index name - comes from idx_ann table in compd library --", "for gv in gvkeys_strings: jobs.append((gv, executor.submit(get_stock_hist_df, gv))) dfs = [] for gv, j", "ignore stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to make sure only", "# takes about 10s for nasdaq 100 for d in tqdm(date_range): # if", "sp600_gvkeys_string), date_cols=['datadate']) # see how long one query takes -- about 2s #", "db.get_table('comp', 'fundq') # annual db.get_table('comp', 'funda') # industry quarterly db.get_table('comp', 'aco_indstq') # annual", "small table... # no need to update row by row, and can't get", "constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get adjusted closes for", "aqcy column df4 = db.raw_sql('select * from comp.fundq WHERE gvkey = \\'010519\\';') def", "library; common ones for compustat (comp) are: security names_ix idxcst_his .h5 files have", "# AIR ('001004') is not in common_stocks, figure out why full_const = constituents.merge(current_daily_data,", "'010565'] # TODO: is it different/better to rebalance on a certain day/month? 
def", "# get gvkeys missing in price changes and check for bankruptcy or acquisitions,", "'secstat', 'tic', 'tpci', 'trfd'] \"\"\" pass def test_sql_queries(): pass # with limit for", "gv, j in jobs: # print(gv) dfs.append(j.result()) end = time.time() print('took', int(end -", "float64 prcod 224624 non-null float64 prcstd 999696 non-null float64 trfd 733884 non-null float64", "'w', 'complib': 'blosc', 'complevel': 9} hdf_settings_table = {'key': 'data', 'mode': 'a', 'append': True,", "== index]['gvkeyx'].values if len(gvkeyx) > 1: print('more than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] ==", "gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def testing_db(): \"\"\" looks like", "than rows in db; returns True is up to date; False if not", "2127 gvkeys def load_and_combine_sec_dprc(): \"\"\" loads all security data from sec_dprc table \"\"\"", "anncdate 2776 non-null object capgnpaydate 29 non-null object cheqvpaydate 82 non-null object divdpaydate", "or F) if update=True, will get latest date in current df, then get", "common stocks (US and ADR, or tpci column is 0 or F) if", "daily db.get_table('comp', 'funda') # seems to be the same as annual fundamentals #", "SPY strategy from paper (see beat_market_analysis github repo) \"\"\" # merge historical constituents", "str(i)) df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_'", "data history for sp600 I think this was actually used to get all", "one query takes -- about 2s # start = time.time() # df =", "no need to update row by row, and can't get it working anyhow", "another one with Wed instead of Ltd which I don't know what it", "and 1y in future const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey',", "'fundq') # annual db.get_table('comp', 'funda') # industry quarterly db.get_table('comp', 
'aco_indstq') # annual db.get_table('comp',", "tqdm import tqdm import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key': 'data', 'mode':", "1] common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))] common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD stocks common_df.drop(common_df[common_df['curcdd']", "return False, nrows def download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete for now; use download_small_table", "pd from tqdm import tqdm import wrds FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/' hdf_settings = {'key':", "add to current df \"\"\" # filename from first iteration # secd_filename =", "common_stocks, figure out why full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey',", "file # rows_to_get = nrows - const_df.shape[0] + 1 # offset = const_df.shape[0]", "limit for testing # df = db.raw_sql('select {} from {}.{} WHERE gvkey IN", "128 non-null object paydate 5772 non-null object recorddate 2906 non-null object curcdd 999696", "'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic', 'tpci', 'trfd'] \"\"\" pass def test_sql_queries(): pass", "= common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker and remove", "<gh_stars>0 \"\"\" names_ix has index gvkeyx and index name - comes from idx_ann", "library also required. 
default library is the compstat lib download entire table e.g.", "# stocks were removed on 'thru', so if it is the 'thru' date,", "iid sec_short = securities[['tic', 'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get", "# make an index for creating dataframe with last price, so we can", "print('db needs updating') return False, nrows def download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete for", "caps, get close price # get close price a year later, calculate overall", "print('not one of predefined tables to download') def check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\"", "# appends to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del securities del df", "obsolete for now; use download_small_table function instead gets historical index constituents from compustat", "const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg =", "in common_securities['gvkey']]) + ')' # if you want to count how many rows", "cap df['market_cap'] = df['cshoc'] * df['prccd'] # TODO: create file for storing all", "write existing data as hdf table -- first time only # current_df.to_hdf(secd_filename, **hdf_settings_table)", "to the bottom_20_price_chg df price_chg_dict = {} iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values if", "object adrrc 4202 non-null float64 ajexdi 999696 non-null float64 cshoc 439670 non-null float64", "get unique dates where changes were made unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return", "; market cap not available until 1999 for these stocks datestr = start.strftime('%Y-%m-%d')", "sp600 # df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy() #", "'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] 
== '030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\" +", "executor.submit(get_stock_hist_df, gv))) dfs = [] for gv, j in jobs: # print(gv) dfs.append(j.result())", "+ 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep common stocks (tpci = 0 and F)", "adrrc 4202 non-null float64 ajexdi 999696 non-null float64 cshoc 439670 non-null float64 cshtrd", "library is the compstat lib download entire table e.g. tablename='sec_shortint' tables downloaded 9-12:", "obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)), **hdf_settings) del df gc.collect()", "as a histogram; mostly 600 but a few above and below # pd.value_counts(lengths)", "\"\"\" dfs = [] for i in tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH +", "'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'],", "'prcod', 'tic' # maybe want to get iid too, not sure ] other_cols", "nrows else: print('db needs updating') return False, nrows def download_index_constituents(db, nrows=None, update=False): \"\"\"", "get gvkeys for tpci 0 or F # ends up with very slow", "np.array(sorted(date_range)) date_range = date_range[date_range >= start] constituent_companies = OrderedDict() # constituent_tickers = OrderedDict()", "db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)), **hdf_settings)", "# current_tickers = current_stocks['co_tic'] # company tickers constituent_companies[date_string] = current_companies # constituent_tickers[date_string] =", "available until 1999 for these stocks datestr = start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data", "m) & 
(sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] =", "# 2000 onward is [5:] ; market cap not available until 1999 for", "latest file # parse dates not working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df", "'tpci', 'trfd'] \"\"\" pass def test_sql_queries(): pass # with limit for testing #", "tablename == 'sec_dprc': # need to dl in chunks because it is too", "2906 non-null object curcdd 999696 non-null object adrrc 4202 non-null float64 ajexdi 999696", "fundamentals db.get_table('comp', 'idx_ann') # monthly security data db.get_table('comp', 'secm', obs=100) # index constituents", "- comes from idx_ann table in compd library -- need to rewrite query", "cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf' # if os.path.exists(cst_filepath): # const_df = pd.read_hdf(cst_filepath) #", "how many rows are there, use this. # full data query only took", "db lists libraries, and within each library you can list tables \"\"\" db.list_libraries()", "# saves credentials, but not pgpass working # db.create_pgpass_file() return db def list_libs_tables():", "# adapted from beat_market_analysis constituent_utils.py \"\"\" gets historical constituents from WRDS file common", "const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True,", "latest date; use date of datafile as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0,", "huge -- expect it to be about 100GB in memory cols_to_use = ['ajexdi',", "= check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return df = db.get_table(library=library, table=table, obs=nrows)", "index constituents db.get_table('comp', 'idxcst_his') # market cap/price, daily 
data db.get_table('comp', 'secd', obs=100) #", "load_and_combine_sec_dprc(): \"\"\" loads all security data from sec_dprc table \"\"\" dfs = []", "check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return df = db.get_table(library=library, table=table, obs=nrows) if", "common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker and remove iid", "print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs) # get only common stocks securities", "gc.collect() # 30 seconds per 50 -- should take about 20m for 2k", "30 seconds per 50 -- should take about 20m for 2k # took", "has historical price data for securities cols_to_use = ['ajexdi', # Adjusted Price =", "# add ticker and remove iid and gvkey -- should just merge or", "nrows=None, update=False): \"\"\" obsolete for now; use download_small_table function instead gets historical index", "+ pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE') # gets all dates # date_range =", "made unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def spy_20_smallest(): \"\"\" tries", "out why full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) #", "gets historical nasdaq 100 constituents then looks at \"\"\" constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(date_range=None,", "merge or something # for gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey, 'ticker']", "# monthly security data db.get_table('comp', 'secm', obs=100) # index constituents db.get_table('comp', 'idxcst_his') #", "'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx) > 1: print('more than 1", "iid = iid[0] 
last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1]", "# industry quarterly db.get_table('comp', 'aco_indstq') # annual db.get_table('comp', 'aco_indsta') # index prices daily", "+ \"'\" for gv in sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) # reads in", "129 non-null float64 dvrated 2875 non-null float64 paydateind 2 non-null object anncdate 2776", "object recorddate 2906 non-null object curcdd 999696 non-null object adrrc 4202 non-null float64", "def download_entire_table(tablename, library='comp'): \"\"\" downloads an entire table by name; library also required.", "1000000 non-null object dtypes: float64(20), object(21) memory usage: 312.8+ MB so we can", "length >1') iid = iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] ==", "non-null object tic 1000000 non-null object cusip 1000000 non-null object conm 1000000 non-null", "'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'gvkey', 'iid', 'prccd', #", "nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at earliest date for index date_range = np.array(sorted(date_range))", "== y] first_days.append(min(year_dates)) # '1998-01-02' giving key error in constituent_companies price_chg_1y = OrderedDict()", "'complevel': 9} secd_cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI );", "hdf_settings_table = {'key': 'data', 'mode': 'a', 'append': True, 'format': 'table', 'complib': 'blosc', 'complevel':", "pandas_market_calendars as mcal import pandas as pd from tqdm import tqdm import wrds", "just get first of year dates, then get company market caps # get", "pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap df['market_cap'] = df['cshoc'] * df['prccd'] # TODO: create", "pd.to_datetime(df['thru'], utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru'] = 
df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH +", "common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc']", "missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565'] # TODO: is it different/better to rebalance on", "{}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate']) # dfs.append(df) # testing", "db; returns True is up to date; False if not \"\"\" if os.path.exists(df_filepath):", "ignore most of those middle columns cols_to_use = ['ajexdi', 'cshoc', # shares outstanding", "return per year') plt.tight_layout() plt.show() # to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey',", "recorddate 2906 non-null object curcdd 999696 non-null object adrrc 4202 non-null float64 ajexdi", "df['from'] = df['from'].dt.tz_convert('US/Eastern') # df['thru'] = df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) #", "to dl in chunks because it is too huge -- expect it to", "paper (see beat_market_analysis github repo) \"\"\" # merge historical constituents for sp600 with", "non-null object divdpaydate 5691 non-null object divsppaydate 128 non-null object paydate 5772 non-null", "'prccd', 'prchd', 'prcld', 'prcod'] # WARNING: does not appear to work properly. 
probably", "time.time() # df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library,", "db.get_table('comp', 'sec_shortint', obs=100) # quarterly fundamentals db.get_table('comp', 'fundq') # annual db.get_table('comp', 'funda') #", "use sql query to gegt updates; then save to HDF5 for tables like", "'item 2', 'item 3') # gvkeys_str = '(' + ', '.join([\"'\" + s", "until 1999 for these stocks datestr = start.strftime('%Y-%m-%d') constituents = constituent_companies[datestr] current_daily_data =", "were added on 'from' date, so include stocks on 'from' date # use", "plt.tight_layout() plt.show() # to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid',", "through remaining gvkeys in 10 chunks chunk_size = len(remaining_gvs) // 10 for i,", "gv))) dfs = [] for gv, j in jobs: # print(gv) dfs.append(j.result()) end", "S&P 500 Comp-Ltd (there's another one with Wed instead of Ltd which I", "to current df \"\"\" # filename from first iteration # secd_filename = FILEPATH", "only ticker, gvkey, and iid sec_short = securities[['tic', 'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short,", "be the same as annual fundamentals # annual index fundamentals db.get_table('comp', 'idx_ann') #", "rows of secd: RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 41", "[] # TODO: multiprocessing to speed up # takes about 10s for nasdaq", "or tpci column is 0 or F) if update=True, will get latest date", "db.get_row_count(library, tablename) print('number of rows:', nrows) #db.describe_table(library, tablename) # nrows = 1000000 if", "# replace NaT with tomorrow's date # gives copy warning but can't get", "2006 and 07 I think last_idx = 0 else: last_idx = bottom_20_price_chg.index[-1] #", "rows are there, use this. 
# full data query only took a few", "dates not working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf')", "(last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if", "'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep common stocks (tpci = 0 and F) common_securities_short", "gvkeys for tpci 0 or F # ends up with very slow sql", "df_filepath, table, library='comp'): \"\"\" checks if current rows is less than rows in", "check this isn't more than one result, may need to filter by iid", "single_idx_df['from'].min() # get todays date and reset hour, min, sec to 0s #", "= check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return offset = 0 # turns", "def test_sql_queries(): pass # with limit for testing # df = db.raw_sql('select {}", "stocks were removed on 'thru', so if it is the 'thru' date, then", "gvkeys_str = '(' + ', '.join([\"'\" + s + \"'\" for s in", "to have weird dates df.drop(df['prccd'].apply(lambda x: x is None).index, inplace=True) if not df.shape[0]", "something # ignore stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to make", "ad nauseum # common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf')", "# WARNING: does not appear to work properly. 
probably a sql ordering issue", "common_securities = securities[securities['tpci'].isin(['0', 'F'])] # # make string for SQL query: WHERE IN", "and F) common_securities_short = common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True,", "default is 5 per CPU # seems like 5 simultaneous queries is max", "= pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO: deal with", "certain day/month? def secd_info(): \"\"\" info of first 1M rows of secd: RangeIndex:", "'select count(gvkey) from comp.secd where datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str = 'select", "if nrows == current_rows: print('up to date') return True, nrows elif nrows <", "so, downloads anew TODO: if update is True, then will try to find", "'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker and remove iid and gvkey -- should just", "for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(),", "takes a really long time... 
# # df = db.raw_sql('select {} from {}.{}", "out-of-order columns # TODO: find next stock in bottom 20 at time the", "sept year_dates = [d for d in sp600_dates if d.year == y] first_days.append(min(year_dates))", "from compustat table checks if size of table has changed; if so, downloads", "just use gvkeyx which is # 030824 for sp600 # df2 = pd.read_hdf(FILEPATH", "df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library,", "common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))] # don't use CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) #", "df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)), **hdf_settings) del df gc.collect() elif tablename", "from idx_ann table in compd library -- need to rewrite query idxcst_his has", "float64 ajexdi 999696 non-null float64 cshoc 439670 non-null float64 cshtrd 999677 non-null float64", "import OrderedDict from concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot as plt import numpy as", "avoid securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])] # # make", "mcal import pandas as pd from tqdm import tqdm import wrds FILEPATH =", "a pandas df... 
TODO: get latest date already downloaded and use sql query", "changes and check for bankruptcy or acquisitions, etc missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) missing =", "= df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1) # drop currency column # write", "for sp600 I think this was actually used to get all historical stock", "get stocks missing from price changes, and use last price to get price", "= 0 else: last_idx = bottom_20_price_chg.index[-1] # get stocks missing from price changes,", "'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100) # compustat data", "db def list_libs_tables(): \"\"\" some exploration of the db lists libraries, and within", "different/better to rebalance on a certain day/month? def secd_info(): \"\"\" info of first", "date of datafile as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern') #", "const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates() const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change", "gvkeyx].copy() # combine with securities for ticker symbol securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')", "'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100) # compustat data # short data", "[d for d in sp600_dates if d.year == y] first_days.append(min(year_dates)) # '1998-01-02' giving", "\"\"\" checks if current rows is less than rows in db; returns True", "high 'prcld', # low 'prcod', # open 'tic' # ticker symbol ] df", "which seem to have weird dates df.drop(df['prccd'].apply(lambda x: x is None).index, inplace=True) if", "min, sec to 0s # TODO: if not latest date; use date of", 
"dfs.append(j.result()) end = time.time() print('took', int(end - start), 'seconds') big_df = pd.concat(dfs) big_df['datadate']", "tablename='secd'): df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(secd_cols, library, tablename,", "with list of constituents for each day start = single_idx_df['from'].min() # get todays", "how it does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume", "pgpass file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass) # saves credentials,", "df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if", "datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str = 'select {} from {}.{} WHERE datadate", "date_range = mcal.date_range(start=start, end=end) # gets only dates valid for NYSE -- doesn't", "**hdf_settings) def download_small_table(db, table, library='comp'): \"\"\" downloads table if needs updating table can", "wrds_pass = os.environ.get('wrds_password') # tries to use pgpass file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/", "**hdf_settings_table) # only keep common stocks (tpci = 0 and F) common_securities_short =", "current_df = pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for tpci 0 or", "from beat_market_analysis constituent_utils.py \"\"\" gets historical constituents from WRDS file common indexes as", "'1998-01-02' giving key error in constituent_companies price_chg_1y = OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg", "other was put out, and see how it does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr]", "== 
index]) return gvkeyx = gvkeyx[0] # TODO: get latest file # parse", "'fic', 'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic', 'tpci', 'trfd'] \"\"\" pass", "non-null object curcdd 999696 non-null object adrrc 4202 non-null float64 ajexdi 999696 non-null", "OrderedDict from concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot as plt import numpy as np", "const_df.shape[0] + 1 # offset = const_df.shape[0] - 1 # else: # rows_to_get", "indexes as represented in the idx_ann table: SP600: S&P Smallcap 600 Index SP400:", "list # stocks were removed on 'thru', so if it is the 'thru'", "+ 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys))) # raw sql to get", "# with limit for testing # df = db.raw_sql('select {} from {}.{} WHERE", "'eps', 'prccd', 'prchd', 'prcld', 'prcod', 'tic' # maybe want to get iid too,", "query_str = 'select count(gvkey) from comp.secd where datadate > \\'{}\\';'.format(latest_date) # db.raw_sql(query_str) query_str", "start = time.time() jobs = [] # 10 threads per cpu for 8", "out mergers/buyouts, etc # TODO: get tickers for start, end in tqdm(zip(first_days[4:-1], first_days[5:])):", "outstanding 'cshtrd', # volume 'datadate', 'eps', 'prccd', 'prchd', 'prcld', 'prcod', 'tic' # maybe", "first_days.append(min(year_dates)) # '1998-01-02' giving key error in constituent_companies price_chg_1y = OrderedDict() smallest_20 =", "result, may need to filter by iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0]", "download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete for now; use download_small_table function instead gets historical", "are used for multiple companies # get unique dates where changes were made", "of first 1M rows of secd: RangeIndex: 1000000 entries, 0 to 999999 Data", "'prchd', # high 'prcld', # low 'prcod'] # open secd_cols = 
','.join(secd_cols_to_use) def", "count how many rows are there, use this. # full data query only", "= bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)] missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565']", "in a pandas df... TODO: get latest date already downloaded and use sql", "canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to make sure only one iid per", "stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) # no longer need currency, all USD common_df.drop('curcdd',", "\"\"\" nrows = db.get_row_count(library, tablename) print('number of rows:', nrows) #db.describe_table(library, tablename) # nrows", "date_range = np.array(sorted(date_range)) date_range = date_range[date_range >= start] constituent_companies = OrderedDict() # constituent_tickers", "= full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] =", "= pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True) df['from'] = df['from'].dt.tz_convert('US/Eastern') df['thru'] = df['thru'].dt.tz_convert('US/Eastern')", "library='comp'): \"\"\" downloads an entire table by name; library also required. 
default library", "then get everything after that and add to current df \"\"\" # filename", "dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs) # get only common stocks securities =", "company market caps # get smallest 20 market caps, get close price #", "nrows # offset = 0 df = db.get_table(library=library, table=table, obs=nrows, offset=offset) # converts", "think last_idx = 0 else: last_idx = bottom_20_price_chg.index[-1] # get stocks missing from", "# df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename,", "query to gegt updates; then save to HDF5 for tables like 'security', check", "pd.read_hdf(FILEPATH + 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO: deal with acquisitions:", "'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated',", "> 0: print(\"no data to be found!\") return # convert datadate to datetime64", "const_df[const_df['gvkeyx'] == gvkeyx].copy() # combine with securities for ticker symbol securities = pd.read_hdf(FILEPATH", "False, nrows def download_index_constituents(db, nrows=None, update=False): \"\"\" obsolete for now; use download_small_table function", "securities del df del common_df del common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df", "gets only dates valid for NYSE -- doesn't seem to match historical data", "WHERE gvkey = {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate']) return df def download_all_security_data(): \"\"\"", "obs=nrows) if table == 'idxcst_his': # converts date columns to datetime df['from'] =", "db.raw_sql('select * from comp.fundq WHERE gvkey = \\'010519\\';') def get_nasdaq_100_constituents(): \"\"\" gets historical", "str(round(annualized_return, 1))) plt.ylabel('% return per year') 
plt.tight_layout() plt.show() # to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey',", "nyse = mcal.get_calendar('NYSE') # gets all dates # date_range = mcal.date_range(start=start, end=end) #", "'table', 'complib': 'blosc', 'complevel': 9} secd_cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD", "big_df gc.collect() # 30 seconds per 50 -- should take about 20m for", "= current_companies # constituent_tickers[date_string] = current_tickers lengths.append(current_companies.shape[0]) # look at number of constituents", "last price to get price change missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey']))) for m in missing_gvkeys:", "existing data as hdf table -- first time only # current_df.to_hdf(secd_filename, **hdf_settings_table) #", "OrderedDict() # TODO: get latest price if stopped trading during the year; figure", "enumerate(range(0, nrows, nobs), 1): print('on part', str(i)) df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs,", "table=table, library=library) if up_to_date: return df = db.get_table(library=library, table=table, obs=nrows) if table ==", "'F'])] # # make string for SQL query: WHERE IN # # should", "1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days = [] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies,", "'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get stocks' gvkeys for sql search", "reads in all securities securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') all_gvkeys = securities['gvkey'].values remaining_gvs", "'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings) df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table) # only keep common stocks (tpci =", "5694 non-null float64 divdpaydateind 0 non-null object divsp 129 non-null float64 dvrated 2875", "db.create_pgpass_file() return db def list_libs_tables(): 
\"\"\" some exploration of the db lists libraries,", "= db.get_row_count(library, tablename) print('number of rows:', nrows) #db.describe_table(library, tablename) # nrows = 1000000", "'select {} from {}.{} WHERE datadate > \\'{}\\';'# and gvkey IN {};' df", "'CAD'].index, inplace=True) # no longer need currency, all USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate']", "1 # else: # rows_to_get = nrows # offset = 0 df =", "close 'prchd', # high 'prcld', # low 'prcod', # open 'tic' # ticker", "is) SP1500: S&P 1500 Super Composite NASDAQ 100: Nasdaq 100 \"\"\" idx_df =", "= ['adrrc', 'anncdate', 'capgn', 'capgnpaydate', 'cheqv', 'cheqvpaydate', 'curcdd', 'curcddv', 'cusip', 'datadate', 'div', 'divd',", "in tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs) # get", "get it working anyhow # need to figure out where new things are", "get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) securities", "2k # took 1282s for 2127 gvkeys def load_and_combine_sec_dprc(): \"\"\" loads all security", "securities for ticker symbol securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated securities df;", "securities[['tic', 'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get stocks' gvkeys for", "when saving the file # df['from'] = pd.to_datetime(df['from'], utc=True) # df['thru'] = pd.to_datetime(df['thru'],", "constituent_companies[datestr] current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end] # TODO:", "= {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate']) # end = time.time() # print('took', int(end", "tqdm(date_range): # if date is within stock's from and thru, add 
to list", "= db.get_table(library=library, table=table, obs=nrows, offset=offset) # converts date columns to datetime df['from'] =", "on (chapter 6) 'cshoc', # shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'gvkey',", "if needs updating table can be a tablename in the library; common ones", "day/month? def secd_info(): \"\"\" info of first 1M rows of secd: RangeIndex: 1000000", "'iid', 'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] /", "updates \"\"\" df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] == '030824'] sp600_gvkeys", "9} hdf_settings_table = {'key': 'data', 'mode': 'a', 'append': True, 'format': 'table', 'complib': 'blosc',", "the 'thru' date, then shouldn't be included # but stocks were added on", "need to enter credentials to log in \"\"\" wrds_uname = os.environ.get('wrds_username') wrds_pass =", "[] # for gv in tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select {} from {}.{}", "current df \"\"\" # filename from first iteration # secd_filename = FILEPATH +", "return True, nrows elif nrows < current_rows: print('number of available rows is less", "secd_filename = FILEPATH + 'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get", "of hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del dfs del big_df", "once, then after it's saved, good to go # TODO: put this in", "922655 non-null object fic 1000000 non-null object dtypes: float64(20), object(21) memory usage: 312.8+", "= full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0]", 
"'idxcst_his': download_index_constituents() else: print('not one of predefined tables to download') def check_if_up_to_date(db, df_filepath,", "because this is a small table... # no need to update row by", "[5:] ; market cap not available until 1999 for these stocks datestr =", "with 1M rows # query_str = 'select count(gvkey) from comp.secd where datadate >", "bottom_20_price_chg df price_chg_dict = {} iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values if len(iid) >", "non-null float64 div 5780 non-null float64 divd 5694 non-null float64 divdpaydateind 0 non-null", "is up to date; False if not \"\"\" if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath)", "TODO: # need to check that no tickers are used for multiple companies", "shouldn't be included # but stocks were added on 'from' date, so include", "symbol securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated securities df; only ticker, gvkey,", "'iid']) common_df.drop('curcdd', inplace=True, axis=1) # drop currency column # write existing data as", "returns True is up to date; False if not \"\"\" if os.path.exists(df_filepath): current_df", "ADR, which are stocks you can buy common_stocks = securities[securities['tpci'].isin(['0', 'F'])] common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True)", "\"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't have permission?? 
db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'):", "annual db.get_table('comp', 'aco_indsta') # index prices daily db.get_table('comp', 'idx_mth') # simplified financial statement", "sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) # reads in all securities securities = pd.read_hdf(FILEPATH +", "to date') return True, nrows elif nrows < current_rows: print('number of available rows", "'from' date # use dataframe masking date_string = d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <=", "there, use this. # full data query only took a few seconds even", "F) common_securities_short = common_securities[['gvkey', 'iid']] common_df = df.merge(common_securities_short, on=['gvkey', 'iid']) common_df.drop('curcdd', inplace=True, axis=1)", "+ p for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) * 100 plt.plot(price_chg_1y.keys(),", "non-null object curcddv 5861 non-null object capgn 29 non-null float64 cheqv 85 non-null", "for d in tqdm(date_range): # if date is within stock's from and thru,", "df del common_df del common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df = db.raw_sql('select", "gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index]) return gvkeyx = gvkeyx[0] # TODO: get latest", "library='comp'): \"\"\" downloads table if needs updating table can be a tablename in", "e.g. 
tablename='sec_shortint' tables downloaded 9-12: sec_shortint security secd secd is about 39GB in", "and see how it does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20", "year') plt.tight_layout() plt.show() # to get tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey',", "liquidated # https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm # get gvkeys missing in price changes and check for", "' + str(round(annualized_return, 1))) plt.ylabel('% return per year') plt.tight_layout() plt.show() # to get", "big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!! # big_df['datadate'].dt.tz_localize('US/Eastern') #", "= [] # TODO: multiprocessing to speed up # takes about 10s for", "FILEPATH + 'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') # get gvkeys for", "speed up # takes about 10s for nasdaq 100 for d in tqdm(date_range):", "in 1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique() first_days = [] sp600_dates = sorted(sp600_stocks['datadate'].unique())", "remove iid and gvkey -- should just merge or something # for gvkey", "# start = time.time() # df = db.raw_sql('select {} from {}.{} WHERE gvkey", "to make sure only one iid per gvkey -- not quite gvkey_grp =", "some exploration of the db lists libraries, and within each library you can", "tqdm(range(1, 13)): # print(i) dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i)))) df = pd.concat(dfs) # get only", "beat_market_analysis github repo) \"\"\" # merge historical constituents for sp600 with daily price,", 
"code that tests some db functions and explores them \"\"\" df = db.get_table('comp',", "need to filter by iid too price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] =", "# but stocks were added on 'from' date, so include stocks on 'from'", "here are 0 for now # get the overall price changes each year", "sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]): # first", "update it \"\"\" library = 'comp' table = 'idxcst_his' # check if any", "# need to join up with other dataframe maybe, for now, just use", "get only common stocks securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # gvkeys = df['gvkey'].unique()", "if so, downloads anew TODO: if update is True, then will try to", "non-null float64 prcld 985637 non-null float64 prcod 224624 non-null float64 prcstd 999696 non-null", "gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'): df = db.raw_sql('select {} from {}.{} WHERE gvkey", "# df['from'] = pd.to_datetime(df['from'], utc=True) # df['thru'] = pd.to_datetime(df['thru'], utc=True) # df['from'] =", "probably a sql ordering issue or something nobs = 10000000 for i, start", "date and download updates \"\"\" df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx']", "it is) SP1500: S&P 1500 Super Composite NASDAQ 100: Nasdaq 100 \"\"\" idx_df", "# use dataframe masking date_string = d.strftime('%Y-%m-%d') current_stocks = single_idx_df[(single_idx_df['from'] <= d) &", "and on (chapter 6) 'cshoc', # shares outstanding 'cshtrd', # volume 'curcdd', 'datadate',", "think 0 or F for tpci are common or ADR, which are stocks", "= bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others not in here are 0 for", "column # write existing data as hdf table -- first time only #", "gc.collect() elif tablename == 
'idxcst_his': download_index_constituents() else: print('not one of predefined tables to", "'prcod'] # WARNING: does not appear to work properly. probably a sql ordering", "# dfs = [] # for gv in tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select", "dynamically set date instead of hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs", "# TODO: figure out why a few hundred are missing in the daily", "for constituents now and 1y in future const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']]", "history for sp600 I think this was actually used to get all historical", "get first of year dates, then get company market caps # get smallest", "price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others not in here are 0", "tablename in the library; common ones for compustat (comp) are: security names_ix idxcst_his", "# don't use CAD stocks common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True) # no longer need", "start = single_idx_df['from'].min() # get todays date and reset hour, min, sec to", "unique dates where changes were made unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies,", "object paydate 5772 non-null object recorddate 2906 non-null object curcdd 999696 non-null object", "list_libs_tables(): \"\"\" some exploration of the db lists libraries, and within each library", "the db lists libraries, and within each library you can list tables \"\"\"", "# ignore stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check to make sure", "len(remaining_gvs): gvkeys_strings = [\"'\" + gv + \"'\" for gv in remaining_gvs[start:]] else:", "todays date and reset hour, min, sec to 0s # TODO: if not", "index name - comes from idx_ann table in compd library -- need to", "on (chapter 6) 'cshoc', # shares 
outstanding 'cshtrd', # volume 'curcdd', 'datadate', 'eps',", "> \\'{}\\';'# and gvkey IN {};' df = db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate'])", "close 'prchd', # high 'prcld', # low 'prcod'] # open secd_cols = ','.join(secd_cols_to_use)", "inplace=True) nyse = mcal.get_calendar('NYSE') # gets all dates # date_range = mcal.date_range(start=start, end=end)", "append works with out-of-order columns # TODO: find next stock in bottom 20", "tickers smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid']) securities =", "tables to download') def check_if_up_to_date(db, df_filepath, table, library='comp'): \"\"\" checks if current rows", "= OrderedDict() # TODO: get latest price if stopped trading during the year;", "price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append works", "non-null object divsppaydate 128 non-null object paydate 5772 non-null object recorddate 2906 non-null", "# if date is within stock's from and thru, add to list #", "df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv),", "85 non-null float64 div 5780 non-null float64 divd 5694 non-null float64 divdpaydateind 0", "# TODO: put this in a clean function and do when saving the", "'gvkey', 'iid', 'prccd', 'prchd', 'prcld', 'prcod'] # WARNING: does not appear to work", "beat_market_analysis constituent_utils.py \"\"\" gets historical constituents from WRDS file common indexes as represented", "first make strings out of gvkeys for SQL query start = ch if", "# maybe want to get iid too, not sure ] other_cols = ['adrrc',", "object tic 1000000 non-null object cusip 1000000 non-null object conm 1000000 non-null object", "from 
paper (see beat_market_analysis github repo) \"\"\" # merge historical constituents for sp600", "FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return", "'.join([\"'\" + s + \"'\" for s in common_securities['gvkey']]) + ')' # if", "historical constituents for sp600 with daily price, eps, and market cap data #", "linearly # dfs = [] # for gv in tqdm(sp600_gvkeys_strings): # df =", "first year starts on sept year_dates = [d for d in sp600_dates if", "- price_chg_dict['adj_close']) / price_chg_dict['adj_close'] bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append", "secd: RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 41 columns): gvkey", "annualized return = ' + str(round(annualized_return, 1))) plt.ylabel('% return per year') plt.tight_layout() plt.show()", "Comp-Ltd (there's another one with Wed instead of Ltd which I don't know", "why full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get", "iid # TODO: check this isn't more than one result, may need to", "= (PRCCD / AJEXDI ); “Understanding the Data” on page 91 and on", "91 and on (chapter 6) 'cshoc', # shares outstanding 'cshtrd', # volume 'curcdd',", "ticker symbol securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated securities df; only ticker,", "for now; use download_small_table function instead gets historical index constituents from compustat table", "sql query to gegt updates; then save to HDF5 for tables like 'security',", "s in common_securities['gvkey']]) + ')' # if you want to count how many", "df = db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100)", "# db.raw_sql(query_str) query_str = 'select 
{} from {}.{} WHERE datadate > \\'{}\\';'# and", "and market cap data # see what returns are on yearly rebalance for", "'(' + ', '.join([\"'\" + s + \"'\" for s in common_securities['gvkey']]) +", "/ const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'],", "latest date already downloaded and use sql query to gegt updates; then save", "'curcddv', 'cusip', 'datadate', 'div', 'divd', 'divdpaydate', 'divdpaydateind', 'divsp', 'divsppaydate', 'dvi', 'dvrated', 'epsmo', 'exchg',", "= [\"'\" + gv + \"'\" for gv in remaining_gvs[start:ch + chunk_size]] start", "on=['gvkey', 'iid']) # get stocks' gvkeys for sql search -- no longer needed", "calculate overall return # repeat ad nauseum # common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf')", "get gvkeys missing in price changes and check for bankruptcy or acquisitions, etc", "create file for storing all updated data and append # used once to", "exploration of the db lists libraries, and within each library you can list", "'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') # only need to do this", "same name as table \"\"\" df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows =", "'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH + 'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y') #", "= pd.to_datetime(df['from'], utc=True) # df['thru'] = pd.to_datetime(df['thru'], utc=True) # df['from'] = df['from'].dt.tz_convert('US/Eastern') #", "raw sql to get historical security data # goes through all securities and", "'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get adjusted closes for constituents now", "library, tablename), 
date_cols=['datadate']) def testing_db(): \"\"\" looks like some code that tests some", "* sp600_stocks['prccd'] # sp600 index data starts in 1994 years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >=", "df.drop(df['prccd'].apply(lambda x: x is None).index, inplace=True) if not df.shape[0] > 0: print(\"no data", "is [5:] ; market cap not available until 1999 for these stocks datestr", "db.list_tables('comp_global_daily') db.list_tables('comp') def download_entire_table(tablename, library='comp'): \"\"\" downloads an entire table by name; library", "d in sp600_dates if d.year == y] first_days.append(min(year_dates)) # '1998-01-02' giving key error", "price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0] price_chg_dict['adj_close_1y_future'] = last_price price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close'])", "secd_cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding the", "Composite NASDAQ 100: Nasdaq 100 \"\"\" idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx =", "20 at time the other was put out, and see how it does", "# # last one currently in the hdf file # rows_to_get = nrows", "plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600 stocks yearly returns, annualized return = '", "= pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values if len(gvkeyx) > 1:", "eps, and market cap data # see what returns are on yearly rebalance", "now # get the overall price changes each year annualized_return = (np.prod([1 +", "in tqdm(sp600_gvkeys_strings): # df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use),", "= ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding the Data”", "took 1282s for 2127 gvkeys def load_and_combine_sec_dprc(): \"\"\" loads all security data from", "const_price_change bottom_20 = 
full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] = bottom_20 bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True)", "elif tablename == 'sec_dprc': # need to dl in chunks because it is", "what new stuff is \"\"\" nrows = db.get_row_count(library, tablename) print('number of rows:', nrows)", "#db.describe_table(library, tablename) # nrows = 1000000 if tablename == 'secd': # this is", "on=['gvkey', 'iid']) full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid']) # get adjusted closes for constituents", "gvkeyx[0] # TODO: get latest file # parse dates not working for hdf,", "on a certain day/month? def secd_info(): \"\"\" info of first 1M rows of", "gets acquisition spending; aqcy column df4 = db.raw_sql('select * from comp.fundq WHERE gvkey", "query idxcst_his has the historical index constituents \"\"\" import os import gc import", "# TODO: # need to check that no tickers are used for multiple", "about 100GB in memory cols_to_use = ['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey',", "out what new stuff is \"\"\" nrows = db.get_row_count(library, tablename) print('number of rows:',", "['ajexdi', 'cshoc', 'cshtrd', 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', 'prchd', 'prcld', 'prcod'] #", "nrows) #db.describe_table(library, tablename) # nrows = 1000000 if tablename == 'secd': # this", "print('shit, iid length >1') iid = iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) &", "take about 20m for 2k # took 1282s for 2127 gvkeys def load_and_combine_sec_dprc():", "# TODO: dynamically set date instead of hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings)", "earliest date for index date_range = np.array(sorted(date_range)) date_range = date_range[date_range >= start] constituent_companies", "yearly returns, 
annualized return = ' + str(round(annualized_return, 1))) plt.ylabel('% return per year')", "# shares outstanding 'cshtrd', # volume 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close", "axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) # get % price change for each const_price_change", "download because this is a small table... # no need to update row", "any more rows and grab new stuff, or just grab whole table if", "5691 non-null object divsppaydate 128 non-null object paydate 5772 non-null object recorddate 2906", "offset = const_df.shape[0] - 1 # else: # rows_to_get = nrows # offset", "to get all historical stock data actually, not just sp600. TODO: get latest", "from sec_dprc table \"\"\" dfs = [] for i in tqdm(range(1, 13)): #", "then get company market caps # get smallest 20 market caps, get close", "data if date_range is None: date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at", "pd.read_hdf(FILEPATH + 'hdf/security.hdf') common_securities = securities[securities['tpci'].isin(['0', 'F'])] # # make string for SQL", "dates # date_range = mcal.date_range(start=start, end=end) # gets only dates valid for NYSE", "annual index fundamentals db.get_table('comp', 'idx_ann') # monthly security data db.get_table('comp', 'secm', obs=100) #", "set date instead of hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del", "will get latest date in current df, then get everything after that and", "securities df; only ticker, gvkey, and iid sec_short = securities[['tic', 'gvkey', 'iid']] single_idx_df", "# OTC pricing db.get_table('otc', 'endofday', obs=100) # gets acquisition spending; aqcy column df4", "secd_info(): \"\"\" info of first 1M rows of secd: RangeIndex: 1000000 entries, 0", "gvkey_grp = common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1] 
common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))]", "current_companies = current_stocks[['gvkey', 'iid']] # company names # current_tickers = current_stocks['co_tic'] # company", "- const_price_change['adj_close']) / const_price_change['adj_close'] price_chg_1y[datestr] = const_price_change bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20] smallest_20[datestr] =", "a few hundred are missing in the daily data from the constituent list", "download entire table e.g. tablename='sec_shortint' tables downloaded 9-12: sec_shortint security secd secd is", "quite gvkey_grp = common_stocks.groupby('gvkey') num_iids = gvkey_grp['iid'].nunique() num_iids.mean() num_iids[num_iids > 1] common_df =", "from price changes, and use last price to get price change missing_gvkeys =", "# Adjusted Price = (PRCCD / AJEXDI ); “Understanding the Data” on page", "db;') print('something is wrong...') return True, nrows else: print('db needs updating') return False,", "looks like some code that tests some db functions and explores them \"\"\"", "= df['thru'].dt.tz_convert('US/Eastern') # df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings) # need to join up with", "is within stock's from and thru, add to list # stocks were removed", "+ 'hdf/security.hdf') bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid']) # TODO: deal with acquisitions: dlrsni", "but can't get rid of it... 
single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE')", "gv + \"'\" for gv in remaining_gvs[start:ch + chunk_size]] start = time.time() jobs", "converts date columns to datetime df['from'] = pd.to_datetime(df['from'], utc=True) df['thru'] = pd.to_datetime(df['thru'], utc=True)", "then will try to find existing dataframe and only update it \"\"\" library", "= pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] == '030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings", "\"\"\" gets historical nasdaq 100 constituents then looks at \"\"\" constituent_companies, unique_dates =", "# TODO: check if append works with out-of-order columns # TODO: find next", "need to dl in chunks because it is too huge -- expect it", "pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE') # gets all dates # date_range = mcal.date_range(start=start,", "# TODO: is it different/better to rebalance on a certain day/month? 
def secd_info():", "print(gv) dfs.append(j.result()) end = time.time() print('took', int(end - start), 'seconds') big_df = pd.concat(dfs)", "TODO: if update is True, then will try to find existing dataframe and", "if any more rows and grab new stuff, or just grab whole table", "db -- has historical price data for securities cols_to_use = ['ajexdi', # Adjusted", "'hdf/security.hdf') # abbreviated securities df; only ticker, gvkey, and iid sec_short = securities[['tic',", "non-null object datadate 1000000 non-null object tic 1000000 non-null object cusip 1000000 non-null", "check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return offset = 0 # turns out", "= const_df.iloc[-1] # # get new rows plus the last one to check", "queries is max -- run in parallel with ThreadPoolExecutor(max_workers=5) as executor: for gv", "of those middle columns cols_to_use = ['ajexdi', 'cshoc', # shares outstanding 'cshtrd', #", "tpci 1000000 non-null object cik 922655 non-null object fic 1000000 non-null object dtypes:", "= current_df.shape[0] else: current_rows = 0 nrows = db.get_row_count(library=library, table=table) if nrows ==", "compustat (comp) are: security names_ix idxcst_his .h5 files have same name as table", "db.get_table('comp', 'idx_mth') # simplified financial statement extract daily db.get_table('comp', 'funda') # seems to", "only keep common stocks (tpci = 0 and F) common_securities_short = common_securities[['gvkey', 'iid']]", "constituents from WRDS file common indexes as represented in the idx_ann table: SP600:", "= 'select {} from {}.{} WHERE datadate > \\'{}\\';'# and gvkey IN {};'", "working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') # only", "= db.raw_sql('select {} from {}.{} WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename,", "it... 
single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE') # gets all dates #", "up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library) if up_to_date: return offset = 0", "- const_df.shape[0] + 1 # offset = const_df.shape[0] - 1 # else: #", "'prcld', # low 'prcod'] # open secd_cols = ','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates", "1500 Super Composite NASDAQ 100: Nasdaq 100 \"\"\" idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf')", "if bottom_20_price_chg.shape[0] == 0: # everything was acquired/bankrupt, etc, like in 2006 and", "data db -- has historical price data for securities cols_to_use = ['ajexdi', #", "everything after that and add to current df \"\"\" # filename from first", "that no tickers are used for multiple companies # get unique dates where", "pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!! 
# big_df['datadate'].dt.tz_localize('US/Eastern')", "market cap not available until 1999 for these stocks datestr = start.strftime('%Y-%m-%d') constituents", "start] one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end] # TODO: figure out why a few", "MB so we can ignore most of those middle columns cols_to_use = ['ajexdi',", "mostly 600 but a few above and below # pd.value_counts(lengths) # plt.hist(lengths) #", "'ticker'] = securities[securities['gvkey'] == gvkey]['tic'] def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'): # adapted", "error in constituent_companies price_chg_1y = OrderedDict() smallest_20 = OrderedDict() smallest_20_1y_chg = OrderedDict() #", "open secd_cols = ','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates connection to WRDS database need", "len(iid) > 1: print('shit, iid length >1') iid = iid[0] last_data = sp600_stocks[(sp600_stocks['gvkey']", "WRDS file common indexes as represented in the idx_ann table: SP600: S&P Smallcap", "'_min'), **hdf_settings) elif tablename == 'sec_dprc': # need to dl in chunks because", "rewrite query idxcst_his has the historical index constituents \"\"\" import os import gc", "# should be like ('item 1', 'item 2', 'item 3') # gvkeys_str =", "2', 'item 3') # gvkeys_str = '(' + ', '.join([\"'\" + s +", "for ticker symbol securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated securities df; only", "use gvkeyx which is # 030824 for sp600 # df2 = pd.read_hdf(FILEPATH +", "date_range = date_range[date_range >= start] constituent_companies = OrderedDict() # constituent_tickers = OrderedDict() lengths", "but a few above and below # pd.value_counts(lengths) # plt.hist(lengths) # plt.show() #", "non-null object capgn 29 non-null float64 cheqv 85 non-null float64 div 5780 non-null", "'secm', obs=100) # index constituents db.get_table('comp', 'idxcst_his') # market cap/price, daily data db.get_table('comp',", "db functions and explores 
them \"\"\" df = db.get_table('comp', 'security', obs=10) db.get_table('crsp', 'dsf',", "table e.g. tablename='sec_shortint' tables downloaded 9-12: sec_shortint security secd secd is about 39GB", "{}.{} WHERE datadate > \\'{}\\';'# and gvkey IN {};' df = db.raw_sql(query_str.format(secd_cols, library,", "# for gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey'] ==", "not latest date; use date of datafile as latest end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0,", "# annual index fundamentals db.get_table('comp', 'idx_ann') # monthly security data db.get_table('comp', 'secm', obs=100)", "outstanding 'cshtrd', # volume 'curcdd', 'datadate', 'eps', 'gvkey', 'iid', 'prccd', # close 'prchd',", "')' # if you want to count how many rows are there, use", "= 1000000 if tablename == 'secd': # this is the securities data db", "df.to_hdf(df_filepath, **hdf_settings) def download_small_table(db, table, library='comp'): \"\"\" downloads table if needs updating table", "the daily data from the constituent list # AIR ('001004') is not in", "const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))] bottom_20_price_chg.reset_index(inplace=True, drop=True) if bottom_20_price_chg.shape[0] == 0: # everything was acquired/bankrupt, etc, like", "the last one to check it's the same as the # # last", "set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique()) return constituent_companies, unique_dates def spy_20_smallest(): \"\"\" tries to implement 20", "if not df.shape[0] > 0: print(\"no data to be found!\") return # convert", "get the overall price changes each year annualized_return = (np.prod([1 + p for", "replace NaT with tomorrow's date # gives copy warning but can't get rid", "be a tablename in the library; common ones for compustat (comp) are: security", "current_rows: print('number of available rows is less than number in current db;') 
print('something", "5 simultaneous queries is max -- run in parallel with ThreadPoolExecutor(max_workers=5) as executor:", "sec_dprc table \"\"\" dfs = [] for i in tqdm(range(1, 13)): # print(i)", "used to get all historical stock data actually, not just sp600. TODO: get", "\"\"\" tries to implement 20 smallest SPY strategy from paper (see beat_market_analysis github", "309295 non-null float64 epsmo 309295 non-null float64 prccd 999696 non-null float64 prchd 986959", "in here are 0 for now # get the overall price changes each", "Wed instead of Ltd which I don't know what it is) SP1500: S&P", "# plt.hist(lengths) # plt.show() # TODO: # need to check that no tickers", "put out, and see how it does smallest_20_1y_chg[datestr] = bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum()", "latest_date), date_cols=['datadate']) # drop columns which seem to have weird dates df.drop(df['prccd'].apply(lambda x:", "const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1) const_future_price.drop(['prccd', 'ajexdi'], inplace=True,", "# took 1282s for 2127 gvkeys def load_and_combine_sec_dprc(): \"\"\" loads all security data", "datetime from collections import OrderedDict from concurrent.futures import ThreadPoolExecutor import matplotlib.pyplot as plt", "threads per cpu for 8 cores; default is 5 per CPU # seems", "SP400: S&P Midcap 400 Index SP500: S&P 500 Comp-Ltd (there's another one with", "entire table e.g. 
tablename='sec_shortint' tables downloaded 9-12: sec_shortint security secd secd is about", "985637 non-null float64 prcod 224624 non-null float64 prcstd 999696 non-null float64 trfd 733884", "'dlrsni', 'tic']]) missing_merged[['tic', 'dlrsni']] securities[securities['gvkey'] == '010565'] # TODO: is it different/better to", "29 non-null float64 cheqv 85 non-null float64 div 5780 non-null float64 divd 5694", "+ 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH + 'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename) latest_date = current_df['datadate'].max().strftime('%m/%d/%y')", "'date', 'bidlo', 'askhi'], obs=100) # compustat data # short data db.get_table('comp', 'sec_shortint', obs=100)", "{} from {}.{} WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate'])", "work!! # big_df['datadate'].dt.tz_localize('US/Eastern') # TODO: dynamically set date instead of hard copy big_df.to_hdf(FILEPATH", "common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap'] = common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) #", "if d.year == y] first_days.append(min(year_dates)) # '1998-01-02' giving key error in constituent_companies price_chg_1y", "end_date=end.date()).tz_convert('US/Eastern') else: # cutoff at earliest date for index date_range = np.array(sorted(date_range)) date_range", "common_df['market_cap'] = common_df['cshoc'] * common_df['prccd'] common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings) # add ticker and", "div 5780 non-null float64 divd 5694 non-null float64 divdpaydateind 0 non-null object divsp", "few seconds even with 1M rows # query_str = 'select count(gvkey) from comp.secd", "data db.get_table('comp', 'secm', obs=100) # index constituents db.get_table('comp', 'idxcst_his') # market cap/price, daily", "dfs 
= [] for gv, j in jobs: # print(gv) dfs.append(j.result()) end =", "price, so we can append it to the bottom_20_price_chg df price_chg_dict = {}", "= ','.join(secd_cols_to_use) def make_db_connection(): \"\"\" creates connection to WRDS database need to enter", "1: print('more than 1 gvkeyx, exiting:') print(idx_df[idx_df['conm'] == index]) return gvkeyx = gvkeyx[0]", "(sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid']", "float64(20), object(21) memory usage: 312.8+ MB so we can ignore most of those", "for gvkey in tqdm(common_df['gvkey'].unique()): # common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic']", "than number in current db;') print('something is wrong...') return True, nrows else: print('db", "non-null float64 trfd 733884 non-null float64 exchg 1000000 non-null float64 secstat 1000000 non-null", "'gvkey', 'iid', 'paydate', 'paydateind', 'prcstd', 'recorddate', 'secstat', 'tic', 'tpci', 'trfd'] \"\"\" pass def", "sec_short = securities[['tic', 'gvkey', 'iid']] single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid']) # get stocks'", "True, then will try to find existing dataframe and only update it \"\"\"", "pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] == '030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings =", "tablename='sec_shortint' tables downloaded 9-12: sec_shortint security secd secd is about 39GB in a", "if len(iid) > 1: print('shit, iid length >1') iid = iid[0] last_data =", "same as annual fundamentals # annual index fundamentals db.get_table('comp', 'idx_ann') # monthly security", "secd secd is about 39GB in a pandas df... 
TODO: get latest date", "eps 309295 non-null float64 epsmo 309295 non-null float64 prccd 999696 non-null float64 prchd", "del securities del df del common_df del common_securities gc.collect() def get_stock_hist_df(gvkey, library='comp', tablename='secd'):", "pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') # only need to do this once, then after it's", "2h linearly # dfs = [] # for gv in tqdm(sp600_gvkeys_strings): # df", "adapted from beat_market_analysis constituent_utils.py \"\"\" gets historical constituents from WRDS file common indexes", "library = 'comp' table = 'idxcst_his' # check if any new rows df_filepath", "can list tables \"\"\" db.list_libraries() db.list_tables('zacks') db.list_tables('ciq') # don't have permission?? db.list_tables('comp_global_daily') db.list_tables('comp')", "# constituent_tickers = OrderedDict() lengths = [] # TODO: multiprocessing to speed up", "all historical stock data actually, not just sp600. TODO: get latest date and", "capgn 29 non-null float64 cheqv 85 non-null float64 div 5780 non-null float64 divd", "now, just use gvkeyx which is # 030824 for sp600 # df2 =", "{} from {}.{} WHERE gvkey = \\'001004\\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate']) def", "date and reset hour, min, sec to 0s # TODO: if not latest", "object divdpaydate 5691 non-null object divsppaydate 128 non-null object paydate 5772 non-null object", "data actually, not just sp600. 
TODO: get latest date and download updates \"\"\"", "for sp600 # df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy()", "is 5 per CPU # seems like 5 simultaneous queries is max --", "to datetime64 df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern') # colculate market cap df['market_cap'] = df['cshoc'] *", "cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding the", "stocks' gvkeys for sql search -- no longer needed # gvkeys = single_idx_df['gvkey'].values", "acquired/bankrupt, etc, like in 2006 and 07 I think last_idx = 0 else:", "rid of it... single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True) nyse = mcal.get_calendar('NYSE') # gets all", "2875 non-null float64 paydateind 2 non-null object anncdate 2776 non-null object capgnpaydate 29", "**hdf_settings) del df gc.collect() elif tablename == 'idxcst_his': download_index_constituents() else: print('not one of", "Smallcap 600 Index SP400: S&P Midcap 400 Index SP500: S&P 500 Comp-Ltd (there's", "saves credentials, but not pgpass working # db.create_pgpass_file() return db def list_libs_tables(): \"\"\"", "+ 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] == '030824'] sp600_gvkeys = np.unique(sp600_df['gvkey'].values) sp600_gvkeys_strings = [\"'\"", "df = pd.concat(dfs) # get only common stocks securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')", "to list # stocks were removed on 'thru', so if it is the", "year annualized_return = (np.prod([1 + p for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) -", "# high 'prcld', # low 'prcod', # open 'tic' # ticker symbol ]", "no longer need currency, all USD common_df.drop('curcdd', axis=1, inplace=True) common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern') common_df['market_cap']", "multiple companies # get unique dates where changes were made unique_dates = set(single_idx_df['from'].unique())", 
"const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']] const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close']", "parse dates not working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True) const_df = pd.read_hdf(FILEPATH +", "last one currently in the hdf file # rows_to_get = nrows - const_df.shape[0]", "print(\"no data to be found!\") return # convert datadate to datetime64 df['datadate'] =", "# need to check that no tickers are used for multiple companies #", "with other dataframe maybe, for now, just use gvkeyx which is # 030824", "secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf' secd_filename = FILEPATH + 'hdf/secd.hdf' current_df = pd.read_hdf(secd_filename)", "Adjusted Price = (PRCCD / AJEXDI ); “Understanding the Data” on page 91", "date_cols=['datadate']) return df def download_all_security_data(): \"\"\" downloads full security data history for sp600", "> len(remaining_gvs): gvkeys_strings = [\"'\" + gv + \"'\" for gv in remaining_gvs[start:]]", "instead gets historical index constituents from compustat table checks if size of table", "int(end - start), 'seconds') big_df = pd.concat(dfs) big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern') # big_df['datadate'] =", "cap not available until 1999 for these stocks datestr = start.strftime('%Y-%m-%d') constituents =", "buyouts or something # ignore stocks on canadian exchanges common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True) # check", "m]['iid'].values if len(iid) > 1: print('shit, iid length >1') iid = iid[0] last_data", "gvkeys in 10 chunks chunk_size = len(remaining_gvs) // 10 for i, ch in", "'prchd', # high 'prcld', # low 'prcod', # open 'tic' # ticker symbol", "[\"'\" + gv + \"'\" for gv in remaining_gvs[start:]] else: gvkeys_strings = [\"'\"", "in the hdf file # rows_to_get = nrows - const_df.shape[0] + 1 #", "= 
date_range[date_range >= start] constituent_companies = OrderedDict() # constituent_tickers = OrderedDict() lengths =", "'prcstd', 'recorddate', 'secstat', 'tic', 'tpci', 'trfd'] \"\"\" pass def test_sql_queries(): pass # with", "'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)), **hdf_settings) del df gc.collect() elif tablename == 'idxcst_his':", "which is # 030824 for sp600 # df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf') single_idx_df", "'idx_mth') # simplified financial statement extract daily db.get_table('comp', 'funda') # seems to be", "download updates \"\"\" df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf') sp600_df = df[df['gvkeyx'] == '030824']", "non-null float64 prcod 224624 non-null float64 prcstd 999696 non-null float64 trfd 733884 non-null", "list # AIR ('001004') is not in common_stocks, figure out why full_const =", "datadate > \\'{}\\';'# and gvkey IN {};' df = db.raw_sql(query_str.format(secd_cols, library, table, latest_date),", "on=['gvkey', 'iid']) # TODO: deal with acquisitions: dlrsni 01 is acquired, 02 is", "name as table \"\"\" df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table) up_to_date, nrows = check_if_up_to_date(db,", "non-null float64 secstat 1000000 non-null object tpci 1000000 non-null object cik 922655 non-null", "= const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi'] const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1)", "df gc.collect() def download_common_stock_price_history(db, update=True, table='secd', library='comp'): \"\"\" downloads data for all common", "+ 'hdf/common_us_stocks_daily_9-12-2018.hdf') sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf') sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd'] #", "\"\"\" pass def test_sql_queries(): pass # with limit for testing # df =", "== 'sec_dprc': # need to dl in chunks because it is 
too huge", "date_range[date_range >= start] constituent_companies = OrderedDict() # constituent_tickers = OrderedDict() lengths = []", "deal with acquisitions: dlrsni 01 is acquired, 02 is bankrupt, 03 is liquidated", "from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate']) # dfs.append(df) #", "jobs del dfs del big_df gc.collect() # 30 seconds per 50 -- should", "date_cols=['datadate']) # takes a really long time... # # df = db.raw_sql('select {}", "\"\"\" if os.path.exists(df_filepath): current_df = pd.read_hdf(df_filepath) current_rows = current_df.shape[0] else: current_rows = 0", "\"\"\" import os import gc import time import datetime from collections import OrderedDict", "if append works with out-of-order columns # TODO: find next stock in bottom", "'prccd']] const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future']", "are there, use this. 
# full data query only took a few seconds", "bottom_20_price_chg price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others not in here are", "= [] sp600_dates = sorted(sp600_stocks['datadate'].unique()) constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates) for y in tqdm(years[1:]):", "divdpaydate 5691 non-null object divsppaydate 128 non-null object paydate 5772 non-null object recorddate", "tries to use pgpass file; see here: # https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/ db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass)", "20 smallest SPY strategy from paper (see beat_market_analysis github repo) \"\"\" # merge", "'blosc', 'complevel': 9} hdf_settings_table = {'key': 'data', 'mode': 'a', 'append': True, 'format': 'table',", "OrderedDict() # constituent_tickers = OrderedDict() lengths = [] # TODO: multiprocessing to speed", "obs=nrows) df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings) elif tablename == 'sec_dprc': # need", "to be the same as annual fundamentals # annual index fundamentals db.get_table('comp', 'idx_ann')", "(1/len(price_chg_1y.values())) - 1) * 100 plt.plot(price_chg_1y.keys(), price_chg_1y.values()) plt.scatter(price_chg_1y.keys(), price_chg_1y.values()) plt.xticks(rotation=90) plt.title('bottom 20 SP600", "== iid)][['prccd', 'ajexdi']].dropna().iloc[-1] last_price = last_data['prccd'] / last_data['ajexdi'] price_chg_dict['gvkey'] = m price_chg_dict['iid'] =", "of available rows is less than number in current db;') print('something is wrong...')", "wrds_password=wrds_pass) # saves credentials, but not pgpass working # db.create_pgpass_file() return db def", "full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']] const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi'] const_future_price['adj_close_1y_future'] = const_future_price['prccd'] 
/", "extract daily db.get_table('comp', 'funda') # seems to be the same as annual fundamentals", "'recorddate', 'secstat', 'tic', 'tpci', 'trfd'] \"\"\" pass def test_sql_queries(): pass # with limit", "last one to check it's the same as the # # last one", "weird dates df.drop(df['prccd'].apply(lambda x: x is None).index, inplace=True) if not df.shape[0] > 0:", "d in tqdm(date_range): # if date is within stock's from and thru, add", "db.get_table(library=library, table=table, obs=nrows) if table == 'idxcst_his': # converts date columns to datetime", "\"'\" for gv in sp600_gvkeys] sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings) # reads in all", "# nrows = 1000000 if tablename == 'secd': # this is the securities", "first time only # current_df.to_hdf(secd_filename, **hdf_settings_table) # appends to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table)", "TODO: get latest price if stopped trading during the year; figure out mergers/buyouts,", "date instead of hard copy big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings) del jobs del dfs", "end] # TODO: figure out why a few hundred are missing in the", "db.raw_sql(query_str) query_str = 'select {} from {}.{} WHERE datadate > \\'{}\\';'# and gvkey", "'prchd', 'prcld', 'prcod'] # WARNING: does not appear to work properly. probably a", "like 5 simultaneous queries is max -- run in parallel with ThreadPoolExecutor(max_workers=5) as", "with securities for ticker symbol securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf') # abbreviated securities", "# create dataframe with list of constituents for each day start = single_idx_df['from'].min()", "**hdf_settings_table) # appends to hdf store common_df.to_hdf(secd_filename, **hdf_settings_table) del current_df del securities del" ]
[ "random class Stack(object): def __init__(self): self.h = [] def push(self, x): heapq.heappush(self.h, (-len(self.h),", "pop(self): if self.empty(): return None _, x = heapq.heappop(self.h) return x def empty(self):", "(-len(self.h), x)) def pop(self): if self.empty(): return None _, x = heapq.heappop(self.h) return", "self.h = [] def push(self, x): heapq.heappush(self.h, (-len(self.h), x)) def pop(self): if self.empty():", "= [] def push(self, x): heapq.heappush(self.h, (-len(self.h), x)) def pop(self): if self.empty(): return", "heapq.heappush(self.h, (-len(self.h), x)) def pop(self): if self.empty(): return None _, x = heapq.heappop(self.h)", "x def empty(self): return len(self.h) == 0 def test(): items = [random.randrange(100) for", "in xrange(100)] s = Stack() for x in items: s.push(x) for x in", "items: s.push(x) for x in reversed(items): assert x == s.pop() print 'pass' def", "__init__(self): self.h = [] def push(self, x): heapq.heappush(self.h, (-len(self.h), x)) def pop(self): if", "push(self, x): heapq.heappush(self.h, (-len(self.h), x)) def pop(self): if self.empty(): return None _, x", "return x def empty(self): return len(self.h) == 0 def test(): items = [random.randrange(100)", "x in reversed(items): assert x == s.pop() print 'pass' def main(): test() if", "= heapq.heappop(self.h) return x def empty(self): return len(self.h) == 0 def test(): items", "self.empty(): return None _, x = heapq.heappop(self.h) return x def empty(self): return len(self.h)", "class Stack(object): def __init__(self): self.h = [] def push(self, x): heapq.heappush(self.h, (-len(self.h), x))", "None _, x = heapq.heappop(self.h) return x def empty(self): return len(self.h) == 0", "return len(self.h) == 0 def test(): items = [random.randrange(100) for _ in xrange(100)]", "test(): items = [random.randrange(100) for _ in xrange(100)] s = Stack() for x", "x)) def pop(self): if self.empty(): return None _, x = heapq.heappop(self.h) return x", "for x in reversed(items): assert x == 
s.pop() print 'pass' def main(): test()", "empty(self): return len(self.h) == 0 def test(): items = [random.randrange(100) for _ in", "items = [random.randrange(100) for _ in xrange(100)] s = Stack() for x in", "[random.randrange(100) for _ in xrange(100)] s = Stack() for x in items: s.push(x)", "heapq.heappop(self.h) return x def empty(self): return len(self.h) == 0 def test(): items =", "reversed(items): assert x == s.pop() print 'pass' def main(): test() if __name__ ==", "return None _, x = heapq.heappop(self.h) return x def empty(self): return len(self.h) ==", "Stack() for x in items: s.push(x) for x in reversed(items): assert x ==", "[] def push(self, x): heapq.heappush(self.h, (-len(self.h), x)) def pop(self): if self.empty(): return None", "0 def test(): items = [random.randrange(100) for _ in xrange(100)] s = Stack()", "_, x = heapq.heappop(self.h) return x def empty(self): return len(self.h) == 0 def", "s.push(x) for x in reversed(items): assert x == s.pop() print 'pass' def main():", "= [random.randrange(100) for _ in xrange(100)] s = Stack() for x in items:", "x == s.pop() print 'pass' def main(): test() if __name__ == '__main__': main()", "def pop(self): if self.empty(): return None _, x = heapq.heappop(self.h) return x def", "xrange(100)] s = Stack() for x in items: s.push(x) for x in reversed(items):", "import heapq import random class Stack(object): def __init__(self): self.h = [] def push(self,", "if self.empty(): return None _, x = heapq.heappop(self.h) return x def empty(self): return", "x in items: s.push(x) for x in reversed(items): assert x == s.pop() print", "for x in items: s.push(x) for x in reversed(items): assert x == s.pop()", "def __init__(self): self.h = [] def push(self, x): heapq.heappush(self.h, (-len(self.h), x)) def pop(self):", "assert x == s.pop() print 'pass' def main(): test() if __name__ == '__main__':", "def push(self, x): heapq.heappush(self.h, (-len(self.h), x)) def pop(self): if self.empty(): return None _,", 
"Stack(object): def __init__(self): self.h = [] def push(self, x): heapq.heappush(self.h, (-len(self.h), x)) def", "in reversed(items): assert x == s.pop() print 'pass' def main(): test() if __name__", "x): heapq.heappush(self.h, (-len(self.h), x)) def pop(self): if self.empty(): return None _, x =", "def test(): items = [random.randrange(100) for _ in xrange(100)] s = Stack() for", "in items: s.push(x) for x in reversed(items): assert x == s.pop() print 'pass'", "for _ in xrange(100)] s = Stack() for x in items: s.push(x) for", "x = heapq.heappop(self.h) return x def empty(self): return len(self.h) == 0 def test():", "len(self.h) == 0 def test(): items = [random.randrange(100) for _ in xrange(100)] s", "def empty(self): return len(self.h) == 0 def test(): items = [random.randrange(100) for _", "import random class Stack(object): def __init__(self): self.h = [] def push(self, x): heapq.heappush(self.h,", "heapq import random class Stack(object): def __init__(self): self.h = [] def push(self, x):", "== 0 def test(): items = [random.randrange(100) for _ in xrange(100)] s =", "_ in xrange(100)] s = Stack() for x in items: s.push(x) for x", "= Stack() for x in items: s.push(x) for x in reversed(items): assert x", "s = Stack() for x in items: s.push(x) for x in reversed(items): assert" ]
[ "dict): return 'Object should be a dict' elif len(objects['weight']) != len(objects['value']): return 'weight", "range(max_weight + 1)] for x in range(len(objects['weight']))] while no_items <= len(objects['weight']): while weight", "value pair should be of same length' no_items = 0 weight = 0", "5, 7, 8] # } if not isinstance(objects, dict): return 'Object should be", "objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] = max( objects['value'][no_items] + two_dim_array[no_items - 1][weight - objects['weight'][no_items]],", "max_weight): # object should be in the form of: # { # 'weight':", "isinstance(objects, dict): return 'Object should be a dict' elif len(objects['weight']) != len(objects['value']): return", "'value': [0, 4, 1, 5, 7, 8] # } if not isinstance(objects, dict):", "dict' elif len(objects['weight']) != len(objects['value']): return 'weight value pair should be of same", "be of same length' no_items = 0 weight = 0 two_dim_array = [[0", "- objects['weight'][no_items]], two_dim_array[no_items - 1][weight] ) else: two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight] weight", "+ two_dim_array[no_items - 1][weight - objects['weight'][no_items]], two_dim_array[no_items - 1][weight] ) else: two_dim_array[no_items][weight] =", "0 or weight == 0: two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items] <= weight: two_dim_array[no_items][weight]", "while weight <= max_weight: if no_items == 0 or weight == 0: two_dim_array[no_items][weight]", "5, 6, 2, 7, 1] # 'value': [0, 4, 1, 5, 7, 8]", "len(objects['weight']) != len(objects['value']): return 'weight value pair should be of same length' no_items", "len(objects['weight']): while weight <= max_weight: if no_items == 0 or weight == 0:", "two_dim_array = [[0 for i in range(max_weight + 1)] for x in range(len(objects['weight']))]", "# 'value': [0, 4, 1, 5, 7, 8] # } if not isinstance(objects,", "object should be in the form of: # { # 'weight': [0, 5,", "7, 8] # } if 
not isinstance(objects, dict): return 'Object should be a", "1][weight] ) else: two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight] weight += 1 no_items +=", "range(len(objects['weight']))] while no_items <= len(objects['weight']): while weight <= max_weight: if no_items == 0", "= max( objects['value'][no_items] + two_dim_array[no_items - 1][weight - objects['weight'][no_items]], two_dim_array[no_items - 1][weight] )", ") else: two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight] weight += 1 no_items += 1", "7, 1] # 'value': [0, 4, 1, 5, 7, 8] # } if", "two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight] weight += 1 no_items += 1 # print(two_dim_array[len(objects['weight'])][max_weight])", "if not isinstance(objects, dict): return 'Object should be a dict' elif len(objects['weight']) !=", "0 two_dim_array = [[0 for i in range(max_weight + 1)] for x in", "weight <= max_weight: if no_items == 0 or weight == 0: two_dim_array[no_items][weight] =", "in the form of: # { # 'weight': [0, 5, 6, 2, 7,", "weight: two_dim_array[no_items][weight] = max( objects['value'][no_items] + two_dim_array[no_items - 1][weight - objects['weight'][no_items]], two_dim_array[no_items -", "the form of: # { # 'weight': [0, 5, 6, 2, 7, 1]", "for x in range(len(objects['weight']))] while no_items <= len(objects['weight']): while weight <= max_weight: if", "<= max_weight: if no_items == 0 or weight == 0: two_dim_array[no_items][weight] = 0", "objects['weight'][no_items]], two_dim_array[no_items - 1][weight] ) else: two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight] weight +=", "== 0: two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] = max( objects['value'][no_items]", "should be in the form of: # { # 'weight': [0, 5, 6,", "= 0 weight = 0 two_dim_array = [[0 for i in range(max_weight +", "in range(len(objects['weight']))] while no_items <= len(objects['weight']): 
while weight <= max_weight: if no_items ==", "two_dim_array[no_items][weight] = max( objects['value'][no_items] + two_dim_array[no_items - 1][weight - objects['weight'][no_items]], two_dim_array[no_items - 1][weight]", "same length' no_items = 0 weight = 0 two_dim_array = [[0 for i", "or weight == 0: two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] =", "def knapsack_dynamic_solution(objects, max_weight): # object should be in the form of: # {", "8] # } if not isinstance(objects, dict): return 'Object should be a dict'", "two_dim_array[no_items - 1][weight] ) else: two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight] weight += 1", "max( objects['value'][no_items] + two_dim_array[no_items - 1][weight - objects['weight'][no_items]], two_dim_array[no_items - 1][weight] ) else:", "two_dim_array[no_items - 1][weight - objects['weight'][no_items]], two_dim_array[no_items - 1][weight] ) else: two_dim_array[no_items][weight] = two_dim_array[no_items", "!= len(objects['value']): return 'weight value pair should be of same length' no_items =", "1)] for x in range(len(objects['weight']))] while no_items <= len(objects['weight']): while weight <= max_weight:", "'Object should be a dict' elif len(objects['weight']) != len(objects['value']): return 'weight value pair", "<= len(objects['weight']): while weight <= max_weight: if no_items == 0 or weight ==", "+ 1)] for x in range(len(objects['weight']))] while no_items <= len(objects['weight']): while weight <=", "6, 2, 7, 1] # 'value': [0, 4, 1, 5, 7, 8] #", "[[0 for i in range(max_weight + 1)] for x in range(len(objects['weight']))] while no_items", "return 'weight value pair should be of same length' no_items = 0 weight", "- 1][weight - objects['weight'][no_items]], two_dim_array[no_items - 1][weight] ) else: two_dim_array[no_items][weight] = two_dim_array[no_items -", "{ # 'weight': [0, 5, 6, 2, 7, 1] # 'value': [0, 4,", "= 0 two_dim_array = [[0 for 
i in range(max_weight + 1)] for x", "else: two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight] weight += 1 no_items += 1 #", "weight == 0: two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] = max(", "# object should be in the form of: # { # 'weight': [0,", "while no_items <= len(objects['weight']): while weight <= max_weight: if no_items == 0 or", "} if not isinstance(objects, dict): return 'Object should be a dict' elif len(objects['weight'])", "1] # 'value': [0, 4, 1, 5, 7, 8] # } if not", "i in range(max_weight + 1)] for x in range(len(objects['weight']))] while no_items <= len(objects['weight']):", "1, 5, 7, 8] # } if not isinstance(objects, dict): return 'Object should", "knapsack_dynamic_solution(objects, max_weight): # object should be in the form of: # { #", "len(objects['value']): return 'weight value pair should be of same length' no_items = 0", "0: two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] = max( objects['value'][no_items] +", "<= weight: two_dim_array[no_items][weight] = max( objects['value'][no_items] + two_dim_array[no_items - 1][weight - objects['weight'][no_items]], two_dim_array[no_items", "= 0 elif objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] = max( objects['value'][no_items] + two_dim_array[no_items -", "return 'Object should be a dict' elif len(objects['weight']) != len(objects['value']): return 'weight value", "1][weight - objects['weight'][no_items]], two_dim_array[no_items - 1][weight] ) else: two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight]", "0 elif objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] = max( objects['value'][no_items] + two_dim_array[no_items - 1][weight", "# 'weight': [0, 5, 6, 2, 7, 1] # 'value': [0, 4, 1,", "[0, 5, 6, 2, 7, 1] # 'value': [0, 4, 1, 5, 7,", "# } if not isinstance(objects, dict): return 'Object 
should be a dict' elif", "== 0 or weight == 0: two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items] <= weight:", "be in the form of: # { # 'weight': [0, 5, 6, 2,", "length' no_items = 0 weight = 0 two_dim_array = [[0 for i in", "2, 7, 1] # 'value': [0, 4, 1, 5, 7, 8] # }", "no_items == 0 or weight == 0: two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items] <=", "- 1][weight] ) else: two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight] weight += 1 no_items", "# { # 'weight': [0, 5, 6, 2, 7, 1] # 'value': [0,", "= two_dim_array[no_items - 1][weight] weight += 1 no_items += 1 # print(two_dim_array[len(objects['weight'])][max_weight]) print(two_dim_array)", "weight = 0 two_dim_array = [[0 for i in range(max_weight + 1)] for", "'weight': [0, 5, 6, 2, 7, 1] # 'value': [0, 4, 1, 5,", "of same length' no_items = 0 weight = 0 two_dim_array = [[0 for", "if no_items == 0 or weight == 0: two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items]", "not isinstance(objects, dict): return 'Object should be a dict' elif len(objects['weight']) != len(objects['value']):", "in range(max_weight + 1)] for x in range(len(objects['weight']))] while no_items <= len(objects['weight']): while", "two_dim_array[no_items][weight] = 0 elif objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] = max( objects['value'][no_items] + two_dim_array[no_items", "should be of same length' no_items = 0 weight = 0 two_dim_array =", "of: # { # 'weight': [0, 5, 6, 2, 7, 1] # 'value':", "4, 1, 5, 7, 8] # } if not isinstance(objects, dict): return 'Object", "'weight value pair should be of same length' no_items = 0 weight =", "should be a dict' elif len(objects['weight']) != len(objects['value']): return 'weight value pair should", "no_items = 0 weight = 0 two_dim_array = [[0 for i in range(max_weight", "be a dict' elif len(objects['weight']) != len(objects['value']): return 'weight value pair should be", "elif 
objects['weight'][no_items] <= weight: two_dim_array[no_items][weight] = max( objects['value'][no_items] + two_dim_array[no_items - 1][weight -", "for i in range(max_weight + 1)] for x in range(len(objects['weight']))] while no_items <=", "[0, 4, 1, 5, 7, 8] # } if not isinstance(objects, dict): return", "0 weight = 0 two_dim_array = [[0 for i in range(max_weight + 1)]", "objects['value'][no_items] + two_dim_array[no_items - 1][weight - objects['weight'][no_items]], two_dim_array[no_items - 1][weight] ) else: two_dim_array[no_items][weight]", "a dict' elif len(objects['weight']) != len(objects['value']): return 'weight value pair should be of", "max_weight: if no_items == 0 or weight == 0: two_dim_array[no_items][weight] = 0 elif", "pair should be of same length' no_items = 0 weight = 0 two_dim_array", "form of: # { # 'weight': [0, 5, 6, 2, 7, 1] #", "= [[0 for i in range(max_weight + 1)] for x in range(len(objects['weight']))] while", "x in range(len(objects['weight']))] while no_items <= len(objects['weight']): while weight <= max_weight: if no_items", "elif len(objects['weight']) != len(objects['value']): return 'weight value pair should be of same length'", "no_items <= len(objects['weight']): while weight <= max_weight: if no_items == 0 or weight" ]
[ "code below because, the update wasn't working due to GSlides keeping (somewhere) a", "# if the file exists, delete it before uploading the new version Dev.pprint('deleting", "self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, file_name): file_metadata = self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id') return", "file_metadata = self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id') return None def upload_png_file_to_gdrive(self, png_file): #png_file", "GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id = self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if file_id: #", "file_id is None: # return self.gdrive.file_upload(png_file,'image/png', folder_id) # else: # return self.gdrive.file_update(png_file, 'image/png',", "if file_metadata: return file_metadata.get('id') return None def upload_png_file_to_gdrive(self, png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name", "below because, the update wasn't working due to GSlides keeping (somewhere) a cache", "update wasn't working due to GSlides keeping (somewhere) a cache of the previous", "because, the update wasn't working due to GSlides keeping (somewhere) a cache of", "exists, delete it before uploading the new version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return", "due to GSlides keeping (somewhere) a cache of the previous value # if", "= GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id = self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if file_id:", "osbot_utils.utils.Dev import Dev from osbot_utils.utils.Files import Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder =", "import Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): 
self.target_folder = 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def", "def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, file_name): file_metadata =", "GDrive(gsuite_secret_id) def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, file_name): file_metadata", "can't use the code below because, the update wasn't working due to GSlides", "to GSlides keeping (somewhere) a cache of the previous value # if file_id", "of the previous value # if file_id is None: # return self.gdrive.file_upload(png_file,'image/png', folder_id)", "GDrive from osbot_utils.utils.Dev import Dev from osbot_utils.utils.Files import Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None):", "# can't use the code below because, the update wasn't working due to", "= self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id') return None def upload_png_file_to_gdrive(self, png_file): #png_file =", "the file exists, delete it before uploading the new version Dev.pprint('deleting file: {0}'.format(file_id))", "= 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id')", "file_id = self.graph_id_in_gdrive(file_name) if file_id: # if the file exists, delete it before", "# if file_id is None: # return self.gdrive.file_upload(png_file,'image/png', folder_id) # else: # return", "from osbot_gsuite.apis.GDrive import GDrive from osbot_utils.utils.Dev import Dev from osbot_utils.utils.Files import Files class", "mime_type = 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, 
file_name): file_metadata = self.gdrive.find_by_name(file_name) if", "file_id: # if the file exists, delete it before uploading the new version", "is None: # return self.gdrive.file_upload(png_file,'image/png', folder_id) # else: # return self.gdrive.file_update(png_file, 'image/png', file_id", "<reponame>pbx-gs/gsbot-gsuite<gh_stars>1-10 from osbot_gsuite.apis.GDrive import GDrive from osbot_utils.utils.Dev import Dev from osbot_utils.utils.Files import Files", "#png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id = self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if", "= Files.file_name(png_file) folder_id = self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if file_id: # if the", "osbot_utils.utils.Files import Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id)", "mime_type).get('id') def graph_id_in_gdrive(self, file_name): file_metadata = self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id') return None", "if file_id is None: # return self.gdrive.file_upload(png_file,'image/png', folder_id) # else: # return self.gdrive.file_update(png_file,", "= 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, file_name): file_metadata = self.gdrive.find_by_name(file_name) if file_metadata:", "def graph_id_in_gdrive(self, file_name): file_metadata = self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id') return None def", "delete it before uploading the new version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file,", "import Dev from osbot_utils.utils.Files import Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs'", 
"self.graph_id_in_gdrive(file_name) if file_id: # if the file exists, delete it before uploading the", "from osbot_utils.utils.Dev import Dev from osbot_utils.utils.Files import Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder", "if the file exists, delete it before uploading the new version Dev.pprint('deleting file:", "version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't use the", "the previous value # if file_id is None: # return self.gdrive.file_upload(png_file,'image/png', folder_id) #", "file_name): file_metadata = self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id') return None def upload_png_file_to_gdrive(self, png_file):", "self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if file_id: # if the file exists, delete it", "osbot_gsuite.apis.GDrive import GDrive from osbot_utils.utils.Dev import Dev from osbot_utils.utils.Files import Files class GSBot_to_GDrive:", "wasn't working due to GSlides keeping (somewhere) a cache of the previous value", "a cache of the previous value # if file_id is None: # return", "cache of the previous value # if file_id is None: # return self.gdrive.file_upload(png_file,'image/png',", "the update wasn't working due to GSlides keeping (somewhere) a cache of the", "value # if file_id is None: # return self.gdrive.file_upload(png_file,'image/png', folder_id) # else: #", "the code below because, the update wasn't working due to GSlides keeping (somewhere)", "target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, file_name): file_metadata = self.gdrive.find_by_name(file_name)", "GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def 
target_folder_id(self): mime_type =", "None def upload_png_file_to_gdrive(self, png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id = self.target_folder_id()", "self.target_folder = 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder,", "__init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return", "from osbot_utils.utils.Files import Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs' self.gdrive =", "file_metadata: return file_metadata.get('id') return None def upload_png_file_to_gdrive(self, png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name =", "uploading the new version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id) #", "'image/png', folder_id) # can't use the code below because, the update wasn't working", "Files.file_name(png_file) folder_id = self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if file_id: # if the file", "return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, file_name): file_metadata = self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id')", "self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't use the code below because, the update wasn't", "None: # return self.gdrive.file_upload(png_file,'image/png', folder_id) # else: # return self.gdrive.file_update(png_file, 'image/png', file_id )", "folder_id) # can't use the code below because, the update wasn't working due", "if file_id: # if the file exists, delete it before 
uploading the new", "def __init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder'", "return None def upload_png_file_to_gdrive(self, png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id =", "'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def", "{0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't use the code below because,", "file_metadata.get('id') return None def upload_png_file_to_gdrive(self, png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id", "before uploading the new version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id)", "file exists, delete it before uploading the new version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id)", "(somewhere) a cache of the previous value # if file_id is None: #", "= GDrive(gsuite_secret_id) def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, file_name):", "self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't use the code below because, the", "return self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't use the code below because, the update", "keeping (somewhere) a cache of the previous value # if file_id is None:", "return file_metadata.get('id') return None def upload_png_file_to_gdrive(self, png_file): 
#png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file)", "png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id = self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name)", "Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def target_folder_id(self):", "= self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if file_id: # if the file exists, delete", "def upload_png_file_to_gdrive(self, png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id = self.target_folder_id() file_id", "working due to GSlides keeping (somewhere) a cache of the previous value #", "file_name = Files.file_name(png_file) folder_id = self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if file_id: # if", "self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id') return None def upload_png_file_to_gdrive(self, png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name)", "file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't use the code below", "import GDrive from osbot_utils.utils.Dev import Dev from osbot_utils.utils.Files import Files class GSBot_to_GDrive: def", "new version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't use", "class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs' self.gdrive = GDrive(gsuite_secret_id) def target_folder_id(self): mime_type", "self.gdrive = GDrive(gsuite_secret_id) def target_folder_id(self): mime_type = 'application/vnd.google-apps.folder' return 
self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self,", "folder_id = self.target_folder_id() file_id = self.graph_id_in_gdrive(file_name) if file_id: # if the file exists,", "the new version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't", "upload_png_file_to_gdrive(self, png_file): #png_file = GSBot_Helper().get_png_from_saved_graph(graph_name) file_name = Files.file_name(png_file) folder_id = self.target_folder_id() file_id =", "GSlides keeping (somewhere) a cache of the previous value # if file_id is", "previous value # if file_id is None: # return self.gdrive.file_upload(png_file,'image/png', folder_id) # else:", "Dev from osbot_utils.utils.Files import Files class GSBot_to_GDrive: def __init__(self,gsuite_secret_id=None): self.target_folder = 'gsbot-graphs' self.gdrive", "it before uploading the new version Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png',", "'application/vnd.google-apps.folder' return self.gdrive.find_by_name(self.target_folder, mime_type).get('id') def graph_id_in_gdrive(self, file_name): file_metadata = self.gdrive.find_by_name(file_name) if file_metadata: return", "Dev.pprint('deleting file: {0}'.format(file_id)) self.gdrive.file_delete(file_id) return self.gdrive.file_upload(png_file, 'image/png', folder_id) # can't use the code", "graph_id_in_gdrive(self, file_name): file_metadata = self.gdrive.find_by_name(file_name) if file_metadata: return file_metadata.get('id') return None def upload_png_file_to_gdrive(self,", "= self.graph_id_in_gdrive(file_name) if file_id: # if the file exists, delete it before uploading", "use the code below because, the update wasn't working due to GSlides keeping" ]
[ "+ 3*ygrid v = 2*xgrid + 7*ygrid (dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid,", "`(nbr images - 1, nbr points, 2)` Returns ------- 3D array of shape", "np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid, ygrid,", "= get_LagrangeStrainTensor(xgrid, ygrid, u, v) # array([[[[14., 23.], # [23., 36.]], np.testing.assert_almost_equal(E[:, :,", "bilinear interpolation (ie. quad4 element). The xy-grid has to be rectangular. used to", "cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite difference gradient for the vector fields u and", "= np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v = u[mask], v[mask] x, y = points[mask, :].T", "on a grid returns the centered finite difference for each cell Cell abcd:", "------- 4D array of shape (n_y, n_x, 2, 2) Lagrange Strain Tensor for", "Strain Tensor for all grid points \"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u,", "xgrid, ygrid : 2d arrays of shape (n_y, n_x) underformed grid points u,", "(u, v) could include NaN Returns ------- nd-array (2, 3) coefficients matrix (affine", "1/2*( grad(u) + grad(u)^T ) Parameters ---------- xgrid, ygrid : 2d arrays of", "difference gradient for the vector fields u and v evaluated at cell center", "(ua+uc)/2 du_y = (ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:, :] + u[:-1,", "grad(u) + Id E = 1/2*( FF^T - Id ) Parameters ---------- xgrid,", "2)` \"\"\" # add zeros at the begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :]", "+ Id # Lagrange Strain Tensor E = 0.5*( np.einsum('...ki,...kj', F, F) -", "add zeros at the begining Parameters ---------- displ_img_to_img : 3D array 3D array", "displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0) return displ_image_to_ref def 
get_center_points(xgrid, ygrid): \"\"\"Cell center", "integrate_displacement(displ_img_to_img): \"\"\"Sum the image-to-image displacement value to obtain image-to-reference displacement, add zeros at", "v_center_y[:, :-1] delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :] return delta_u_x, delta_u_y, delta_v_x,", "1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1]) return center_x, center_y", "displacements : nd-array (nbr_points, 2) displacement for each point (u, v) could include", "= 0.5*(u[:, 1:] + u[:, :-1]) v_center_y = 0.5*(v[1:, :] + v[:-1, :])", "get_center_points(xgrid, ygrid): \"\"\"Cell center point coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1]", "- (ua+uc)/2 du_y = (ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:, :] +", ": nd-array (nbr_points, 2) coordinates of points (x, y) displacements : nd-array (nbr_points,", "= u - u_linear residuals_y = v - v_linear residuals_xy = np.vstack([residuals_x, residuals_y]).T", ": 2d arrays of shape (n_y, n_x) displacements values (u along x, v", "# Residuals: u_linear = np.matmul( M, p_ux ) v_linear = np.matmul( M, p_uy", "3D array of shape `(nbr images - 1, nbr points, 2)` Returns -------", "5, 7)**0.5) u = 5*xgrid + 3*ygrid v = 2*xgrid + 7*ygrid (dudx,", "(see p47 T. 
Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear", "ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5, 7)**0.5) u = 5*xgrid + 3*ygrid", ":-1]) center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1,", "axes=(0, 1, 3, 2)) # G >>> array([[1., 3.], [5., 7.]]) # Strain", "(x, y) displacements : nd-array (nbr_points, 2) displacement for each point (u, v)", "u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def", "displ_image_to_ref = np.cumsum(displ_zero, axis=0) return displ_image_to_ref def get_center_points(xgrid, ygrid): \"\"\"Cell center point coordinates\"\"\"", "at the begining Parameters ---------- displ_img_to_img : 3D array 3D array of shape", "for the vector fields u and v evaluated at cell center This is", "image-to-reference displacement, add zeros at the begining Parameters ---------- displ_img_to_img : 3D array", "du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v) dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid,", "# v = 5*xgrid + 7*ygrid G = np.stack([grad_u, grad_v], axis=3) G =", "= v_center_y[:, 1:] - v_center_y[:, :-1] delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :]", "+ xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1]) return center_x, center_y def", "the vector fields u and v evaluated at cell center This is not", ":, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :,", "- u_linear residuals_y = v - v_linear residuals_xy = np.vstack([residuals_x, residuals_y]).T # Merge", "nbr points, 2)` Returns ------- 3D array of shape `(nbr images, nbr points,", "0 ,1])) # --- def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small Displacement Strain 
Tensor", "grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u = np.stack(grad_u, axis=2) grad_v = np.stack(grad_v,", ":] return delta_u_x, delta_u_y, delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite difference", "M = np.vstack([x, y, ones]).T p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None)", "np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear = np.matmul( M, p_ux ) v_linear = np.matmul( M,", "= (ub+ud)/2 - (ua+uc)/2 du_y = (ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:,", "1, 5)**2, np.linspace(1, 5, 7)**0.5) u = 5*xgrid + 3*ygrid v = 2*xgrid", "delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite difference gradient for the vector fields", ",1])) np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,1],", "0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1", "0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1])) # --- def", "mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v = u[mask], v[mask] x, y = points[mask,", "ignored NaN values: residuals_NaN = np.full(displacements.shape, np.nan) residuals_NaN[mask, :] = residuals_xy return coefficients,", "y = points[mask, :].T ones = np.ones_like(x) M = np.vstack([x, y, ones]).T p_uy,", "du_y = (ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:, :] + u[:-1, :])", "p47 T. Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear =", ") Parameters ---------- xgrid, ygrid : 2d arrays of shape (n_y, n_x) underformed", "# --- test cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5, 7)**0.5)", "np.linspace(1, 5, 7)) u = 1*xgrid + 3*ygrid v = 5*xgrid + 7*ygrid", "xy-grid has to be rectangular. 
used to computed the \"Displacement gradient tensor\" see", "zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero,", "Returns ------- 4D array of shape (n_y, n_x, 2, 2) Lagrange Strain Tensor", "np.stack(grad_v, axis=2) # u = 1*xgrid + 3*ygrid # v = 5*xgrid +", "= np.vstack([residuals_x, residuals_y]).T # Merge with ignored NaN values: residuals_NaN = np.full(displacements.shape, np.nan)", "#sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear = np.matmul( M, p_ux", ">>> array([[1., 3.], [5., 7.]]) Id = np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :] =", "ygrid, u, v): \"\"\"Small Displacement Strain Tensor (E) E = 1/2*( grad(u) +", "center point coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:]", "dv_x, dv_y = cellcentered_diff_2D(u, v) dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid) return", "n_x, 2, 2) Lagrange Strain Tensor for all grid points \"\"\" grad_u, grad_v", ":, 0 ,1])) # --- def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small Displacement Strain", "v): \"\"\"Finite difference gradient for the vector fields u and v evaluated at", "# G >>> array([[1., 3.], [5., 7.]]) Id = np.ones((*grad_u.shape[:2], 2, 2)) Id[:,", "u_center_x = 0.5*(u[:, 1:] + u[:, :-1]) v_center_y = 0.5*(v[1:, :] + v[:-1,", "get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange Strain Tensor (E) F = grad(u) + Id", "Tensor for all grid points \"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v)", "at the begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0)", "np.stack([grad_u, grad_v], axis=3) G = np.transpose(G, axes=(0, 1, 3, 2)) # G >>>", "+ 3*ygrid # v = 5*xgrid + 7*ygrid G = np.stack([grad_u, grad_v], axis=3)", "is not a proper 
bilinear interpolation (ie. quad4 element). The xy-grid has to", "5*xgrid + 7*ygrid G = np.stack([grad_u, grad_v], axis=3) G = np.transpose(G, axes=(0, 1,", "= np.vstack([x, y, ones]).T p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None) p_ux,", "y) sampled on a grid returns the centered finite difference for each cell", "points[mask, :].T ones = np.ones_like(x) M = np.vstack([x, y, ones]).T p_uy, _residual_y, _rank,", "Parameters ---------- points : nd-array (nbr_points, 2) coordinates of points (x, y) displacements", "+ │ c───d du_x = (ub+ud)/2 - (ua+uc)/2 du_y = (ua+ub)/2 - (uc+ud)/2", "\"Displacement gradient tensor\" see Bower p.14 output: (dudx, dudy), (dvdx, dvdy) \"\"\" du_x,", "= np.stack([grad_u, grad_v], axis=3) G = np.transpose(G, axes=(0, 1, 3, 2)) # G", ":-1]) delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1] delta_u_y = u_center_x[1:, :] -", "[5., 7.]]) # Strain Tensor E = 0.5*( G + np.transpose(G, axes=(0, 1,", "+ t Parameters ---------- points : nd-array (nbr_points, 2) coordinates of points (x,", "+ 3*ygrid v = 5*xgrid + 7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid, u, v)", "u[:, :-1]) v_center_y = 0.5*(v[1:, :] + v[:-1, :]) v_center_x = 0.5*(v[:, 1:]", "23*np.ones_like(E[:, :, 0 ,1])) # --- def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small Displacement", "1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:,", "0.5*( G + np.transpose(G, axes=(0, 1, 3, 2)) ) return E def bilinear_fit(points,", "np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear = np.matmul( M, p_ux ) v_linear", "G = np.stack([grad_u, grad_v], axis=3) G = np.transpose(G, axes=(0, 1, 3, 2)) #", "ones = np.ones_like(x) M = np.vstack([x, y, ones]).T p_uy, _residual_y, _rank, _s =", "= u_center_y[:, 1:] - u_center_y[:, :-1] delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :]", "u, v) grad_u = np.stack(grad_u, axis=2) grad_v = np.stack(grad_v, 
axis=2) # u =", "= np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :] = np.eye(2, 2) # Id[0, 0] >>", "2) # Id[0, 0] >> array([[1., 0.], [0., 1.]]) F = G +", "points, 2)` Returns ------- 3D array of shape `(nbr images, nbr points, 2)`", "7.]]) Id = np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :] = np.eye(2, 2) # Id[0,", "evaluated at cell center This is not a proper bilinear interpolation (ie. quad4", "v = 2*xgrid + 7*ygrid (dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u,", "center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1])", "(dvdx, dvdy) \"\"\" du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v) dx, _ydx, _xdy,", "rcond=None) p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None) coefficients = np.vstack([p_ux, p_uy])", "np.vstack([x, y, ones]).T p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x,", "[[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] # --- test cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1, 1,", "def bilinear_fit(points, displacements): \"\"\"Performs a bilinear fit on the displacements field Solve the", "= np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5, 7)) u = 1*xgrid + 3*ygrid v", "3, 2)) # G >>> array([[1., 3.], [5., 7.]]) Id = np.ones((*grad_u.shape[:2], 2,", "proper bilinear interpolation (ie. quad4 element). The xy-grid has to be rectangular. 
used", "axis=2) grad_v = np.stack(grad_v, axis=2) # u = 1*xgrid + 3*ygrid # v", "u_center_y[:, 1:] - u_center_y[:, :-1] delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :] delta_v_x", "coefficients matrix (affine transformation + translation) nd-array (nbr_points, 2) residuals for each points", "test cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5, 7)**0.5) u =", ":, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :,", "3) coefficients matrix (affine transformation + translation) nd-array (nbr_points, 2) residuals for each", "1, 3, 2)) # G >>> array([[1., 3.], [5., 7.]]) Id = np.ones((*grad_u.shape[:2],", "= 1*xgrid + 3*ygrid v = 5*xgrid + 7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid,", "NaN Returns ------- nd-array (2, 3) coefficients matrix (affine transformation + translation) nd-array", "along x, v along y) Returns ------- 4D array of shape (n_y, n_x,", "x, y = points[mask, :].T ones = np.ones_like(x) M = np.vstack([x, y, ones]).T", "the \"Displacement gradient tensor\" see Bower p.14 output: (dudx, dudy), (dvdx, dvdy) \"\"\"", "# array([[[[14., 23.], # [23., 36.]], np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0", "v_center_x[:-1, :] return delta_u_x, delta_u_y, delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite", "(n_y, n_x, 2, 2) Lagrange Strain Tensor for all grid points \"\"\" grad_u,", "u, v): \"\"\"Finite difference gradient for the vector fields u and v evaluated", "2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange Strain Tensor", "(ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:, :] + u[:-1, :]) u_center_x =", "0 ,1])) np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1", "G + np.transpose(G, axes=(0, 1, 3, 2)) ) return 
E def bilinear_fit(points, displacements):", "0] >> array([[1., 0.], [0., 1.]]) F = G + Id # Lagrange", "coordinates of points (x, y) displacements : nd-array (nbr_points, 2) displacement for each", "= np.eye(2, 2) # Id[0, 0] >> array([[1., 0.], [0., 1.]]) F =", "y) Returns ------- 4D array of shape (n_y, n_x, 2, 2) Lagrange Strain", "T. Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear = np.matmul(", "[dv_x/dx, dv_y/dy]] # --- test cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1,", "equation u = A*x + t Parameters ---------- points : nd-array (nbr_points, 2)", "--- def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small Displacement Strain Tensor (E) E =", "all grid points \"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u =", "\"\"\" for a given 2D vector field [u, v](x, y) sampled on a", ": 3D array 3D array of shape `(nbr images - 1, nbr points,", ":] + u[:-1, :]) u_center_x = 0.5*(u[:, 1:] + u[:, :-1]) v_center_y =", "v[mask] x, y = points[mask, :].T ones = np.ones_like(x) M = np.vstack([x, y,", "+ 7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid, u, v) # array([[[[14., 23.], # [23.,", "v_center_x = 0.5*(v[:, 1:] + v[:, :-1]) delta_u_x = u_center_y[:, 1:] - u_center_y[:,", "0.], [0., 1.]]) F = G + Id # Lagrange Strain Tensor E", "--- def get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange Strain Tensor (E) F = grad(u)", "point (u, v) could include NaN Returns ------- nd-array (2, 3) coefficients matrix", "2D vector field [u, v](x, y) sampled on a grid returns the centered", "3D array 3D array of shape `(nbr images - 1, nbr points, 2)`", "cell Cell abcd: a───b │ + │ c───d du_x = (ub+ud)/2 - (ua+uc)/2", "np.transpose(G, axes=(0, 1, 3, 2)) # G >>> array([[1., 3.], [5., 7.]]) Id", "displacements field Solve the equation u = A*x + t Parameters ---------- points", "FF^T - Id ) 
Parameters ---------- xgrid, ygrid : 2d arrays of shape", "np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v = u[mask], v[mask] x, y = points[mask, :].T ones", "xgrid[:-1, 1:] + xgrid[:-1, :-1]) return center_x, center_y def cellcentered_diff_2D(u, v): \"\"\" for", "np.einsum('...ki,...kj', F, F) - Id ) return E # --- test get_LagrangeStrainTensor xgrid,", "zeros at the begining Parameters ---------- displ_img_to_img : 3D array 3D array of", "F = grad(u) + Id E = 1/2*( FF^T - Id ) Parameters", "E = 1/2*( FF^T - Id ) Parameters ---------- xgrid, ygrid : 2d", ":, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1])) # ---", "matrix (affine transformation + translation) nd-array (nbr_points, 2) residuals for each points \"\"\"", "Unbiased estimator variance (see p47 T. Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1))", "v_center_x[1:, :] - v_center_x[:-1, :] return delta_u_x, delta_u_y, delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid,", "v_center_y[:, 1:] - v_center_y[:, :-1] delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :] return", "= cellcentered_diff_2D(u, v) dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy],", "displacements values (u along x, v along y) Returns ------- 4D array of", "7)**0.5) u = 5*xgrid + 3*ygrid v = 2*xgrid + 7*ygrid (dudx, dudy),", "ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # ---", "# --- def get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange Strain Tensor (E) F =", "test get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5, 7)) u =", ":] - v_center_x[:-1, :] return delta_u_x, delta_u_y, delta_v_x, delta_v_y def 
cellcentered_grad_rect2D(xgrid, ygrid, u,", "Solve the equation u = A*x + t Parameters ---------- points : nd-array", "# u = 1*xgrid + 3*ygrid # v = 5*xgrid + 7*ygrid G", "array of shape `(nbr images, nbr points, 2)` \"\"\" # add zeros at", "- Id ) return E # --- test get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1,", "Strain Tensor (E) F = grad(u) + Id E = 1/2*( FF^T -", "return E def bilinear_fit(points, displacements): \"\"\"Performs a bilinear fit on the displacements field", "ygrid, u, v) # array([[[[14., 23.], # [23., 36.]], np.testing.assert_almost_equal(E[:, :, 0 ,0],", "\"\"\"Performs a bilinear fit on the displacements field Solve the equation u =", "y, ones]).T p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x, _rank,", "arrays of shape (n_y, n_x) displacements values (u along x, v along y)", "array of shape (n_y, n_x, 2, 2) Lagrange Strain Tensor for all grid", "points u, v : 2d arrays of shape (n_y, n_x) displacements values (u", "= np.transpose(G, axes=(0, 1, 3, 2)) # G >>> array([[1., 3.], [5., 7.]])", "grad_v = np.stack(grad_v, axis=2) # u = 1*xgrid + 3*ygrid # v =", "F, F) - Id ) return E # --- test get_LagrangeStrainTensor xgrid, ygrid", "4D array of shape (n_y, n_x, 2, 2) Lagrange Strain Tensor for all", "np.vstack([p_ux, p_uy]) ## Unbiased estimator variance (see p47 T. 
Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1))", "of shape (n_y, n_x) underformed grid points u, v : 2d arrays of", "(nbr_points, 2) coordinates of points (x, y) displacements : nd-array (nbr_points, 2) displacement", "3*ygrid v = 2*xgrid + 7*ygrid (dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid,", "1, 5), np.linspace(1, 5, 7)) u = 1*xgrid + 3*ygrid v = 5*xgrid", "Id E = 1/2*( FF^T - Id ) Parameters ---------- xgrid, ygrid :", "array([[1., 0.], [0., 1.]]) F = G + Id # Lagrange Strain Tensor", ": nd-array (nbr_points, 2) displacement for each point (u, v) could include NaN", "\"\"\" u, v = displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v = u[mask],", "residuals_y = v - v_linear residuals_xy = np.vstack([residuals_x, residuals_y]).T # Merge with ignored", "F = G + Id # Lagrange Strain Tensor E = 0.5*( np.einsum('...ki,...kj',", "0.5*(u[1:, :] + u[:-1, :]) u_center_x = 0.5*(u[:, 1:] + u[:, :-1]) v_center_y", "the centered finite difference for each cell Cell abcd: a───b │ + │", "Strain Tensor E = 0.5*( np.einsum('...ki,...kj', F, F) - Id ) return E", "bilinear_fit(points, displacements): \"\"\"Performs a bilinear fit on the displacements field Solve the equation", "v) # array([[[[14., 23.], # [23., 36.]], np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :,", "for all grid points \"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u", "= np.stack(grad_u, axis=2) grad_v = np.stack(grad_v, axis=2) # u = 1*xgrid + 3*ygrid", "(n_y, n_x) underformed grid points u, v : 2d arrays of shape (n_y,", ":] - u_center_x[:-1, :] delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1] delta_v_y =", "nd-array (nbr_points, 2) residuals for each points \"\"\" u, v = displacements.T mask", "np.linspace(1, 5, 7)**0.5) u = 5*xgrid + 3*ygrid v = 2*xgrid + 7*ygrid", "u_center_x[:-1, :] delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1] delta_v_y = v_center_x[1:, :]", "3*ygrid v = 
5*xgrid + 7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid, u, v) #", "2)) ) return E def bilinear_fit(points, displacements): \"\"\"Performs a bilinear fit on the", "0.5*( np.einsum('...ki,...kj', F, F) - Id ) return E # --- test get_LagrangeStrainTensor", ":, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :,", "v) grad_u = np.stack(grad_u, axis=2) grad_v = np.stack(grad_v, axis=2) # u = 1*xgrid", "3.], [5., 7.]]) Id = np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :] = np.eye(2, 2)", "7)) u = 1*xgrid + 3*ygrid v = 5*xgrid + 7*ygrid E =", "xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5, 7)**0.5) u = 5*xgrid +", "return delta_u_x, delta_u_y, delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite difference gradient", "to be rectangular. used to computed the \"Displacement gradient tensor\" see Bower p.14", "np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5, 7)**0.5) u = 5*xgrid + 3*ygrid v =", "E = get_LagrangeStrainTensor(xgrid, ygrid, u, v) # array([[[[14., 23.], # [23., 36.]], np.testing.assert_almost_equal(E[:,", "du_y/dy], [dv_x/dx, dv_y/dy]] # --- test cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2,", "0.5*(u[:, 1:] + u[:, :-1]) v_center_y = 0.5*(v[1:, :] + v[:-1, :]) v_center_x", "the begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref", "+ ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:, 1:]", "G >>> array([[1., 3.], [5., 7.]]) Id = np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :]", "= 1/2*( FF^T - Id ) Parameters ---------- xgrid, ygrid : 2d arrays", "+ translation) nd-array (nbr_points, 2) residuals for each points \"\"\" u, v =", "np.vstack([residuals_x, residuals_y]).T # Merge with ignored NaN values: residuals_NaN = np.full(displacements.shape, np.nan) residuals_NaN[mask,", "- 
v_center_y[:, :-1] delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :] return delta_u_x, delta_u_y,", "\"\"\"Cell center point coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1,", "\"\"\" du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v) dx, _ydx, _xdy, dy =", "--- test cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5, 7)**0.5) u", "residuals for each points \"\"\" u, v = displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v)))", "return E # --- test get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1,", "dvdy) \"\"\" du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v) dx, _ydx, _xdy, dy", "(E) E = 1/2*( grad(u) + grad(u)^T ) Parameters ---------- xgrid, ygrid :", "Tensor (E) F = grad(u) + Id E = 1/2*( FF^T - Id", "axes=(0, 1, 3, 2)) ) return E def bilinear_fit(points, displacements): \"\"\"Performs a bilinear", "M, p_ux ) v_linear = np.matmul( M, p_uy ) residuals_x = u -", "np def integrate_displacement(displ_img_to_img): \"\"\"Sum the image-to-image displacement value to obtain image-to-reference displacement, add", "could include NaN Returns ------- nd-array (2, 3) coefficients matrix (affine transformation +", "= v - v_linear residuals_xy = np.vstack([residuals_x, residuals_y]).T # Merge with ignored NaN", "\"\"\"Small Displacement Strain Tensor (E) E = 1/2*( grad(u) + grad(u)^T ) Parameters", "def cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite difference gradient for the vector fields u", "dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy,", "grad(u)^T ) Parameters ---------- xgrid, ygrid : 2d arrays of shape (n_y, n_x)", "0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1]) center_x 
=", "7.]]) # Strain Tensor E = 0.5*( G + np.transpose(G, axes=(0, 1, 3,", "+ np.transpose(G, axes=(0, 1, 3, 2)) ) return E def bilinear_fit(points, displacements): \"\"\"Performs", "array([[1., 3.], [5., 7.]]) # Strain Tensor E = 0.5*( G + np.transpose(G,", "(n_y, n_x) displacements values (u along x, v along y) Returns ------- 4D", "np.transpose(G, axes=(0, 1, 3, 2)) ) return E def bilinear_fit(points, displacements): \"\"\"Performs a", "Id ) return E # --- test get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1, 1,", "#sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear = np.matmul( M, p_ux ) v_linear =", "(ie. quad4 element). The xy-grid has to be rectangular. used to computed the", "variance (see p47 T. Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals:", "# G >>> array([[1., 3.], [5., 7.]]) # Strain Tensor E = 0.5*(", "2*xgrid + 7*ygrid (dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx,", "v[:-1, :]) v_center_x = 0.5*(v[:, 1:] + v[:, :-1]) delta_u_x = u_center_y[:, 1:]", "fields u and v evaluated at cell center This is not a proper", "= 5*xgrid + 7*ygrid G = np.stack([grad_u, grad_v], axis=3) G = np.transpose(G, axes=(0,", "2d arrays of shape (n_y, n_x) underformed grid points u, v : 2d", ":].T ones = np.ones_like(x) M = np.vstack([x, y, ones]).T p_uy, _residual_y, _rank, _s", "to computed the \"Displacement gradient tensor\" see Bower p.14 output: (dudx, dudy), (dvdx,", "to obtain image-to-reference displacement, add zeros at the begining Parameters ---------- displ_img_to_img :", "v_center_y = 0.5*(v[1:, :] + v[:-1, :]) v_center_x = 0.5*(v[:, 1:] + v[:,", "ygrid, u, v) grad_u = np.stack(grad_u, axis=2) grad_v = np.stack(grad_v, axis=2) # u", "displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0) return 
displ_image_to_ref def get_center_points(xgrid,", "_residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None) coefficients = np.vstack([p_ux, p_uy]) ## Unbiased", "du_x = (ub+ud)/2 - (ua+uc)/2 du_y = (ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y =", "axes=(0, 1, 3, 2)) # G >>> array([[1., 3.], [5., 7.]]) Id =", "Displacement Strain Tensor (E) E = 1/2*( grad(u) + grad(u)^T ) Parameters ----------", "a bilinear fit on the displacements field Solve the equation u = A*x", "u and v evaluated at cell center This is not a proper bilinear", "displacement, add zeros at the begining Parameters ---------- displ_img_to_img : 3D array 3D", "center_y def cellcentered_diff_2D(u, v): \"\"\" for a given 2D vector field [u, v](x,", "\"\"\" u_center_y = 0.5*(u[1:, :] + u[:-1, :]) u_center_x = 0.5*(u[:, 1:] +", "+ ygrid[:-1, 1:] + ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1]", "1:] + xgrid[:-1, :-1]) return center_x, center_y def cellcentered_diff_2D(u, v): \"\"\" for a", "5*xgrid + 7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid, u, v) # array([[[[14., 23.], #", "# --- def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small Displacement Strain Tensor (E) E", "delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :] return delta_u_x, delta_u_y, delta_v_x, delta_v_y def", "36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1])) #", "(nbr_points, 2) displacement for each point (u, v) could include NaN Returns -------", "= grad(u) + Id E = 1/2*( FF^T - Id ) Parameters ----------", "finite difference for each cell Cell abcd: a───b │ + │ c───d du_x", "dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx,", "not a proper bilinear interpolation (ie. quad4 element). 
The xy-grid has to be", "u - u_linear residuals_y = v - v_linear residuals_xy = np.vstack([residuals_x, residuals_y]).T #", "grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u = np.stack(grad_u, axis=2) grad_v =", "1:] + u[:, :-1]) v_center_y = 0.5*(v[1:, :] + v[:-1, :]) v_center_x =", "np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0) return", "vector field [u, v](x, y) sampled on a grid returns the centered finite", ":-1] delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :] return delta_u_x, delta_u_y, delta_v_x, delta_v_y", "v : 2d arrays of shape (n_y, n_x) displacements values (u along x,", "points \"\"\" u, v = displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v =", "1:] + v[:, :-1]) delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1] delta_u_y =", "= (ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:, :] + u[:-1, :]) u_center_x", ") return E # --- test get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5),", "np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1])) # --- def get_InfinitesimalStrainTensor(xgrid, ygrid,", "rectangular. 
used to computed the \"Displacement gradient tensor\" see Bower p.14 output: (dudx,", "cellcentered_diff_2D(u, v): \"\"\" for a given 2D vector field [u, v](x, y) sampled", "v along y) Returns ------- 4D array of shape (n_y, n_x, 2, 2)", "= cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u = np.stack(grad_u, axis=2) grad_v = np.stack(grad_v, axis=2)", "v) could include NaN Returns ------- nd-array (2, 3) coefficients matrix (affine transformation", "3.], [5., 7.]]) # Strain Tensor E = 0.5*( G + np.transpose(G, axes=(0,", "# Lagrange Strain Tensor E = 0.5*( np.einsum('...ki,...kj', F, F) - Id )", "23.], # [23., 36.]], np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:,", "7*ygrid G = np.stack([grad_u, grad_v], axis=3) G = np.transpose(G, axes=(0, 1, 3, 2))", "_residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M,", "= 1/2*( grad(u) + grad(u)^T ) Parameters ---------- xgrid, ygrid : 2d arrays", "np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:,", "2)) Id[:, :] = np.eye(2, 2) # Id[0, 0] >> array([[1., 0.], [0.,", "= np.linalg.lstsq(M, u, rcond=None) coefficients = np.vstack([p_ux, p_uy]) ## Unbiased estimator variance (see", "= 0.5*(v[1:, :] + v[:-1, :]) v_center_x = 0.5*(v[:, 1:] + v[:, :-1])", "begining Parameters ---------- displ_img_to_img : 3D array 3D array of shape `(nbr images", "u = 5*xgrid + 3*ygrid v = 2*xgrid + 7*ygrid (dudx, dudy), (dvdx,", "+ ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:]", "return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] # --- test cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1,", "ygrid): \"\"\"Cell center point coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] +", "= 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1]) return", "5), 
np.linspace(1, 5, 7)) u = 1*xgrid + 3*ygrid v = 5*xgrid +", "import numpy as np def integrate_displacement(displ_img_to_img): \"\"\"Sum the image-to-image displacement value to obtain", "cellcentered_diff_2D(u, v) dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx,", "return displ_image_to_ref def get_center_points(xgrid, ygrid): \"\"\"Cell center point coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:]", "array([[[[14., 23.], # [23., 36.]], np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1]))", "= 1*xgrid + 3*ygrid # v = 5*xgrid + 7*ygrid G = np.stack([grad_u,", "displacement for each point (u, v) could include NaN Returns ------- nd-array (2,", "# [23., 36.]], np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :,", "and v evaluated at cell center This is not a proper bilinear interpolation", "_ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] # ---", "+ 7*ygrid G = np.stack([grad_u, grad_v], axis=3) G = np.transpose(G, axes=(0, 1, 3,", "v): \"\"\" for a given 2D vector field [u, v](x, y) sampled on", "cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] # --- test cellcentered_grad_rect2D xgrid, ygrid", ":-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1]) return center_x, center_y def cellcentered_diff_2D(u, v):", "xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1]) return center_x, center_y def cellcentered_diff_2D(u,", "begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref =", "E def bilinear_fit(points, displacements): \"\"\"Performs a bilinear fit on the displacements field Solve", "interpolation (ie. quad4 element). The xy-grid has to be rectangular. 
used to computed", "= u[mask], v[mask] x, y = points[mask, :].T ones = np.ones_like(x) M =", "array 3D array of shape `(nbr images - 1, nbr points, 2)` Returns", "= np.matmul( M, p_uy ) residuals_x = u - u_linear residuals_y = v", "ygrid[:-1, 1:] + ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] +", ": 2d arrays of shape (n_y, n_x) underformed grid points u, v :", "get_LagrangeStrainTensor(xgrid, ygrid, u, v) # array([[[[14., 23.], # [23., 36.]], np.testing.assert_almost_equal(E[:, :, 0", "axis=0) return displ_image_to_ref def get_center_points(xgrid, ygrid): \"\"\"Cell center point coordinates\"\"\" center_y = 0.25*(ygrid[1:,", "np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0) return displ_image_to_ref def get_center_points(xgrid, ygrid): \"\"\"Cell", "u, v : 2d arrays of shape (n_y, n_x) displacements values (u along", "u_linear residuals_y = v - v_linear residuals_xy = np.vstack([residuals_x, residuals_y]).T # Merge with", "points : nd-array (nbr_points, 2) coordinates of points (x, y) displacements : nd-array", "return center_x, center_y def cellcentered_diff_2D(u, v): \"\"\" for a given 2D vector field", "Id = np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :] = np.eye(2, 2) # Id[0, 0]", "= 0.5*( G + np.transpose(G, axes=(0, 1, 3, 2)) ) return E def", "v evaluated at cell center This is not a proper bilinear interpolation (ie.", "gradient tensor\" see Bower p.14 output: (dudx, dudy), (dvdx, dvdy) \"\"\" du_x, du_y,", "2) coordinates of points (x, y) displacements : nd-array (nbr_points, 2) displacement for", "xgrid[:-1, :-1]) return center_x, center_y def cellcentered_diff_2D(u, v): \"\"\" for a given 2D", "│ c───d du_x = (ub+ud)/2 - (ua+uc)/2 du_y = (ua+ub)/2 - (uc+ud)/2 \"\"\"", "has to be rectangular. 
used to computed the \"Displacement gradient tensor\" see Bower", "1:] + ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1,", "nd-array (nbr_points, 2) coordinates of points (x, y) displacements : nd-array (nbr_points, 2)", "residuals_xy = np.vstack([residuals_x, residuals_y]).T # Merge with ignored NaN values: residuals_NaN = np.full(displacements.shape,", "Tensor E = 0.5*( np.einsum('...ki,...kj', F, F) - Id ) return E #", "# Merge with ignored NaN values: residuals_NaN = np.full(displacements.shape, np.nan) residuals_NaN[mask, :] =", "the equation u = A*x + t Parameters ---------- points : nd-array (nbr_points,", "np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5, 7)) u = 1*xgrid + 3*ygrid v =", "bilinear fit on the displacements field Solve the equation u = A*x +", "2, 2) Lagrange Strain Tensor for all grid points \"\"\" grad_u, grad_v =", "= np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0) return displ_image_to_ref def get_center_points(xgrid, ygrid):", "3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange", "- (uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:, :] + u[:-1, :]) u_center_x = 0.5*(u[:,", "_s = np.linalg.lstsq(M, u, rcond=None) coefficients = np.vstack([p_ux, p_uy]) ## Unbiased estimator variance", "M, p_uy ) residuals_x = u - u_linear residuals_y = v - v_linear", "5)**2, np.linspace(1, 5, 7)**0.5) u = 5*xgrid + 3*ygrid v = 2*xgrid +", ":-1] delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :] delta_v_x = v_center_y[:, 1:] -", "= 5*xgrid + 3*ygrid v = 2*xgrid + 7*ygrid (dudx, dudy), (dvdx, dvdy)", "1:] - v_center_y[:, :-1] delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :] return delta_u_x,", "- u_center_y[:, :-1] delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :] delta_v_x = v_center_y[:,", "# add zeros at the begining 
zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero =", "dv_y/dy]] # --- test cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5,", ",1], 23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1]))", "0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1]) return center_x,", "v_linear = np.matmul( M, p_uy ) residuals_x = u - u_linear residuals_y =", "v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid,", "5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid, ygrid, u,", "Strain Tensor E = 0.5*( G + np.transpose(G, axes=(0, 1, 3, 2)) )", ":, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :,", "nd-array (nbr_points, 2) displacement for each point (u, v) could include NaN Returns", "v[:, :-1]) delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1] delta_u_y = u_center_x[1:, :]", "\"\"\"Sum the image-to-image displacement value to obtain image-to-reference displacement, add zeros at the", "## Unbiased estimator variance (see p47 T. 
Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y =", "E = 0.5*( np.einsum('...ki,...kj', F, F) - Id ) return E # ---", "grad_u = np.stack(grad_u, axis=2) grad_v = np.stack(grad_v, axis=2) # u = 1*xgrid +", "zeros at the begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero = np.concatenate([zeros, displ_img_to_img],", "fit on the displacements field Solve the equation u = A*x + t", ":] delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1] delta_v_y = v_center_x[1:, :] -", ":] + v[:-1, :]) v_center_x = 0.5*(v[:, 1:] + v[:, :-1]) delta_u_x =", "dy = cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] # --- test cellcentered_grad_rect2D", "2)) # G >>> array([[1., 3.], [5., 7.]]) Id = np.ones((*grad_u.shape[:2], 2, 2))", "2) residuals for each points \"\"\" u, v = displacements.T mask = np.logical_not(np.logical_or(np.isnan(u),", "2, 2)) Id[:, :] = np.eye(2, 2) # Id[0, 0] >> array([[1., 0.],", "3D array of shape `(nbr images, nbr points, 2)` \"\"\" # add zeros", "= np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None) coefficients", "include NaN Returns ------- nd-array (2, 3) coefficients matrix (affine transformation + translation)", "center_x, center_y def cellcentered_diff_2D(u, v): \"\"\" for a given 2D vector field [u,", "transformation + translation) nd-array (nbr_points, 2) residuals for each points \"\"\" u, v", "(dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx))", ") return E def bilinear_fit(points, displacements): \"\"\"Performs a bilinear fit on the displacements", "= points[mask, :].T ones = np.ones_like(x) M = np.vstack([x, y, ones]).T p_uy, _residual_y,", "n_x) underformed grid points u, v : 2d arrays of shape (n_y, n_x)", "u, v) # array([[[[14., 23.], # [23., 36.]], 
np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:,", "F) - Id ) return E # --- test get_LagrangeStrainTensor xgrid, ygrid =", "of points (x, y) displacements : nd-array (nbr_points, 2) displacement for each point", "np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:,", "the image-to-image displacement value to obtain image-to-reference displacement, add zeros at the begining", ":-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:,", "delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1] delta_v_y = v_center_x[1:, :] - v_center_x[:-1,", "ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:, 1:] +", "image-to-image displacement value to obtain image-to-reference displacement, add zeros at the begining Parameters", "= u_center_x[1:, :] - u_center_x[:-1, :] delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1]", "on the displacements field Solve the equation u = A*x + t Parameters", "v = u[mask], v[mask] x, y = points[mask, :].T ones = np.ones_like(x) M", "_s = np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None)", "\"\"\" # add zeros at the begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero", ":-1]) return center_x, center_y def cellcentered_diff_2D(u, v): \"\"\" for a given 2D vector", "nbr points, 2)` \"\"\" # add zeros at the begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis,", "------- 3D array of shape `(nbr images, nbr points, 2)` \"\"\" # add", "def cellcentered_diff_2D(u, v): \"\"\" for a given 2D vector field [u, v](x, y)", "Parameters ---------- xgrid, ygrid : 2d arrays of shape (n_y, n_x) underformed grid", "images, nbr points, 2)` \"\"\" # add zeros at the begining zeros =", "np.eye(2, 2) # Id[0, 0] >> array([[1., 0.], [0., 1.]]) F = G", "\"\"\"Lagrange Strain Tensor (E) F = grad(u) + Id E = 1/2*( FF^T", ":] = 
np.eye(2, 2) # Id[0, 0] >> array([[1., 0.], [0., 1.]]) F", "cell center This is not a proper bilinear interpolation (ie. quad4 element). The", "value to obtain image-to-reference displacement, add zeros at the begining Parameters ---------- displ_img_to_img", "array([[1., 3.], [5., 7.]]) Id = np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :] = np.eye(2,", "Merge with ignored NaN values: residuals_NaN = np.full(displacements.shape, np.nan) residuals_NaN[mask, :] = residuals_xy", "a given 2D vector field [u, v](x, y) sampled on a grid returns", ">> array([[1., 0.], [0., 1.]]) F = G + Id # Lagrange Strain", "cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) #", "0 ,1], 23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0", "[5., 7.]]) Id = np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :] = np.eye(2, 2) #", "displ_img_to_img : 3D array 3D array of shape `(nbr images - 1, nbr", "= 0.5*( np.einsum('...ki,...kj', F, F) - Id ) return E # --- test", "difference for each cell Cell abcd: a───b │ + │ c───d du_x =", "---------- displ_img_to_img : 3D array 3D array of shape `(nbr images - 1,", "p_uy ) residuals_x = u - u_linear residuals_y = v - v_linear residuals_xy", "u[mask], v[mask] x, y = points[mask, :].T ones = np.ones_like(x) M = np.vstack([x,", "= 0.5*(u[1:, :] + u[:-1, :]) u_center_x = 0.5*(u[:, 1:] + u[:, :-1])", "displacements): \"\"\"Performs a bilinear fit on the displacements field Solve the equation u", "c───d du_x = (ub+ud)/2 - (ua+uc)/2 du_y = (ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y", "residuals_x = u - u_linear residuals_y = v - v_linear residuals_xy = np.vstack([residuals_x,", "1 ,1], 36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0", 
"ygrid : 2d arrays of shape (n_y, n_x) underformed grid points u, v", "points, 2)` \"\"\" # add zeros at the begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :,", "delta_u_y, delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite difference gradient for the", "E = 0.5*( G + np.transpose(G, axes=(0, 1, 3, 2)) ) return E", "numpy as np def integrate_displacement(displ_img_to_img): \"\"\"Sum the image-to-image displacement value to obtain image-to-reference", "G = np.transpose(G, axes=(0, 1, 3, 2)) # G >>> array([[1., 3.], [5.,", "= np.matmul( M, p_ux ) v_linear = np.matmul( M, p_uy ) residuals_x =", "a proper bilinear interpolation (ie. quad4 element). The xy-grid has to be rectangular.", "np.linalg.lstsq(M, u, rcond=None) coefficients = np.vstack([p_ux, p_uy]) ## Unbiased estimator variance (see p47", "coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1,", "points (x, y) displacements : nd-array (nbr_points, 2) displacement for each point (u,", "[0., 1.]]) F = G + Id # Lagrange Strain Tensor E =", "3, 2)) ) return E def bilinear_fit(points, displacements): \"\"\"Performs a bilinear fit on", "u, rcond=None) coefficients = np.vstack([p_ux, p_uy]) ## Unbiased estimator variance (see p47 T.", "`(nbr images, nbr points, 2)` \"\"\" # add zeros at the begining zeros", "+ Id E = 1/2*( FF^T - Id ) Parameters ---------- xgrid, ygrid", "centered finite difference for each cell Cell abcd: a───b │ + │ c───d", "each points \"\"\" u, v = displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v", "v, rcond=None) p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None) coefficients = np.vstack([p_ux,", "1, nbr points, 2)` Returns ------- 3D array of shape `(nbr images, nbr", "element). The xy-grid has to be rectangular. 
used to computed the \"Displacement gradient", "= 5*xgrid + 7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid, u, v) # array([[[[14., 23.],", "values (u along x, v along y) Returns ------- 4D array of shape", "= cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx))", "for a given 2D vector field [u, v](x, y) sampled on a grid", "dudy), (dvdx, dvdy) \"\"\" du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v) dx, _ydx,", "(nbr_points, 2) residuals for each points \"\"\" u, v = displacements.T mask =", "(E) F = grad(u) + Id E = 1/2*( FF^T - Id )", "v = 5*xgrid + 7*ygrid G = np.stack([grad_u, grad_v], axis=3) G = np.transpose(G,", "of shape `(nbr images, nbr points, 2)` \"\"\" # add zeros at the", "given 2D vector field [u, v](x, y) sampled on a grid returns the", "n_x) displacements values (u along x, v along y) Returns ------- 4D array", "_rank, _s = np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u,", ") v_linear = np.matmul( M, p_uy ) residuals_x = u - u_linear residuals_y", "0.5*(v[1:, :] + v[:-1, :]) v_center_x = 0.5*(v[:, 1:] + v[:, :-1]) delta_u_x", ",1])) # --- def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small Displacement Strain Tensor (E)", "This is not a proper bilinear interpolation (ie. quad4 element). 
The xy-grid has", "Id # Lagrange Strain Tensor E = 0.5*( np.einsum('...ki,...kj', F, F) - Id", "Returns ------- 3D array of shape `(nbr images, nbr points, 2)` \"\"\" #", "grid returns the centered finite difference for each cell Cell abcd: a───b │", "= v_center_x[1:, :] - v_center_x[:-1, :] return delta_u_x, delta_u_y, delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid,", "Id[0, 0] >> array([[1., 0.], [0., 1.]]) F = G + Id #", "(dudx, dudy), (dvdx, dvdy) \"\"\" du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v) dx,", ":-1]) v_center_y = 0.5*(v[1:, :] + v[:-1, :]) v_center_x = 0.5*(v[:, 1:] +", "along y) Returns ------- 4D array of shape (n_y, n_x, 2, 2) Lagrange", "5*xgrid + 3*ygrid v = 2*xgrid + 7*ygrid (dudx, dudy), (dvdx, dvdy) =", "23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:,", ",1])) np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,0],", "Strain Tensor (E) E = 1/2*( grad(u) + grad(u)^T ) Parameters ---------- xgrid,", "(2, 3) coefficients matrix (affine transformation + translation) nd-array (nbr_points, 2) residuals for", "for each point (u, v) could include NaN Returns ------- nd-array (2, 3)", "center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1])", "x, v along y) Returns ------- 4D array of shape (n_y, n_x, 2,", "arrays of shape (n_y, n_x) underformed grid points u, v : 2d arrays", "# Strain Tensor E = 0.5*( G + np.transpose(G, axes=(0, 1, 3, 2))", "as np def integrate_displacement(displ_img_to_img): \"\"\"Sum the image-to-image displacement value to obtain image-to-reference displacement,", "np.ones((*grad_u.shape[:2], 2, 2)) Id[:, :] = np.eye(2, 2) # Id[0, 0] >> array([[1.,", "\"\"\"Finite difference gradient for the vector fields u and v evaluated at cell", "= displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v = 
u[mask], v[mask] x, y", ",1])) np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1])) # --- def get_InfinitesimalStrainTensor(xgrid,", "- v_center_x[:-1, :] return delta_u_x, delta_u_y, delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid, u, v):", "1, 3, 2)) # G >>> array([[1., 3.], [5., 7.]]) # Strain Tensor", "delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :] delta_v_x = v_center_y[:, 1:] - v_center_y[:,", "np.matmul( M, p_ux ) v_linear = np.matmul( M, p_uy ) residuals_x = u", "G + Id # Lagrange Strain Tensor E = 0.5*( np.einsum('...ki,...kj', F, F)", "each point (u, v) could include NaN Returns ------- nd-array (2, 3) coefficients", "a grid returns the centered finite difference for each cell Cell abcd: a───b", "│ + │ c───d du_x = (ub+ud)/2 - (ua+uc)/2 du_y = (ua+ub)/2 -", "0.5*(v[:, 1:] + v[:, :-1]) delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1] delta_u_y", ") residuals_x = u - u_linear residuals_y = v - v_linear residuals_xy =", "+ v[:-1, :]) v_center_x = 0.5*(v[:, 1:] + v[:, :-1]) delta_u_x = u_center_y[:,", "Cell abcd: a───b │ + │ c───d du_x = (ub+ud)/2 - (ua+uc)/2 du_y", "(uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:, :] + u[:-1, :]) u_center_x = 0.5*(u[:, 1:]", "the displacements field Solve the equation u = A*x + t Parameters ----------", "= np.ones_like(x) M = np.vstack([x, y, ones]).T p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M,", "np.matmul( M, p_uy ) residuals_x = u - u_linear residuals_y = v -", "cellcentered_grad_rect2D xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5, 7)**0.5) u = 5*xgrid", "images - 1, nbr points, 2)` Returns ------- 3D array of shape `(nbr", "residuals_y]).T # Merge with ignored NaN values: residuals_NaN = np.full(displacements.shape, np.nan) residuals_NaN[mask, :]", "gradient for the vector fields u and v evaluated at cell center This", "1*xgrid + 3*ygrid # v = 5*xgrid + 7*ygrid G = np.stack([grad_u, grad_v],", "axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0) 
return displ_image_to_ref def get_center_points(xgrid, ygrid): \"\"\"Cell center point", "v): \"\"\"Small Displacement Strain Tensor (E) E = 1/2*( grad(u) + grad(u)^T )", "field [u, v](x, y) sampled on a grid returns the centered finite difference", ":]) u_center_x = 0.5*(u[:, 1:] + u[:, :-1]) v_center_y = 0.5*(v[1:, :] +", "u = 1*xgrid + 3*ygrid # v = 5*xgrid + 7*ygrid G =", "u[:-1, :]) u_center_x = 0.5*(u[:, 1:] + u[:, :-1]) v_center_y = 0.5*(v[1:, :]", "np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:,", "see Bower p.14 output: (dudx, dudy), (dvdx, dvdy) \"\"\" du_x, du_y, dv_x, dv_y", "xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5, 7)) u = 1*xgrid +", "t Parameters ---------- points : nd-array (nbr_points, 2) coordinates of points (x, y)", "delta_u_x, delta_u_y, delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite difference gradient for", "of shape (n_y, n_x, 2, 2) Lagrange Strain Tensor for all grid points", "estimator variance (see p47 T. 
Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) #", "ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] # --- test cellcentered_grad_rect2D xgrid, ygrid =", "Tensor E = 0.5*( G + np.transpose(G, axes=(0, 1, 3, 2)) ) return", "= A*x + t Parameters ---------- points : nd-array (nbr_points, 2) coordinates of", "= G + Id # Lagrange Strain Tensor E = 0.5*( np.einsum('...ki,...kj', F,", "- v_linear residuals_xy = np.vstack([residuals_x, residuals_y]).T # Merge with ignored NaN values: residuals_NaN", "1*xgrid + 3*ygrid v = 5*xgrid + 7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid, u,", "Bower p.14 output: (dudx, dudy), (dvdx, dvdy) \"\"\" du_x, du_y, dv_x, dv_y =", "grad_v], axis=3) G = np.transpose(G, axes=(0, 1, 3, 2)) # G >>> array([[1.,", "[23., 36.]], np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 0", "1.]]) F = G + Id # Lagrange Strain Tensor E = 0.5*(", "field Solve the equation u = A*x + t Parameters ---------- points :", "v): \"\"\"Lagrange Strain Tensor (E) F = grad(u) + Id E = 1/2*(", ",0], 14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1]))", "def get_center_points(xgrid, ygrid): \"\"\"Cell center point coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:,", "def get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange Strain Tensor (E) F = grad(u) +", "points \"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u = np.stack(grad_u, axis=2)", "14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:,", "Tensor (E) E = 1/2*( grad(u) + grad(u)^T ) Parameters ---------- xgrid, ygrid", "returns the centered finite difference for each cell Cell abcd: a───b │ +", "for each cell Cell abcd: a───b │ + │ c───d du_x = (ub+ud)/2", "of shape 
(n_y, n_x) displacements values (u along x, v along y) Returns", "2d arrays of shape (n_y, n_x) displacements values (u along x, v along", "= np.cumsum(displ_zero, axis=0) return displ_image_to_ref def get_center_points(xgrid, ygrid): \"\"\"Cell center point coordinates\"\"\" center_y", "p_uy]) ## Unbiased estimator variance (see p47 T. Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y", "+ grad(u)^T ) Parameters ---------- xgrid, ygrid : 2d arrays of shape (n_y,", "quad4 element). The xy-grid has to be rectangular. used to computed the \"Displacement", "5, 7)) u = 1*xgrid + 3*ygrid v = 5*xgrid + 7*ygrid E", "u_linear = np.matmul( M, p_ux ) v_linear = np.matmul( M, p_uy ) residuals_x", "= np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear = np.matmul( M, p_ux )", "delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1] delta_u_y = u_center_x[1:, :] - u_center_x[:-1,", "\"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u = np.stack(grad_u, axis=2) grad_v", "underformed grid points u, v : 2d arrays of shape (n_y, n_x) displacements", "be rectangular. 
used to computed the \"Displacement gradient tensor\" see Bower p.14 output:", "# Id[0, 0] >> array([[1., 0.], [0., 1.]]) F = G + Id", "np.cumsum(displ_zero, axis=0) return displ_image_to_ref def get_center_points(xgrid, ygrid): \"\"\"Cell center point coordinates\"\"\" center_y =", "u_center_y[:, :-1] delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :] delta_v_x = v_center_y[:, 1:]", "= 0.5*(v[:, 1:] + v[:, :-1]) delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1]", "7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid, u, v) # array([[[[14., 23.], # [23., 36.]],", "delta_v_x, delta_v_y def cellcentered_grad_rect2D(xgrid, ygrid, u, v): \"\"\"Finite difference gradient for the vector", "p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x, _rank, _s =", "+ v[:, :-1]) delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1] delta_u_y = u_center_x[1:,", "u_center_y = 0.5*(u[1:, :] + u[:-1, :]) u_center_x = 0.5*(u[:, 1:] + u[:,", "p_ux ) v_linear = np.matmul( M, p_uy ) residuals_x = u - u_linear", "np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None) coefficients =", "1, 3, 2)) ) return E def bilinear_fit(points, displacements): \"\"\"Performs a bilinear fit", "v = displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v = u[mask], v[mask] x,", "grad(u) + grad(u)^T ) Parameters ---------- xgrid, ygrid : 2d arrays of shape", "- u_center_x[:-1, :] delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1] delta_v_y = v_center_x[1:,", "shape (n_y, n_x, 2, 2) Lagrange Strain Tensor for all grid points \"\"\"", "E # --- test get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5,", "shape `(nbr images, nbr points, 2)` \"\"\" # add zeros at the begining", "(ub+ud)/2 - (ua+uc)/2 du_y = (ua+ub)/2 - (uc+ud)/2 \"\"\" u_center_y = 0.5*(u[1:, :]", "u = A*x + t Parameters ---------- points : nd-array (nbr_points, 2) coordinates", "v - v_linear residuals_xy = 
np.vstack([residuals_x, residuals_y]).T # Merge with ignored NaN values:", "------- nd-array (2, 3) coefficients matrix (affine transformation + translation) nd-array (nbr_points, 2)", "shape (n_y, n_x) displacements values (u along x, v along y) Returns -------", "NaN values: residuals_NaN = np.full(displacements.shape, np.nan) residuals_NaN[mask, :] = residuals_xy return coefficients, residuals_NaN", "= cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] # --- test cellcentered_grad_rect2D xgrid,", "(u along x, v along y) Returns ------- 4D array of shape (n_y,", "p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None) coefficients = np.vstack([p_ux, p_uy]) ##", "np.stack(grad_u, axis=2) grad_v = np.stack(grad_v, axis=2) # u = 1*xgrid + 3*ygrid #", "ygrid, u, v): \"\"\"Finite difference gradient for the vector fields u and v", "u = 1*xgrid + 3*ygrid v = 5*xgrid + 7*ygrid E = get_LagrangeStrainTensor(xgrid,", "3*ygrid # v = 5*xgrid + 7*ygrid G = np.stack([grad_u, grad_v], axis=3) G", "np.transpose(G, axes=(0, 1, 3, 2)) # G >>> array([[1., 3.], [5., 7.]]) #", "np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange Strain Tensor (E)", "ygrid[:-1, :-1]) center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] +", "= np.vstack([p_ux, p_uy]) ## Unbiased estimator variance (see p47 T. Hastie) #sigma_hat_x =", "shape (n_y, n_x) underformed grid points u, v : 2d arrays of shape", "np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange Strain", "grid points u, v : 2d arrays of shape (n_y, n_x) displacements values", "translation) nd-array (nbr_points, 2) residuals for each points \"\"\" u, v = displacements.T", "center This is not a proper bilinear interpolation (ie. quad4 element). 
The xy-grid", "= np.stack(grad_v, axis=2) # u = 1*xgrid + 3*ygrid # v = 5*xgrid", "point coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] +", "_xdy, dy = cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] # --- test", "def integrate_displacement(displ_img_to_img): \"\"\"Sum the image-to-image displacement value to obtain image-to-reference displacement, add zeros", "2)) # G >>> array([[1., 3.], [5., 7.]]) # Strain Tensor E =", "displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v = u[mask], v[mask] x, y =", "(dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx))", "get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5, 7)) u = 1*xgrid", "+ u[:-1, :]) u_center_x = 0.5*(u[:, 1:] + u[:, :-1]) v_center_y = 0.5*(v[1:,", "np.isnan(v))) u, v = u[mask], v[mask] x, y = points[mask, :].T ones =", "of shape `(nbr images - 1, nbr points, 2)` Returns ------- 3D array", ":]) v_center_x = 0.5*(v[:, 1:] + v[:, :-1]) delta_u_x = u_center_y[:, 1:] -", "_rank, _s = np.linalg.lstsq(M, u, rcond=None) coefficients = np.vstack([p_ux, p_uy]) ## Unbiased estimator", "vector fields u and v evaluated at cell center This is not a", "A*x + t Parameters ---------- points : nd-array (nbr_points, 2) coordinates of points", "Lagrange Strain Tensor for all grid points \"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid,", "G >>> array([[1., 3.], [5., 7.]]) # Strain Tensor E = 0.5*( G", "E = 1/2*( grad(u) + grad(u)^T ) Parameters ---------- xgrid, ygrid : 2d", ":, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1])) # --- def get_InfinitesimalStrainTensor(xgrid, ygrid, u,", "The xy-grid has to be rectangular. 
used to computed the \"Displacement gradient tensor\"", "v = 5*xgrid + 7*ygrid E = get_LagrangeStrainTensor(xgrid, ygrid, u, v) # array([[[[14.,", "p.14 output: (dudx, dudy), (dvdx, dvdy) \"\"\" du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u,", "--- test get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5, 7)) u", "y) displacements : nd-array (nbr_points, 2) displacement for each point (u, v) could", ":] displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0) return displ_image_to_ref def", "+ u[:, :-1]) v_center_y = 0.5*(v[1:, :] + v[:-1, :]) v_center_x = 0.5*(v[:,", "dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]] #", "Hastie) #sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1)) #sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear = np.matmul( M,", "= np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1)) # Residuals: u_linear = np.matmul( M, p_ux ) v_linear = np.matmul(", "Returns ------- nd-array (2, 3) coefficients matrix (affine transformation + translation) nd-array (nbr_points,", "+ xgrid[:-1, 1:] + xgrid[:-1, :-1]) return center_x, center_y def cellcentered_diff_2D(u, v): \"\"\"", "+ xgrid[:-1, :-1]) return center_x, center_y def cellcentered_diff_2D(u, v): \"\"\" for a given", "- 1, nbr points, 2)` Returns ------- 3D array of shape `(nbr images,", "grid points \"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u = np.stack(grad_u,", "7*ygrid (dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx)) np.testing.assert_almost_equal(dudy,", "nd-array (2, 3) coefficients matrix (affine transformation + translation) nd-array (nbr_points, 2) residuals", "ygrid = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5, 7)) u = 1*xgrid + 3*ygrid", ">>> array([[1., 3.], [5., 7.]]) # Strain 
Tensor E = 0.5*( G +", "v) dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx, du_y/dy], [dv_x/dx, dv_y/dy]]", "7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid, ygrid, u, v): \"\"\"Lagrange Strain Tensor (E) F", "Lagrange Strain Tensor E = 0.5*( np.einsum('...ki,...kj', F, F) - Id ) return", "v_linear residuals_xy = np.vstack([residuals_x, residuals_y]).T # Merge with ignored NaN values: residuals_NaN =", "a───b │ + │ c───d du_x = (ub+ud)/2 - (ua+uc)/2 du_y = (ua+ub)/2", "du_y, dv_x, dv_y = cellcentered_diff_2D(u, v) dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid)", "Parameters ---------- displ_img_to_img : 3D array 3D array of shape `(nbr images -", "(affine transformation + translation) nd-array (nbr_points, 2) residuals for each points \"\"\" u,", "coefficients = np.vstack([p_ux, p_uy]) ## Unbiased estimator variance (see p47 T. Hastie) #sigma_hat_x", "u, v): \"\"\"Lagrange Strain Tensor (E) F = grad(u) + Id E =", "with ignored NaN values: residuals_NaN = np.full(displacements.shape, np.nan) residuals_NaN[mask, :] = residuals_xy return", "output: (dudx, dudy), (dvdx, dvdy) \"\"\" du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v)", "# --- test get_LagrangeStrainTensor xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(1, 5, 7))", "get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small Displacement Strain Tensor (E) E = 1/2*( grad(u)", "Id ) Parameters ---------- xgrid, ygrid : 2d arrays of shape (n_y, n_x)", "= np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0)", "= 2*xgrid + 7*ygrid (dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v)", ":, :] displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0) displ_image_to_ref = np.cumsum(displ_zero, axis=0) return displ_image_to_ref", "[u, v](x, y) sampled on a grid returns the centered finite difference for", 
"rcond=None) coefficients = np.vstack([p_ux, p_uy]) ## Unbiased estimator variance (see p47 T. Hastie)", "= np.meshgrid(np.linspace(-1, 1, 5)**2, np.linspace(1, 5, 7)**0.5) u = 5*xgrid + 3*ygrid v", "1/2*( FF^T - Id ) Parameters ---------- xgrid, ygrid : 2d arrays of", "obtain image-to-reference displacement, add zeros at the begining Parameters ---------- displ_img_to_img : 3D", "at cell center This is not a proper bilinear interpolation (ie. quad4 element).", "- Id ) Parameters ---------- xgrid, ygrid : 2d arrays of shape (n_y,", "axis=2) # u = 1*xgrid + 3*ygrid # v = 5*xgrid + 7*ygrid", "axis=3) G = np.transpose(G, axes=(0, 1, 3, 2)) # G >>> array([[1., 3.],", "1 ,0], 23*np.ones_like(E[:, :, 0 ,1])) # --- def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v):", "1:] - u_center_y[:, :-1] delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :] delta_v_x =", "np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx)) np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx)) # --- def get_LagrangeStrainTensor(xgrid, ygrid, u, v):", "+ 7*ygrid (dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v) np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx))", "---------- xgrid, ygrid : 2d arrays of shape (n_y, n_x) underformed grid points", "np.ones_like(x) M = np.vstack([x, y, ones]).T p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v,", "def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small Displacement Strain Tensor (E) E = 1/2*(", "for each points \"\"\" u, v = displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u,", "ygrid, u, v): \"\"\"Lagrange Strain Tensor (E) F = grad(u) + Id E", "cellcentered_grad_rect2D(xgrid, ygrid, u, v) grad_u = np.stack(grad_u, axis=2) grad_v = np.stack(grad_v, axis=2) #", "displ_image_to_ref def get_center_points(xgrid, ygrid): \"\"\"Cell center point coordinates\"\"\" center_y = 0.25*(ygrid[1:, 1:] +", "2) displacement 
for each point (u, v) could include NaN Returns ------- nd-array", "ones]).T p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None) p_ux, _residual_x, _rank, _s", "36.]], np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 0 ,1],", "displacement value to obtain image-to-reference displacement, add zeros at the begining Parameters ----------", ",0], 23*np.ones_like(E[:, :, 0 ,1])) # --- def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v): \"\"\"Small", "3, 2)) # G >>> array([[1., 3.], [5., 7.]]) # Strain Tensor E", "Residuals: u_linear = np.matmul( M, p_ux ) v_linear = np.matmul( M, p_uy )", ",1], 36*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1]))", "each cell Cell abcd: a───b │ + │ c───d du_x = (ub+ud)/2 -", "u_center_x[1:, :] - u_center_x[:-1, :] delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1] delta_v_y", "shape `(nbr images - 1, nbr points, 2)` Returns ------- 3D array of", "= 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1]) center_x", ":, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :,", "2) Lagrange Strain Tensor for all grid points \"\"\" grad_u, grad_v = cellcentered_grad_rect2D(xgrid,", "u, v = u[mask], v[mask] x, y = points[mask, :].T ones = np.ones_like(x)", "computed the \"Displacement gradient tensor\" see Bower p.14 output: (dudx, dudy), (dvdx, dvdy)", "the begining Parameters ---------- displ_img_to_img : 3D array 3D array of shape `(nbr", "abcd: a───b │ + │ c───d du_x = (ub+ud)/2 - (ua+uc)/2 du_y =", "Id[:, :] = np.eye(2, 2) # Id[0, 0] >> array([[1., 0.], [0., 1.]])", "add zeros at the begining zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :] displ_zero = np.concatenate([zeros,", "u, v): \"\"\"Small Displacement Strain Tensor (E) E = 1/2*( grad(u) + grad(u)^T", "v](x, y) sampled on a grid returns the centered finite 
difference for each", "---------- points : nd-array (nbr_points, 2) coordinates of points (x, y) displacements :", "tensor\" see Bower p.14 output: (dudx, dudy), (dvdx, dvdy) \"\"\" du_x, du_y, dv_x,", "u, v = displacements.T mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v))) u, v = u[mask], v[mask]", "sampled on a grid returns the centered finite difference for each cell Cell", "array of shape `(nbr images - 1, nbr points, 2)` Returns ------- 3D", "dv_y = cellcentered_diff_2D(u, v) dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid) return [[du_x/dx,", "2)` Returns ------- 3D array of shape `(nbr images, nbr points, 2)` \"\"\"", "used to computed the \"Displacement gradient tensor\" see Bower p.14 output: (dudx, dudy),", "0 ,0], 14*np.ones_like(E[:, :, 0 ,1])) np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0" ]
[ "import torch import torch.nn as nn import torch.nn.functional as F import sys import", "x): assert self.weight is not None and self.bias is not None, \"Please assign", "norm == 'in': self.norm = nn.InstanceNorm1d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim)", "anatomical information from the image. \"\"\" self.width = width self.height = height self.ndf", "bilinear) self.up2 = Up(512, 256 // factor, bilinear) self.up3 = Up(256, 128 //", "nn.InstanceNorm1d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'none' or", "nn.Linear(self.ndf * 8 * 7 * 7, 1) self.main = nn.Sequential(*self.main) def forward(self,", "= None # just dummy buffers, not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def", "#64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2,", "anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect')", "self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma =", "i in range(n_blk - 2): self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] self.model +=", "= Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self, x, mask,", "as F import sys import time from models.unet_parts import * from models.blocks import", "import * from models.spade_resblk import * device = torch.device('cuda:0') # content class Segmentor(nn.Module):", "= nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() -", "= nn.BatchNorm2d(norm_dim) elif norm == 'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = 
nn.InstanceNorm2d(norm_dim)", "model num_adain_params = 0 for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params", "output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def __init__(self, width, height, num_classes, ndf, z_length, norm, upsample,", "pad_type=pad_type)] for i in range(2): self.model += [Conv2dBlock(dim, 2 * dim, 4, 2,", "class AEncoder(nn.Module): def __init__(self, width, height, ndf, num_output_channels, norm, upsample): super(AEncoder, self).__init__() \"\"\"", "Up(512, 256 // factor, bilinear) self.up3 = Up(256, 128 // factor, bilinear) self.up4", "ndf self.num_output_channels = num_output_channels self.norm = norm self.upsample = upsample self.unet = UNet(n_channels=1,", "2, 1, bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8, 4,", "\"\"\" \"\"\" self.num_output_channels = num_output_channels self.num_classes = num_classes+1 # check again self.conv1 =", "= z_length self.anatomy_out_channels = anatomy_out_channels self.norm = norm self.upsample = upsample self.num_classes =", "z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\" Args: width: input width", "1, 1).to(device) z_out = self.m_encoder(x) a_out = self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out =", "and self.bias is not None, \"Please assign weight and bias before calling AdaIN!\"", "= Down(512, 1024 // factor) self.up1 = Up(1024, 512 // factor, bilinear) self.up2", "pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True # initialize padding if pad_type == 'reflect':", "else: assert 0, \"Unsupported activation: {}\".format(activation) def forward(self, x): out = self.fc(x) if", "')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features", "norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif 
norm ==", "= num_classes + 1 self.main = [] # input is (nc) x 224", "= nn.ZeroPad2d(padding) else: assert 0, \"Unsupported padding type: {}\".format(pad_type) # initialize normalization norm_dim", "[1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape)", "import torch.nn.functional as F import sys import time from models.unet_parts import * from", "bilinear) self.up3 = Up(256, 128 // factor, bilinear) self.up4 = Up(128, 64, bilinear)", "= self.conv2(out) out = self.pred(out) out = F.softmax(out, dim=1) return out class AEncoder(nn.Module):", "is not None and self.bias is not None, \"Please assign weight and bias", "1, 1, norm='ln', activation=activ, pad_type=pad_type)] dim //= 2 # use reflection padding in", "norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim)", "self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model)", "for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return num_adain_params", "ndf * 8, 4, 2, 1, bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf * 8,", "content class Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels =", "nn.ZeroPad2d(padding) else: assert 0, \"Unsupported padding type: {}\".format(pad_type) # initialize normalization norm_dim =", "def assign_adain_params(self, adain_params, model): # assign the adain_params to the AdaIN layers in", "num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels = num_output_channels self.num_classes = num_classes+1 # check", "self.running_mean.repeat(b) running_var = self.running_var.repeat(b) # Apply instance norm x_reshaped = x.contiguous().view(1, b *", "decoder_type self.num_mask_channels = num_mask_channels self.m_encoder = 
StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h,", "self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def __init__(self,", "type: {}\".format(pad_type) # initialize normalization norm_dim = output_dim if norm == 'bn': self.norm", "norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': #self.norm = nn.InstanceNorm2d(norm_dim,", "MLP(nn.Module): def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): super(MLP, self).__init__() self.model =", "1, 3, norm='none', activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x)", "1, bias=False))] #1024x7x7 # state size. (ndf*16) x 14 x 14 self.out =", "== 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)", "self).__init__() \"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') # MLP to generate", "n_classes self.bilinear = bilinear self.inc = DoubleConv(n_channels, 64) self.down1 = Down(64, 128) self.down2", "-1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape", "+= [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] #", "self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x = self.up1(x5,", "num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') # MLP", "x1 = self.inc(x) x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5", "1, 0) def forward(self, x): out = self.conv1(x) out = self.conv2(out) out =", "class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm, self).__init__() 
self.num_features = num_features self.affine", "[nn.AdaptiveAvgPool2d(1)] # global average pooling self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)] self.model", "+ str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm, self).__init__()", "are dynamically assigned self.weight = None self.bias = None # just dummy buffers,", "out = self.fc(x) if self.norm: out = self.norm(out) if self.activation: out = self.activation(out)", "0, \"Unsupported activation: {}\".format(activation) def forward(self, x): out = self.fc(x) if self.norm: out", "4, 2, 1, bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8,", "global average pooling self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)] self.model = nn.Sequential(*self.model)", "forward(self, x): x1 = self.inc(x) x2 = self.down1(x1) x3 = self.down2(x2) x4 =", "* from models.distance_corr import * from models.spade_resblk import * device = torch.device('cuda:0') #", "pad_type): super(StyleEncoder, self).__init__() dim = 64 self.model = [] self.model += [Conv2dBlock(1, dim,", "= True # initialize fully connected layer if norm == 'sn': self.fc =", "torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not", "MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu') def forward(self, a, z, type): # reconstruct", "self).__init__() \"\"\" Args: width: input width height: input height upsample: upsampling type (nearest", "the two lines listed below. 
mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean", "# return the number of AdaIN parameters needed by the model num_adain_params =", "x.size(0) down_out = self.main(x) down_out = down_out.view(b_size, -1) output = self.out(down_out) return output.view(-1,", "/ (std + self.eps) if self.affine: shape = [1, -1] + [1] *", "# z_out = torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out = self.m_encoder(x) a_out = self.a_encoder(x)", "= nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation ==", "SDNet(nn.Module): def __init__(self, width, height, num_classes, ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels):", "std = adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if adain_params.size(1) >", "= nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation ==", "= None return reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out def reconstruct(self, a_out,", "width height: input height upsample: upsampling type (nearest | bilateral) nclasses: number of", "self.model(x) class MLP(nn.Module): def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): super(MLP, self).__init__()", "- 1) # print(x.size()) if x.size(0) == 1: # These two lines run", "elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'none' or norm", "LayerNorm(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None else:", "self.num_classes = num_classes+1 # check again self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3, 1, 1)", "running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) # Apply instance norm x_reshaped = x.contiguous().view(1,", "return the number of AdaIN parameters needed by the model num_adain_params = 0", "= output_dim if norm == 'bn': self.norm = 
nn.BatchNorm1d(norm_dim) elif norm == 'in':", "= self.running_mean.repeat(b) running_var = self.running_var.repeat(b) # Apply instance norm x_reshaped = x.contiguous().view(1, b", "-1) output = self.out(down_out) return output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def __init__(self, width, height,", "3, norm='none', activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x) class", "= Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') # MLP to generate AdaIN parameters self.mlp =", "activation=activ, pad_type=pad_type)] dim //= 2 # use reflection padding in the last conv", "+ [1] * (x.dim() - 1) # print(x.size()) if x.size(0) == 1: #", "2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] dim *= 2 #", "Up(128, 64, bilinear) self.outc = OutConv(64, n_classes) def forward(self, x): x1 = self.inc(x)", "// factor, bilinear) self.up4 = Up(128, 64, bilinear) self.outc = OutConv(64, n_classes) def", "pytorch 0.4 than the two lines listed below. mean = x.view(-1).mean().view(*shape) std =", "activation='relu'): super(LinearBlock, self).__init__() use_bias = True # initialize fully connected layer if norm", "v / (v.norm() + eps) class LinearBlock(nn.Module): def __init__(self, input_dim, output_dim, norm='none', activation='relu'):", "ndf, 4, 2, 1, bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf,", "= nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh':", "ndf * 8, 4, 2, 1, bias=False))] #1024x7x7 # state size. 
(ndf*16) x", "# input is (nc) x 224 x 224 self.main += [nn.Conv2d(self.num_classes, ndf, 4,", "* 2, 4, 2, 1, bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf", "= True # initialize padding if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif", "self.inc(x) x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4)", "= None else: assert 0, \"Unsupported normalization: {}\".format(norm) # initialize activation if activation", "self.model += [Conv2dBlock(1, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] for i in", "super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') # MLP to", "num_adain_params = 0 for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params +=", "reconstruct an image images_recon = self.decode(a, z) return images_recon def decode(self, content, style):", "= self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return", "# weight and bias are dynamically assigned self.weight = None self.bias = None", "super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine:", "[nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main +=", "self.num_features = num_features self.eps = eps self.momentum = momentum # weight and bias", "2): # self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] self.model", "= norm self.upsample = upsample self.num_classes = num_classes self.decoder_type = decoder_type self.num_mask_channels =", "upsample): super(AEncoder, self).__init__() \"\"\" Build an encoder to extract anatomical information from the", "'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: 
assert", "x3) x = self.up3(x, x2) x = self.up4(x, x1) logits = self.outc(x) return", "kernel_size, stride, bias=self.use_bias) def forward(self, x): x = self.conv(self.pad(x)) if self.norm: x =", "'none' or norm == 'sn': self.norm = None else: assert 0, \"Unsupported normalization:", "+= [nn.Conv2d(dim, style_dim, 1, 1, 0)] self.model = nn.Sequential(*self.model) self.output_dim = dim def", "assert 0, \"Unsupported activation: {}\".format(activation) # initialize convolution if norm == 'sn': self.conv", "(v.norm() + eps) class LinearBlock(nn.Module): def __init__(self, input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__()", "1, norm=norm, activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling self.model +=", "i in range(2): self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm,", "// factor) self.up1 = Up(1024, 512 // factor, bilinear) self.up2 = Up(512, 256", "* 2, ndf * 4, 4, 2, 1, bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf", "num_classes): super(Discriminator, self).__init__() self.ndf = ndf self.num_classes = num_classes + 1 self.main =", "out class UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels = n_channels", "import * from models.blocks import * from models.rounding import * from models.spectral_norm import", "z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out def reconstruct(self, a_out, z_out): reco = self.decoder(a_out,", "2): self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')]", "= (x - mean) / (std + self.eps) if self.affine: shape = [1,", "1, bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8, 4, 2,", "c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class", "= num_classes 
self.decoder_type = decoder_type self.num_mask_channels = num_mask_channels self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu',", "and style codes to an image adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec) images =", "Down(256, 512) factor = 2 if bilinear else 1 self.down4 = Down(512, 1024", "= self.up2(x, x3) x = self.up3(x, x2) x = self.up4(x, x1) logits =", "norm == 'none' or norm == 'sn': self.norm = None else: assert 0,", "mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean)", "\"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return num_adain_params class Decoder(nn.Module): def __init__(self, dim, output_dim=1, res_norm='adain',", "'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm", "to generate AdaIN parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu') def", "dim, n_blk, norm='none', activ='relu'): super(MLP, self).__init__() self.model = [] self.model += [LinearBlock(input_dim, dim,", "or script_type == 'test': z_out = self.m_encoder(x) reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde", "mu_out_tilde, a_out, seg_pred, mu_out, logvar_out def reconstruct(self, a_out, z_out): reco = self.decoder(a_out, z_out,", "pad_type='reflect') # MLP to generate AdaIN parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3,", "pooling self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)] self.model = nn.Sequential(*self.model) self.output_dim =", "self.conv2(out) out = self.pred(out) out = F.softmax(out, dim=1) return out class AEncoder(nn.Module): def", "norm='ln', activation=activ, pad_type=pad_type)] dim //= 2 # use reflection padding in the last", "if x.size(0) == 1: # These two lines run much faster in pytorch", "pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) 
else: assert 0, \"Unsupported padding type: {}\".format(pad_type)", "== 'none': self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation) # initialize", "\"Please assign weight and bias before calling AdaIN!\" b, c = x.size(0), x.size(1)", "in range(3): self.model += [Conv2dBlock(dim, dim // 2, 3, 1, 1, norm='ln', activation=activ,", "nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation", "# state size. (ndf*16) x 14 x 14 self.out = nn.Linear(self.ndf * 8", "self.outc = OutConv(64, n_classes) def forward(self, x): x1 = self.inc(x) x2 = self.down1(x1)", "m.bias = mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features: adain_params = adain_params[:,", "forward(self, x): return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def __init__(self, ndf, num_classes): super(Discriminator, self).__init__()", "= self.norm(out) if self.activation: out = self.activation(out) return out class Conv2dBlock(nn.Module): def __init__(self,", "activation: {}\".format(activation) # initialize convolution if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim,", "self.ndf = ndf self.z_length = z_length self.anatomy_out_channels = anatomy_out_channels self.norm = norm self.upsample", "assert 0, \"Unsupported activation: {}\".format(activation) def forward(self, x): out = self.fc(x) if self.norm:", "*= 2 # for i in range(n_downsample - 2): # self.model += [Conv2dBlock(dim,", "self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) # initialize normalization norm_dim = output_dim if norm", "models.spade_resblk import * device = torch.device('cuda:0') # content class Segmentor(nn.Module): def __init__(self, num_output_channels,", "self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif script_type == 'val' or script_type == 'test': z_out", "* 7 * 7, 1) self.main = nn.Sequential(*self.main) def 
forward(self, x): b_size =", "128 // factor, bilinear) self.up4 = Up(128, 64, bilinear) self.outc = OutConv(64, n_classes)", "bilinear=True): super(UNet, self).__init__() self.n_channels = n_channels self.n_classes = n_classes self.bilinear = bilinear self.inc", "224 self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2,", "self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif script_type == 'val' or script_type ==", "width self.ndf = ndf self.z_length = z_length self.anatomy_out_channels = anatomy_out_channels self.norm = norm", "norm, activ, pad_type): super(StyleEncoder, self).__init__() dim = 64 self.model = [] self.model +=", "= down_out.view(b_size, -1) output = self.out(down_out) return output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def __init__(self,", "self.up3(x, x2) x = self.up4(x, x1) logits = self.outc(x) return logits # style", "dim=1) out = self.rounding(out) return out class UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True):", "bilateral) nclasses: number of semantice segmentation classes \"\"\" self.h = height self.w =", "self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder", "information from the image. 
\"\"\" self.width = width self.height = height self.ndf =", "# decode content and style codes to an image adain_params = self.mlp(style) self.assign_adain_params(adain_params,", "torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out = self.m_encoder(x) a_out = self.a_encoder(x) seg_pred = self.segmentor(a_out)", "self.model = [] self.model += [Conv2dBlock(1, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)]", "self.up1 = Up(1024, 512 // factor, bilinear) self.up2 = Up(512, 256 // factor,", "self).__init__() self.model = [] # upsampling blocks for i in range(3): self.model +=", "x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x", "self.z_length, self.num_mask_channels) def forward(self, x, mask, script_type): # z_out = torch.randn(x.shape[0], self.z_length, 1,", "else: self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) # initialize normalization norm_dim = output_dim if", "ndf * 4, 4, 2, 1, bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf * 4,", "== 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif", "super(AEncoder, self).__init__() \"\"\" Build an encoder to extract anatomical information from the image.", "norm, upsample): super(AEncoder, self).__init__() \"\"\" Build an encoder to extract anatomical information from", "> 2*m.num_features: adain_params = adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model): # return the number", "self.norm = nn.InstanceNorm1d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm ==", "self.activation(out) return out class Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim, kernel_size, stride, padding=0, norm='none',", "upsample self.num_classes = num_classes self.decoder_type = decoder_type self.num_mask_channels = num_mask_channels self.m_encoder = StyleEncoder(z_length,", "__init__(self, n_channels, n_classes, bilinear=True): super(UNet, 
self).__init__() self.n_channels = n_channels self.n_classes = n_classes self.bilinear", "self.num_mask_channels) def forward(self, x, mask, script_type): # z_out = torch.randn(x.shape[0], self.z_length, 1, 1).to(device)", "= nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1]", "def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model = [] #", "initialize activation if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu':", "x = self.up3(x, x2) x = self.up4(x, x1) logits = self.outc(x) return logits", "listed below. mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape)", "def __init__(self, width, height, num_classes, ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet,", "images = self.dec(content) return images def assign_adain_params(self, adain_params, model): # assign the adain_params", "return out class AEncoder(nn.Module): def __init__(self, width, height, ndf, num_output_channels, norm, upsample): super(AEncoder,", "return self.model(x) class MLP(nn.Module): def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): super(MLP,", "self.model(x) # decoder class Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture def __init__(self, decoder_type, anatomy_out_channels,", "8 * 7 * 7, 1) self.main = nn.Sequential(*self.main) def forward(self, x): b_size", "norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\" Args: width: input width height:", "the AdaIN layers in model for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\":", "style_dim, 1, 1, 0)] self.model = nn.Sequential(*self.model) self.output_dim = dim def forward(self, x):", 
"from models.blocks import * from models.rounding import * from models.spectral_norm import * from", "2, 1, bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4,", "activations self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def", "x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) # Apply instance norm x_reshaped =", "4, 2, 1, bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8,", "* 8, ndf * 8, 4, 2, 1, bias=False))] #1024x7x7 # state size.", "class Decoder(nn.Module): def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model =", "#1024x7x7 # state size. (ndf*16) x 14 x 14 self.out = nn.Linear(self.ndf *", "model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features] std = adain_params[:, m.num_features:2*m.num_features]", "calling AdaIN!\" b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b)", "x4) x = self.up2(x, x3) x = self.up3(x, x2) x = self.up4(x, x1)", "= [] self.model += [Conv2dBlock(1, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] for", "if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features] std = adain_params[:, m.num_features:2*m.num_features] m.bias", "lines listed below. 
mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0),", "factor = 2 if bilinear else 1 self.down4 = Down(512, 1024 // factor)", "conv layer self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)] self.model", "self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm,", "if self.activation: x = self.activation(x) return x class StyleEncoder(nn.Module): def __init__(self, style_dim, norm,", "nclasses: number of semantice segmentation classes \"\"\" self.h = height self.w = width", "torch import torch.nn as nn import torch.nn.functional as F import sys import time", "//= 2 # use reflection padding in the last conv layer self.model +=", "input height upsample: upsampling type (nearest | bilateral) nclasses: number of semantice segmentation", "ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\" Args: width: input", "= nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain':", "+= [LinearBlock(dim, dim, norm=norm, activation=activ)] self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no", "Decoder(nn.Module): def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model = []", "True # initialize fully connected layer if norm == 'sn': self.fc = SpectralNorm(nn.Linear(input_dim,", "running_mean, running_var, self.weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self):", "'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, \"Unsupported padding type: {}\".format(pad_type) # initialize", "'none': self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation) # initialize 
convolution", "num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum =", "self).__init__() \"\"\" \"\"\" self.num_output_channels = num_output_channels self.num_classes = num_classes+1 # check again self.conv1", "range(3): self.model += [Conv2dBlock(dim, dim // 2, 3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)]", "* from models.rounding import * from models.spectral_norm import * from models.distance_corr import *", "eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape", "<gh_stars>1-10 import torch import torch.nn as nn import torch.nn.functional as F import sys", "# for i in range(n_downsample - 2): # self.model += [Conv2dBlock(dim, dim, 4,", "elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm =", "= nn.Sequential(*self.main) def forward(self, x): b_size = x.size(0) down_out = self.main(x) down_out =", "= self.rounding(out) return out class UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True): super(UNet, self).__init__()", "blocks for i in range(3): self.model += [Conv2dBlock(dim, dim // 2, 3, 1,", "def forward(self, x): assert self.weight is not None and self.bias is not None,", "1, bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2,", "upsample: upsampling type (nearest | bilateral) nclasses: number of semantice segmentation classes \"\"\"", "= self.m_encoder(x) reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = None return reco, z_out,", "self.norm = None else: assert 0, \"Unsupported normalization: {}\".format(norm) # initialize activation if", "\"Unsupported padding type: {}\".format(pad_type) # initialize normalization norm_dim = output_dim if norm ==", "def __init__(self, ndf, num_classes): 
super(Discriminator, self).__init__() self.ndf = ndf self.num_classes = num_classes +", "from models.spectral_norm import * from models.distance_corr import * from models.spade_resblk import * device", "eps self.momentum = momentum # weight and bias are dynamically assigned self.weight =", "self.norm(out) if self.activation: out = self.activation(out) return out class Conv2dBlock(nn.Module): def __init__(self, input_dim", "1, 1) self.pred = nn.Conv2d(64, self.num_classes, 1, 1, 0) def forward(self, x): out", "n_channels self.n_classes = n_classes self.bilinear = bilinear self.inc = DoubleConv(n_channels, 64) self.down1 =", "self.up2 = Up(512, 256 // factor, bilinear) self.up3 = Up(256, 128 // factor,", "[Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] dim *= 2", "# decoder class Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture def __init__(self, decoder_type, anatomy_out_channels, z_length,", "self.dec) images = self.dec(content) return images def assign_adain_params(self, adain_params, model): # assign the", "= self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x =", "= x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if", "no output activations self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x.view(x.size(0), -1)) class", "Up(256, 128 // factor, bilinear) self.up4 = Up(128, 64, bilinear) self.outc = OutConv(64,", "= ndf self.num_classes = num_classes + 1 self.main = [] # input is", "norm='none', activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor =", "if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class", "Args: width: input width height: input height upsample: upsampling type (nearest | bilateral)", "[Conv2dBlock(1, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] for i in range(2): 
self.model", "= num_output_channels self.num_classes = num_classes+1 # check again self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3,", "if self.norm: out = self.norm(out) if self.activation: out = self.activation(out) return out class", "needed by the model num_adain_params = 0 for m in model.modules(): if m.__class__.__name__", "self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)]", "2 # for i in range(n_downsample - 2): # self.model += [Conv2dBlock(dim, dim,", "std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x", "type): # reconstruct an image images_recon = self.decode(a, z) return images_recon def decode(self,", "self.rounding(out) return out class UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels", "of semantice segmentation classes \"\"\" self.h = height self.w = width self.ndf =", "out class AEncoder(nn.Module): def __init__(self, width, height, ndf, num_output_channels, norm, upsample): super(AEncoder, self).__init__()", "# global average pooling self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)] self.model =", "__init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain',", "LinearBlock(nn.Module): def __init__(self, input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias = True #", "def forward(self, x): x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation:", "in range(n_blk - 2): self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] self.model += [LinearBlock(dim,", "seg_pred, mu_out, logvar_out def reconstruct(self, a_out, z_out): reco = self.decoder(a_out, z_out, self.decoder_type) return", "class 
Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels = num_output_channels", "return out class UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels =", "* 4, ndf * 8, 4, 2, 1, bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf", "7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] for i in range(2): self.model += [Conv2dBlock(dim,", "= 64 self.model = [] self.model += [Conv2dBlock(1, dim, 7, 1, 3, norm=norm,", "def decode(self, content, style): # decode content and style codes to an image", "= [] # input is (nc) x 224 x 224 self.main += [nn.Conv2d(self.num_classes,", "self.activation: x = self.activation(x) return x class StyleEncoder(nn.Module): def __init__(self, style_dim, norm, activ,", "class Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder,", "x): out = self.conv1(x) out = self.conv2(out) out = self.pred(out) out = F.softmax(out,", "norm=norm, activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling self.model += [nn.Conv2d(dim,", "= x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm( x_reshaped, running_mean, running_var, self.weight,", "decode(self, content, style): # decode content and style codes to an image adain_params", "activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh()", "forward(self, x): return self.model(x) class MLP(nn.Module): def __init__(self, input_dim, output_dim, dim, n_blk, norm='none',", "ndf, num_output_channels, norm, upsample): super(AEncoder, self).__init__() \"\"\" Build an encoder to extract anatomical", "[1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return", "self.fc = 
SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) # initialize", "4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] dim *= 2 # for i in", "1) self.conv2 = conv_bn_relu(64, 64, 3, 1, 1) self.pred = nn.Conv2d(64, self.num_classes, 1,", "than the two lines listed below. mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else:", "semantice segmentation classes \"\"\" self.h = height self.w = width self.ndf = ndf", "nn.Sequential(*self.model) self.output_dim = dim def forward(self, x): return self.model(x) # decoder class Ada_Decoder(nn.Module):", "[LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations self.model = nn.Sequential(*self.model) def forward(self,", "+= [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] dim *=", "None mu_out = None #t0 = time.time() if script_type == 'training': reco =", "much faster in pytorch 0.4 than the two lines listed below. 
mean =", "self.gamma.view(*shape) + self.beta.view(*shape) return x def l2normalize(v, eps=1e-12): return v / (v.norm() +", "norm == 'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc = nn.Linear(input_dim, output_dim,", "None else: assert 0, \"Unsupported normalization: {}\".format(norm) # initialize activation if activation ==", "None self.bias = None # just dummy buffers, not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var',", "bias=use_bias)) else: self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) # initialize normalization norm_dim = output_dim", "'in': self.norm = nn.InstanceNorm1d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm", "range(n_blk - 2): self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] self.model += [LinearBlock(dim, output_dim,", "shape = [1, -1] + [1] * (x.dim() - 2) x = x", "(nearest | bilateral) nclasses: number of semantice segmentation classes \"\"\" self.h = height", "x.size(0) == 1: # These two lines run much faster in pytorch 0.4", "num_classes self.decoder_type = decoder_type self.num_mask_channels = num_mask_channels self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect')", "time from models.unet_parts import * from models.blocks import * from models.rounding import *", "x def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LinearBlock(nn.Module): def", "= nn.InstanceNorm1d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'none'", "if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation =", "self.num_output_channels = num_output_channels self.num_classes = num_classes+1 # check again self.conv1 = conv_bn_relu(self.num_output_channels, 64,", "* 8, 4, 2, 1, bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf", "output_dim if norm == 'bn': self.norm = 
nn.BatchNorm2d(norm_dim) elif norm == 'in': #self.norm", "AdaIN layers in model for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean", "2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x def l2normalize(v, eps=1e-12):", "1, 1, 0)] self.model = nn.Sequential(*self.model) self.output_dim = dim def forward(self, x): return", "3, norm='none', activ='relu') def forward(self, a, z, type): # reconstruct an image images_recon", "x 14 self.out = nn.Linear(self.ndf * 8 * 7 * 7, 1) self.main", "self.bias = None # just dummy buffers, not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features))", "self.norm = norm self.upsample = upsample self.num_classes = num_classes self.decoder_type = decoder_type self.num_mask_channels", "x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x def l2normalize(v, eps=1e-12): return", "adain_params, model): # assign the adain_params to the AdaIN layers in model for", "= self.mlp(style) self.assign_adain_params(adain_params, self.dec) images = self.dec(content) return images def assign_adain_params(self, adain_params, model):", "= UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer() def forward(self, x): out = self.unet(x)", "= StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample)", "def forward(self, a, z, type): # reconstruct an image images_recon = self.decode(a, z)", "pad_type='zero'): super(Decoder, self).__init__() self.model = [] # upsampling blocks for i in range(3):", "input width height: input height upsample: upsampling type (nearest | bilateral) nclasses: number", "self.activation(x) return x class StyleEncoder(nn.Module): def __init__(self, style_dim, norm, activ, pad_type): super(StyleEncoder, self).__init__()", 
"bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x): x", "codes to an image adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec) images = self.dec(content) return", "- 2): # self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)]", "= width self.ndf = ndf self.z_length = z_length self.anatomy_out_channels = anatomy_out_channels self.norm =", "None else: assert 0, \"Unsupported activation: {}\".format(activation) def forward(self, x): out = self.fc(x)", "bias=self.use_bias) def forward(self, x): x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if", "auto-encoder architecture def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec", "images def assign_adain_params(self, adain_params, model): # assign the adain_params to the AdaIN layers", "ndf, num_classes): super(Discriminator, self).__init__() self.ndf = ndf self.num_classes = num_classes + 1 self.main", "norm='none', activ='relu'): super(MLP, self).__init__() self.model = [] self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)]", "nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1)", "4, 2, 1, bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf", "self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x", "== 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else:", "bias are dynamically assigned self.weight = None self.bias = None # just dummy", "self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def __init__(self, ndf, num_classes): super(Discriminator, self).__init__() self.ndf = ndf", "mean = adain_params[:, 
:m.num_features] std = adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight =", "2, 4, 2, 1, bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf *", "connected layer if norm == 'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc", "out = F.batch_norm( x_reshaped, running_mean, running_var, self.weight, self.bias, True, self.momentum, self.eps) return out.view(b,", "padding type: {}\".format(pad_type) # initialize normalization norm_dim = output_dim if norm == 'bn':", "= upsample self.num_classes = num_classes self.decoder_type = decoder_type self.num_mask_channels = num_mask_channels self.m_encoder =", "b * c, *x.size()[2:]) out = F.batch_norm( x_reshaped, running_mean, running_var, self.weight, self.bias, True,", "in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return num_adain_params class Decoder(nn.Module):", "= DoubleConv(n_channels, 64) self.down1 = Down(64, 128) self.down2 = Down(128, 256) self.down3 =", "= adain_params[:, :m.num_features] std = adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight = std.contiguous().view(-1)", "Discriminator(nn.Module): def __init__(self, ndf, num_classes): super(Discriminator, self).__init__() self.ndf = ndf self.num_classes = num_classes", "if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': #self.norm =", "* from models.blocks import * from models.rounding import * from models.spectral_norm import *", "= self.dec(content) return images def assign_adain_params(self, adain_params, model): # assign the adain_params to", "import * from models.spectral_norm import * from models.distance_corr import * from models.spade_resblk import", "\"\"\" self.num_output_channels = num_output_channels self.num_classes = num_classes+1 # check again self.conv1 = conv_bn_relu(self.num_output_channels,", "z_out = 
torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out = self.m_encoder(x) a_out = self.a_encoder(x) seg_pred", "z, type): # reconstruct an image images_recon = self.decode(a, z) return images_recon def", "time.time() if script_type == 'training': reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco)", "= bilinear self.inc = DoubleConv(n_channels, 64) self.down1 = Down(64, 128) self.down2 = Down(128,", "= num_features self.eps = eps self.momentum = momentum # weight and bias are", "AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None else:", "nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu':", "self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation) # initialize convolution if", "__init__(self, width, height, num_classes, ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__()", "= Up(256, 128 // factor, bilinear) self.up4 = Up(128, 64, bilinear) self.outc =", "F.softmax(out, dim=1) return out class AEncoder(nn.Module): def __init__(self, width, height, ndf, num_output_channels, norm,", "'val' or script_type == 'test': z_out = self.m_encoder(x) reco = self.decoder(a_out, z_out, self.decoder_type)", "b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) # Apply", "x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm( x_reshaped, running_mean, running_var, self.weight, self.bias,", "+= [nn.AdaptiveAvgPool2d(1)] # global average pooling self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]", "norm=norm, activation=activ, pad_type=pad_type)] dim *= 2 # for i in range(n_downsample - 2):", "= adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model): # return the number of AdaIN parameters", "output_dim, bias=use_bias) # initialize normalization norm_dim = 
output_dim if norm == 'bn': self.norm", "nn.Linear(input_dim, output_dim, bias=use_bias) # initialize normalization norm_dim = output_dim if norm == 'bn':", "out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')'", "ndf self.num_classes = num_classes + 1 self.main = [] # input is (nc)", "== 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True)", "activation=activ, pad_type=pad_type)] dim *= 2 # for i in range(n_downsample - 2): #", "__init__(self, width, height, ndf, num_output_channels, norm, upsample): super(AEncoder, self).__init__() \"\"\" Build an encoder", "def get_num_adain_params(self, model): # return the number of AdaIN parameters needed by the", "models.rounding import * from models.spectral_norm import * from models.distance_corr import * from models.spade_resblk", "RoundLayer() def forward(self, x): out = self.unet(x) out = F.softmax(out, dim=1) out =", "2, 1, bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf *", "fully connected layer if norm == 'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else:", "self.up3 = Up(256, 128 // factor, bilinear) self.up4 = Up(128, 64, bilinear) self.outc", "self.decode(a, z) return images_recon def decode(self, content, style): # decode content and style", "nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif", "track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm", "self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False))] #256x28x28", "activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model = [] # upsampling blocks for i in", "self.pred = nn.Conv2d(64, self.num_classes, 1, 1, 0) def 
forward(self, x): out = self.conv1(x)", "8, 4, 2, 1, bias=False))] #1024x7x7 # state size. (ndf*16) x 14 x", "dim, norm=norm, activation=activ)] for i in range(n_blk - 2): self.model += [LinearBlock(dim, dim,", "# initialize padding if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type ==", "pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes)", "'ln': self.norm = LayerNorm(norm_dim) elif norm == 'none' or norm == 'sn': self.norm", "running_var, self.weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return", "__init__(self, input_dim ,output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias =", "self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation ==", "self.decoder_type = decoder_type self.num_mask_channels = num_mask_channels self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder", "[SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False))] #256x28x28 self.main +=", "norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias = True # initialize fully connected layer if", "models.spectral_norm import * from models.distance_corr import * from models.spade_resblk import * device =", "forward(self, a, z, type): # reconstruct an image images_recon = self.decode(a, z) return", "128) self.down2 = Down(128, 256) self.down3 = Down(256, 512) factor = 2 if", "= self.unet(x) out = F.softmax(out, dim=1) out = self.rounding(out) return out class UNet(nn.Module):", "= n_channels self.n_classes = n_classes self.bilinear = bilinear self.inc = DoubleConv(n_channels, 64) self.down1", "// factor, bilinear) self.up2 = Up(512, 256 // factor, 
bilinear) self.up3 = Up(256,", "dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model = [] # upsampling blocks", "\"\"\" Build an encoder to extract anatomical information from the image. \"\"\" self.width", "norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm1d(norm_dim) elif norm ==", "= [-1] + [1] * (x.dim() - 1) # print(x.size()) if x.size(0) ==", "self.m_encoder(x) a_out = self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out = None mu_out = None", "self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self, x,", "decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\" Args: width: input width height: input height", "input_dim ,output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True", "self.n_channels = n_channels self.n_classes = n_classes self.bilinear = bilinear self.inc = DoubleConv(n_channels, 64)", "AdaIN auto-encoder architecture def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\"", "std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features: adain_params = adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model): #", "size. 
(ndf*16) x 14 x 14 self.out = nn.Linear(self.ndf * 8 * 7", "x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x)", "x): shape = [-1] + [1] * (x.dim() - 1) # print(x.size()) if", "import * device = torch.device('cuda:0') # content class Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes):", "def forward(self, x, mask, script_type): # z_out = torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out", "extract anatomical information from the image. \"\"\" self.width = width self.height = height", "below. mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std", "None return reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out def reconstruct(self, a_out, z_out):", "parameters needed by the model num_adain_params = 0 for m in model.modules(): if", "None, \"Please assign weight and bias before calling AdaIN!\" b, c = x.size(0),", "def __init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine", "nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x): x = self.conv(self.pad(x)) if self.norm:", "num_features, eps=1e-5, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps =", "nn.BatchNorm1d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm1d(norm_dim) elif norm == 'ln': self.norm", "= SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) # initialize normalization", "= self.fc(x) if self.norm: out = self.norm(out) if self.activation: out = self.activation(out) return", "bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1,", "reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out def reconstruct(self, a_out, 
z_out): reco =", "None # just dummy buffers, not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self,", "logits # style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features", "Down(64, 128) self.down2 = Down(128, 256) self.down3 = Down(256, 512) factor = 2", "an image images_recon = self.decode(a, z) return images_recon def decode(self, content, style): #", "self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] for i in range(n_blk - 2): self.model", "2, 1, bias=False))] #1024x7x7 # state size. (ndf*16) x 14 x 14 self.out", "x): out = self.fc(x) if self.norm: out = self.norm(out) if self.activation: out =", "norm='none', activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x) class MLP(nn.Module):", "1) self.main = nn.Sequential(*self.main) def forward(self, x): b_size = x.size(0) down_out = self.main(x)", "3, 1, 1) self.conv2 = conv_bn_relu(64, 64, 3, 1, 1) self.pred = nn.Conv2d(64,", "num_adain_params += 2*m.num_features return num_adain_params class Decoder(nn.Module): def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu',", "activ='relu'): super(MLP, self).__init__() self.model = [] self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] for", "= eps self.momentum = momentum # weight and bias are dynamically assigned self.weight", "These two lines run much faster in pytorch 0.4 than the two lines", "\"\"\" self.width = width self.height = height self.ndf = ndf self.num_output_channels = num_output_channels", "Down(128, 256) self.down3 = Down(256, 512) factor = 2 if bilinear else 1", "Build an encoder to extract anatomical information from the image. 
\"\"\" self.width =", "affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if", "range(2): self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)]", "self.output_dim = dim def forward(self, x): return self.model(x) # decoder class Ada_Decoder(nn.Module): #", "self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations self.model = nn.Sequential(*self.model)", "== 'sn': self.norm = None else: assert 0, \"Unsupported normalization: {}\".format(norm) # initialize", "self.w = width self.ndf = ndf self.z_length = z_length self.anatomy_out_channels = anatomy_out_channels self.norm", "just dummy buffers, not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert", "= F.softmax(out, dim=1) return out class AEncoder(nn.Module): def __init__(self, width, height, ndf, num_output_channels,", "num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta", "self.mlp(style) self.assign_adain_params(adain_params, self.dec) images = self.dec(content) return images def assign_adain_params(self, adain_params, model): #", "normalization norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm", "AEncoder(nn.Module): def __init__(self, width, height, ndf, num_output_channels, norm, upsample): super(AEncoder, self).__init__() \"\"\" Build", "z_out = self.m_encoder(x) reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = None return reco,", "1: # These two lines run much faster in pytorch 0.4 than the", "if bilinear else 1 self.down4 = Down(512, 1024 // factor) self.up1 = Up(1024,", "x): return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def __init__(self, ndf, num_classes): super(Discriminator, 
self).__init__() self.ndf", "True # initialize padding if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type", "width, height, num_classes, ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\"", "* 8, 4, 2, 1, bias=False))] #1024x7x7 # state size. (ndf*16) x 14", "x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine:", "padding if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad", "__init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model = [] # upsampling", "'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type", "x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape)", "weight and bias are dynamically assigned self.weight = None self.bias = None #", "= None #t0 = time.time() if script_type == 'training': reco = self.decoder(a_out, z_out,", "pad_type=pad_type)] self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x) class MLP(nn.Module): def __init__(self,", "class Discriminator(nn.Module): def __init__(self, ndf, num_classes): super(Discriminator, self).__init__() self.ndf = ndf self.num_classes =", "StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor", "= anatomy_out_channels self.norm = norm self.upsample = upsample self.num_classes = num_classes self.decoder_type =", "self.weight is not None and self.bias is not None, \"Please assign weight and", "self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: 
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size,", "activation: {}\".format(activation) def forward(self, x): out = self.fc(x) if self.norm: out = self.norm(out)", "conv_bn_relu(64, 64, 3, 1, 1) self.pred = nn.Conv2d(64, self.num_classes, 1, 1, 0) def", "nn.Sequential(*self.model) def forward(self, x): return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def __init__(self, ndf, num_classes):", "= self.norm(x) if self.activation: x = self.activation(x) return x class StyleEncoder(nn.Module): def __init__(self,", "[SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False))] #1024x7x7 # state", "= self.up3(x, x2) x = self.up4(x, x1) logits = self.outc(x) return logits #", "1).squeeze(1) class SDNet(nn.Module): def __init__(self, width, height, num_classes, ndf, z_length, norm, upsample, decoder_type,", "AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type,", "from models.unet_parts import * from models.blocks import * from models.rounding import * from", "out = F.softmax(out, dim=1) out = self.rounding(out) return out class UNet(nn.Module): def __init__(self,", "self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x) class MLP(nn.Module): def __init__(self, input_dim,", "num_output_channels, norm, upsample): super(AEncoder, self).__init__() \"\"\" Build an encoder to extract anatomical information", "[] # input is (nc) x 224 x 224 self.main += [nn.Conv2d(self.num_classes, ndf,", "x = self.norm(x) if self.activation: x = self.activation(x) return x class StyleEncoder(nn.Module): def", "out = self.pred(out) out = F.softmax(out, dim=1) return out class AEncoder(nn.Module): def __init__(self,", "segmentation classes \"\"\" self.h = height self.w = width self.ndf = ndf self.z_length", ",output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', 
pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True #", "\"\"\" self.h = height self.w = width self.ndf = ndf self.z_length = z_length", "[Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] # global", "x): b_size = x.size(0) down_out = self.main(x) down_out = down_out.view(b_size, -1) output =", "Up(1024, 512 // factor, bilinear) self.up2 = Up(512, 256 // factor, bilinear) self.up3", "+ 1 self.main = [] # input is (nc) x 224 x 224", "sys import time from models.unet_parts import * from models.blocks import * from models.rounding", "super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels = num_output_channels self.num_classes = num_classes+1 # check again", "14 self.out = nn.Linear(self.ndf * 8 * 7 * 7, 1) self.main =", "self.down3 = Down(256, 512) factor = 2 if bilinear else 1 self.down4 =", "stride, bias=self.use_bias) def forward(self, x): x = self.conv(self.pad(x)) if self.norm: x = self.norm(x)", "eps=1e-12): return v / (v.norm() + eps) class LinearBlock(nn.Module): def __init__(self, input_dim, output_dim,", "4, ndf * 8, 4, 2, 1, bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf *", "self.use_bias = True # initialize padding if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding)", "2*m.num_features return num_adain_params class Decoder(nn.Module): def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder,", "initialize normalization norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif", "out = self.activation(out) return out class Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim, kernel_size, stride,", "x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std", "normalization: {}\".format(norm) # initialize activation if activation == 'relu': self.activation = nn.ReLU(inplace=True) 
elif", "[-1] + [1] * (x.dim() - 1) # print(x.size()) if x.size(0) == 1:", "self.outc(x) return logits # style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d,", "torch.nn as nn import torch.nn.functional as F import sys import time from models.unet_parts", "self.weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__", "self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class StyleEncoder(nn.Module):", "return num_adain_params class Decoder(nn.Module): def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__()", "self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self,", "#512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False))]", "2, 3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)] dim //= 2 # use reflection", "self).__init__() self.model = [] self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] for i in", "= self.running_var.repeat(b) # Apply instance norm x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])", "mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std =", "== \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return num_adain_params class Decoder(nn.Module): def __init__(self, dim, output_dim=1,", "self.fc(x) if self.norm: out = self.norm(out) if self.activation: out = self.activation(out) return out", "pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding)", "dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] # global 
average", "activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels,", "in model for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean = adain_params[:,", "'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif", "down_out = down_out.view(b_size, -1) output = self.out(down_out) return output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def", "num_features self.eps = eps self.momentum = momentum # weight and bias are dynamically", "activ='relu', pad_type='reflect') # MLP to generate AdaIN parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256,", "= x * self.gamma.view(*shape) + self.beta.view(*shape) return x def l2normalize(v, eps=1e-12): return v", "self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu') def forward(self, a, z, type): # reconstruct an", "norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'none' or norm ==", "in range(n_downsample - 2): # self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm,", "upsampling blocks for i in range(3): self.model += [Conv2dBlock(dim, dim // 2, 3,", "self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm =", "import time from models.unet_parts import * from models.blocks import * from models.rounding import", "adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec) images = self.dec(content) return images def assign_adain_params(self, adain_params,", "self.segmentor(a_out) logvar_out = None mu_out = None #t0 = time.time() if script_type ==", "self.z_length, 1, 1).to(device) z_out = self.m_encoder(x) a_out = self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out", "def __init__(self, width, height, ndf, num_output_channels, norm, 
upsample): super(AEncoder, self).__init__() \"\"\" Build an", "elif activation == 'none': self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation)", "self.up1(x5, x4) x = self.up2(x, x3) x = self.up3(x, x2) x = self.up4(x,", "= x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0),", "'(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm,", "norm == 'bn': self.norm = nn.BatchNorm1d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm1d(norm_dim)", "super(SDNet, self).__init__() \"\"\" Args: width: input width height: input height upsample: upsampling type", "F import sys import time from models.unet_parts import * from models.blocks import *", "== 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif", "output_dim, norm='none', activation='none')] # no output activations self.model = nn.Sequential(*self.model) def forward(self, x):", "mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features: adain_params = adain_params[:, 2*m.num_features:] def", "and bias are dynamically assigned self.weight = None self.bias = None # just", "self.height = height self.ndf = ndf self.num_output_channels = num_output_channels self.norm = norm self.upsample", "out = self.rounding(out) return out class UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True): super(UNet,", "2, ndf * 4, 4, 2, 1, bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf *", "the adain_params to the AdaIN layers in model for m in model.modules(): if", "return v / (v.norm() + eps) class LinearBlock(nn.Module): def __init__(self, input_dim, output_dim, norm='none',", "width: input width height: input height upsample: upsampling type (nearest | bilateral) 
nclasses:", "image adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec) images = self.dec(content) return images def assign_adain_params(self,", "= num_output_channels self.norm = norm self.upsample = upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True)", "-1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) +", "out = self.unet(x) out = F.softmax(out, dim=1) out = self.rounding(out) return out class", "__init__(self, style_dim, norm, activ, pad_type): super(StyleEncoder, self).__init__() dim = 64 self.model = []", "def __init__(self, input_dim ,output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias", "= self.conv1(x) out = self.conv2(out) out = self.pred(out) out = F.softmax(out, dim=1) return", "self.n_classes = n_classes self.bilinear = bilinear self.inc = DoubleConv(n_channels, 64) self.down1 = Down(64,", "2 # use reflection padding in the last conv layer self.model += [Conv2dBlock(dim,", "input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): super(MLP, self).__init__() self.model = [] self.model +=", "models.unet_parts import * from models.blocks import * from models.rounding import * from models.spectral_norm", "def __init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps", "self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum # weight and", "z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif script_type == 'val' or script_type == 'test':", "# initialize convolution if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride,", "bilinear) self.outc = OutConv(64, n_classes) def forward(self, x): x1 = self.inc(x) x2 =", "style_dim, norm, activ, pad_type): super(StyleEncoder, self).__init__() dim = 64 self.model = [] 
self.model", "std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps)", "x 14 x 14 self.out = nn.Linear(self.ndf * 8 * 7 * 7,", "1 self.down4 = Down(512, 1024 // factor) self.up1 = Up(1024, 512 // factor,", "x = self.up2(x, x3) x = self.up3(x, x2) x = self.up4(x, x1) logits", "[Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model) def forward(self,", "i in range(n_downsample - 2): # self.model += [Conv2dBlock(dim, dim, 4, 2, 1,", "256) self.down3 = Down(256, 512) factor = 2 if bilinear else 1 self.down4", "= self.out(down_out) return output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def __init__(self, width, height, num_classes, ndf,", "self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1]", "self.model = nn.Sequential(*self.model) self.output_dim = dim def forward(self, x): return self.model(x) # decoder", "content and style codes to an image adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec) images", "activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x) class MLP(nn.Module): def", "z_length self.anatomy_out_channels = anatomy_out_channels self.norm = norm self.upsample = upsample self.num_classes = num_classes", "super(StyleEncoder, self).__init__() dim = 64 self.model = [] self.model += [Conv2dBlock(1, dim, 7,", "of AdaIN parameters needed by the model num_adain_params = 0 for m in", "in the last conv layer self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none',", "'bn': self.norm = nn.BatchNorm1d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm1d(norm_dim) elif norm", "= nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, \"Unsupported", "x = self.up4(x, x1) logits = self.outc(x) 
return logits # style class AdaptiveInstanceNorm2d(nn.Module):", "0, \"Unsupported activation: {}\".format(activation) # initialize convolution if norm == 'sn': self.conv =", "x5 = self.down4(x4) x = self.up1(x5, x4) x = self.up2(x, x3) x =", "+= [SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False))] #256x28x28 self.main", "input is (nc) x 224 x 224 self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2,", "seg_pred = self.segmentor(a_out) logvar_out = None mu_out = None #t0 = time.time() if", "None #t0 = time.time() if script_type == 'training': reco = self.decoder(a_out, z_out, self.decoder_type)", "= self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif script_type == 'val' or script_type", "elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm", "# style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features =", "norm='none', activation='none')] # no output activations self.model = nn.Sequential(*self.model) def forward(self, x): return", "= self.main(x) down_out = down_out.view(b_size, -1) output = self.out(down_out) return output.view(-1, 1).squeeze(1) class", "forward(self, x, mask, script_type): # z_out = torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out =", "def __init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels = num_output_channels self.num_classes =", "self.bias is not None, \"Please assign weight and bias before calling AdaIN!\" b,", "= self.activation(out) return out class Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim, kernel_size, stride, padding=0,", "= nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim)", "num_classes + 1 self.main = [] # input is (nc) x 224 x", "m in 
model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features] std =", "script_type): # z_out = torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out = self.m_encoder(x) a_out =", "self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is", "self.down4 = Down(512, 1024 // factor) self.up1 = Up(1024, 512 // factor, bilinear)", "super(MLP, self).__init__() self.model = [] self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] for i", "x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) # Apply instance norm x_reshaped", "if self.activation: out = self.activation(out) return out class Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim,", "self.decoder_type) mu_out_tilde = None return reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out def", "model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return num_adain_params class Decoder(nn.Module): def", "down_out = self.main(x) down_out = down_out.view(b_size, -1) output = self.out(down_out) return output.view(-1, 1).squeeze(1)", "x): return self.model(x) # decoder class Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture def __init__(self,", "= height self.w = width self.ndf = ndf self.z_length = z_length self.anatomy_out_channels =", "reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif script_type == 'val' or", "AdaIN parameters needed by the model num_adain_params = 0 for m in model.modules():", "last conv layer self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)]", "conv_bn_relu(self.num_output_channels, 64, 3, 1, 1) self.conv2 = conv_bn_relu(64, 64, 3, 1, 1) self.pred", "+ self.beta.view(*shape) return x def l2normalize(v, eps=1e-12): return 
v / (v.norm() + eps)", "momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum #", "2, 1, bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4,", "or norm == 'sn': self.norm = None else: assert 0, \"Unsupported normalization: {}\".format(norm)", "2 if bilinear else 1 self.down4 = Down(512, 1024 // factor) self.up1 =", "anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\" Args: width: input width height: input height upsample:", "height self.ndf = ndf self.num_output_channels = num_output_channels self.norm = norm self.upsample = upsample", "factor) self.up1 = Up(1024, 512 // factor, bilinear) self.up2 = Up(512, 256 //", "height, ndf, num_output_channels, norm, upsample): super(AEncoder, self).__init__() \"\"\" Build an encoder to extract", "class MLP(nn.Module): def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): super(MLP, self).__init__() self.model", "self.m_encoder(reco) elif script_type == 'val' or script_type == 'test': z_out = self.m_encoder(x) reco", "a, z, type): # reconstruct an image images_recon = self.decode(a, z) return images_recon", "decoder_type, anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu',", "self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x = self.up1(x5, x4) x =", "== 'training': reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif script_type ==", "0) def forward(self, x): out = self.conv1(x) out = self.conv2(out) out = self.pred(out)", "== 'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)", "self).__init__() \"\"\" Build an encoder to extract anatomical information from the image. 
\"\"\"", "dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] for i in range(2): self.model +=", "elif script_type == 'val' or script_type == 'test': z_out = self.m_encoder(x) reco =", "8, ndf * 8, 4, 2, 1, bias=False))] #1024x7x7 # state size. (ndf*16)", "+= [LinearBlock(input_dim, dim, norm=norm, activation=activ)] for i in range(n_blk - 2): self.model +=", "= self.up1(x5, x4) x = self.up2(x, x3) x = self.up3(x, x2) x =", "elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation =", "decode content and style codes to an image adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec)", "activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True # initialize padding if pad_type ==", "# These two lines run much faster in pytorch 0.4 than the two", "factor, bilinear) self.up3 = Up(256, 128 // factor, bilinear) self.up4 = Up(128, 64,", "= torch.device('cuda:0') # content class Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\"", "state size. (ndf*16) x 14 x 14 self.out = nn.Linear(self.ndf * 8 *", "= OutConv(64, n_classes) def forward(self, x): x1 = self.inc(x) x2 = self.down1(x1) x3", "= nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, \"Unsupported", "down_out.view(b_size, -1) output = self.out(down_out) return output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def __init__(self, width,", "== 'none': self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation) def forward(self,", "x): x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x =", "(nc) x 224 x 224 self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)]", "import * from models.rounding import * from models.spectral_norm import * from models.distance_corr import", "faster in pytorch 0.4 than the two lines listed below. 
mean = x.view(-1).mean().view(*shape)", "= decoder_type self.num_mask_channels = num_mask_channels self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder =", "7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model) def forward(self, x): return", "1, 1) self.conv2 = conv_bn_relu(64, 64, 3, 1, 1) self.pred = nn.Conv2d(64, self.num_classes,", "= num_mask_channels self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w, self.ndf,", "= self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = None return reco, z_out, mu_out_tilde, a_out, seg_pred,", "input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias = True # initialize fully connected", "self.h = height self.w = width self.ndf = ndf self.z_length = z_length self.anatomy_out_channels", "self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm =", "1, 1, 0) def forward(self, x): out = self.conv1(x) out = self.conv2(out) out", "output_dim, bias=use_bias)) else: self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) # initialize normalization norm_dim =", "anatomy_out_channels self.norm = norm self.upsample = upsample self.num_classes = num_classes self.decoder_type = decoder_type", "x 224 x 224 self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)] #64x112x112", "self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and", "= nn.Linear(input_dim, output_dim, bias=use_bias) # initialize normalization norm_dim = output_dim if norm ==", "elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation", "[SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False))] #512x14x14 
self.main +=", "self.up4 = Up(128, 64, bilinear) self.outc = OutConv(64, n_classes) def forward(self, x): x1", "a_out = self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out = None mu_out = None #t0", "class StyleEncoder(nn.Module): def __init__(self, style_dim, norm, activ, pad_type): super(StyleEncoder, self).__init__() dim = 64", "3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)] dim //= 2 # use reflection padding", "not None and self.bias is not None, \"Please assign weight and bias before", "x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x", "assign the adain_params to the AdaIN layers in model for m in model.modules():", "torch.nn.functional as F import sys import time from models.unet_parts import * from models.blocks", "if norm == 'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc = nn.Linear(input_dim,", "style): # decode content and style codes to an image adain_params = self.mlp(style)", "- mean) / (std + self.eps) if self.affine: shape = [1, -1] +", "else: assert 0, \"Unsupported padding type: {}\".format(pad_type) # initialize normalization norm_dim = output_dim", "def forward(self, x): return self.model(x) # decoder class Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture", "upsampling type (nearest | bilateral) nclasses: number of semantice segmentation classes \"\"\" self.h", "2, 1, norm=norm, activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling self.model", "def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def", "= ndf self.z_length = z_length self.anatomy_out_channels = anatomy_out_channels self.norm = norm self.upsample =", "dummy buffers, not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def 
forward(self, x): assert self.weight", "adain_params[:, :m.num_features] std = adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if", "UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer() def forward(self, x): out = self.unet(x) out", "// factor, bilinear) self.up3 = Up(256, 128 // factor, bilinear) self.up4 = Up(128,", "= output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in':", "self.down2 = Down(128, 256) self.down3 = Down(256, 512) factor = 2 if bilinear", "return self.model(x) # decoder class Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture def __init__(self, decoder_type,", "padding=0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True # initialize padding if", "as nn import torch.nn.functional as F import sys import time from models.unet_parts import", "1 self.main = [] # input is (nc) x 224 x 224 self.main", "else: assert 0, \"Unsupported activation: {}\".format(activation) # initialize convolution if norm == 'sn':", "x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm( x_reshaped, running_mean, running_var,", "x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x = self.up1(x5, x4)", "the number of AdaIN parameters needed by the model num_adain_params = 0 for", "== 'none' or norm == 'sn': self.norm = None else: assert 0, \"Unsupported", "z_out = self.m_encoder(x) a_out = self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out = None mu_out", "= height self.ndf = ndf self.num_output_channels = num_output_channels self.norm = norm self.upsample =", "'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert", "256, 3, norm='none', activ='relu') def forward(self, a, z, type): # reconstruct an image", "out = F.softmax(out, dim=1) return out class AEncoder(nn.Module): def 
__init__(self, width, height, ndf,", "inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation", "models.distance_corr import * from models.spade_resblk import * device = torch.device('cuda:0') # content class", "output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model) def forward(self, x):", "activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None", "self.norm = nn.BatchNorm1d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm1d(norm_dim) elif norm ==", "if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x", "= [1, -1] + [1] * (x.dim() - 2) x = x *", "logits = self.outc(x) return logits # style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5,", "forward(self, x): assert self.weight is not None and self.bias is not None, \"Please", "* dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] dim *= 2 # for", "by the model num_adain_params = 0 for m in model.modules(): if m.__class__.__name__ ==", "- 2): self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] self.model += [LinearBlock(dim, output_dim, norm='none',", "x): out = self.unet(x) out = F.softmax(out, dim=1) out = self.rounding(out) return out", "height self.w = width self.ndf = ndf self.z_length = z_length self.anatomy_out_channels = anatomy_out_channels", "adain_params = adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model): # return the number of AdaIN", "return x def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LinearBlock(nn.Module):", "mask, script_type): # z_out = torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out = self.m_encoder(x) a_out", "self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' +", "= None else: assert 0, \"Unsupported activation: {}\".format(activation) def 
forward(self, x): out =", "used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None", "* from models.spectral_norm import * from models.distance_corr import * from models.spade_resblk import *", "def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels,", "None and self.bias is not None, \"Please assign weight and bias before calling", "self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length,", "nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] *", "if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return num_adain_params class Decoder(nn.Module): def __init__(self,", "number of semantice segmentation classes \"\"\" self.h = height self.w = width self.ndf", "-1)) class Discriminator(nn.Module): def __init__(self, ndf, num_classes): super(Discriminator, self).__init__() self.ndf = ndf self.num_classes", "self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu') def forward(self, a, z, type):", "to extract anatomical information from the image. 
\"\"\" self.width = width self.height =", "AdaIN!\" b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) #", "images_recon = self.decode(a, z) return images_recon def decode(self, content, style): # decode content", "+= [nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))]", "[] # upsampling blocks for i in range(3): self.model += [Conv2dBlock(dim, dim //", "return self.__class__.__name__ + '(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features,", "self.ndf = ndf self.num_classes = num_classes + 1 self.main = [] # input", "64, 3, 1, 1) self.pred = nn.Conv2d(64, self.num_classes, 1, 1, 0) def forward(self,", "= self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x = self.up1(x5, x4) x", "num_output_channels self.norm = norm self.upsample = upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding", "self).__init__() dim = 64 self.model = [] self.model += [Conv2dBlock(1, dim, 7, 1,", "self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self, x, mask, script_type): # z_out = torch.randn(x.shape[0], self.z_length,", "# print(x.size()) if x.size(0) == 1: # These two lines run much faster", "= 0 for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features", "= nn.Sequential(*self.model) def forward(self, x): return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def __init__(self, ndf,", "2*m.num_features: adain_params = adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model): # return the number of", "1) self.pred = nn.Conv2d(64, self.num_classes, 1, 1, 0) def forward(self, x): out =", "Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder, 
self).__init__()", "adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model): # return the number of AdaIN parameters needed", "#t0 = time.time() if script_type == 'training': reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde", "64, 3, 1, 1) self.conv2 = conv_bn_relu(64, 64, 3, 1, 1) self.pred =", "= None mu_out = None #t0 = time.time() if script_type == 'training': reco", "eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum", "script_type == 'val' or script_type == 'test': z_out = self.m_encoder(x) reco = self.decoder(a_out,", "a_out, seg_pred, mu_out, logvar_out def reconstruct(self, a_out, z_out): reco = self.decoder(a_out, z_out, self.decoder_type)", "bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4,", "reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = None return reco, z_out, mu_out_tilde, a_out,", "+= [SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False))] #512x14x14 self.main", "self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)] self.model = nn.Sequential(*self.model) self.output_dim = dim", "def forward(self, x): x1 = self.inc(x) x2 = self.down1(x1) x3 = self.down2(x2) x4", "= nn.Linear(self.ndf * 8 * 7 * 7, 1) self.main = nn.Sequential(*self.main) def", "== 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif", "'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation", "= eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x):", "[LinearBlock(input_dim, dim, norm=norm, activation=activ)] for i in range(n_blk - 2): self.model += [LinearBlock(dim,", "type (nearest | bilateral) nclasses: number of semantice 
segmentation classes \"\"\" self.h =", "+ '(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, affine=True):", "bias=use_bias) # initialize normalization norm_dim = output_dim if norm == 'bn': self.norm =", "model): # return the number of AdaIN parameters needed by the model num_adain_params", "// 2, 3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)] dim //= 2 # use", "self.bilinear = bilinear self.inc = DoubleConv(n_channels, 64) self.down1 = Down(64, 128) self.down2 =", "+= [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations self.model = nn.Sequential(*self.model) def", "in range(2): self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ,", "self.main = nn.Sequential(*self.main) def forward(self, x): b_size = x.size(0) down_out = self.main(x) down_out", "assert 0, \"Unsupported padding type: {}\".format(pad_type) # initialize normalization norm_dim = output_dim if", "self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self, x, mask, script_type): # z_out", "nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation", "'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm", "two lines run much faster in pytorch 0.4 than the two lines listed", "nn import torch.nn.functional as F import sys import time from models.unet_parts import *", "return logits # style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__()", "= self.up4(x, x1) logits = self.outc(x) return logits # style class AdaptiveInstanceNorm2d(nn.Module): def", "norm=norm, activation=activ)] for i in range(n_blk - 2): self.model += [LinearBlock(dim, dim, norm=norm,", "def forward(self, x): out = self.unet(x) 
out = F.softmax(out, dim=1) out = self.rounding(out)", "*x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class LayerNorm(nn.Module):", "ndf * 2, 4, 2, 1, bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf * 2,", "weight and bias before calling AdaIN!\" b, c = x.size(0), x.size(1) running_mean =", "= n_classes self.bilinear = bilinear self.inc = DoubleConv(n_channels, 64) self.down1 = Down(64, 128)", "= x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) /", "self.down1 = Down(64, 128) self.down2 = Down(128, 256) self.down3 = Down(256, 512) factor", "c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) # Apply instance", "\"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features] std = adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight", "dim //= 2 # use reflection padding in the last conv layer self.model", "an image adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec) images = self.dec(content) return images def", "+= [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf", "64 self.model = [] self.model += [Conv2dBlock(1, dim, 7, 1, 3, norm=norm, activation=activ,", "return output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def __init__(self, width, height, num_classes, ndf, z_length, norm,", "Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self, x, mask, script_type): # z_out = torch.randn(x.shape[0],", "adain_params to the AdaIN layers in model for m in model.modules(): if m.__class__.__name__", "generate AdaIN parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu') def forward(self,", "z) return images_recon def decode(self, content, 
style): # decode content and style codes", "* (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x", "= num_classes+1 # check again self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3, 1, 1) self.conv2", "run much faster in pytorch 0.4 than the two lines listed below. mean", "= MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu') def forward(self, a, z, type): #", "== 'test': z_out = self.m_encoder(x) reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = None", "512 // factor, bilinear) self.up2 = Up(512, 256 // factor, bilinear) self.up3 =", "dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] dim *= 2 # for i", "self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm ==", "(x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x def", "m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features: adain_params =", "norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True # initialize padding if pad_type", "from the image. 
\"\"\" self.width = width self.height = height self.ndf = ndf", "class Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock,", "[LinearBlock(dim, dim, norm=norm, activation=activ)] self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output", "self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation) def forward(self, x): out", "mu_out = None #t0 = time.time() if script_type == 'training': reco = self.decoder(a_out,", "return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def __init__(self, ndf, num_classes): super(Discriminator, self).__init__() self.ndf =", "to the AdaIN layers in model for m in model.modules(): if m.__class__.__name__ ==", "dynamically assigned self.weight = None self.bias = None # just dummy buffers, not", "3, norm=norm, activation=activ, pad_type=pad_type)] for i in range(2): self.model += [Conv2dBlock(dim, 2 *", "norm x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm( x_reshaped, running_mean,", "dim, norm=norm, activation=activ)] self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations", "\"\"\" Args: width: input width height: input height upsample: upsampling type (nearest |", "activation == 'none': self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation) def", ":m.num_features] std = adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if adain_params.size(1)", "elif norm == 'none' or norm == 'sn': self.norm = None else: assert", "z_out, self.decoder_type) mu_out_tilde = None return reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out", "self.upsample = upsample self.num_classes = num_classes self.decoder_type = decoder_type self.num_mask_channels = num_mask_channels 
self.m_encoder", "reflection padding in the last conv layer self.model += [Conv2dBlock(dim, output_dim, 7, 1,", "image. \"\"\" self.width = width self.height = height self.ndf = ndf self.num_output_channels =", "elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad =", "self).__init__() self.ndf = ndf self.num_classes = num_classes + 1 self.main = [] #", "self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ +", "super(LinearBlock, self).__init__() use_bias = True # initialize fully connected layer if norm ==", "models.blocks import * from models.rounding import * from models.spectral_norm import * from models.distance_corr", "elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation =", "'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) #", "self.conv1(x) out = self.conv2(out) out = self.pred(out) out = F.softmax(out, dim=1) return out", "assign weight and bias before calling AdaIN!\" b, c = x.size(0), x.size(1) running_mean", "self.anatomy_out_channels = anatomy_out_channels self.norm = norm self.upsample = upsample self.num_classes = num_classes self.decoder_type", "instance norm x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm( x_reshaped,", "nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm", "self.num_classes, 1, 1, 0) def forward(self, x): out = self.conv1(x) out = self.conv2(out)", "style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features", "out = self.conv1(x) out = self.conv2(out) out = self.pred(out) out = F.softmax(out, dim=1)", "activation if activation == 'relu': 
self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation", "again self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3, 1, 1) self.conv2 = conv_bn_relu(64, 64, 3,", "{}\".format(pad_type) # initialize normalization norm_dim = output_dim if norm == 'bn': self.norm =", "self.norm = LayerNorm(norm_dim) elif norm == 'none' or norm == 'sn': self.norm =", "m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features] std = adain_params[:, m.num_features:2*m.num_features] m.bias =", "[nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))] #128x56x56", "= ndf self.num_output_channels = num_output_channels self.norm = norm self.upsample = upsample self.unet =", "def forward(self, x): out = self.conv1(x) out = self.conv2(out) out = self.pred(out) out", "None else: assert 0, \"Unsupported activation: {}\".format(activation) # initialize convolution if norm ==", "classes \"\"\" self.h = height self.w = width self.ndf = ndf self.z_length =", "| bilateral) nclasses: number of semantice segmentation classes \"\"\" self.h = height self.w", "momentum # weight and bias are dynamically assigned self.weight = None self.bias =", "output activations self.model = nn.Sequential(*self.model) def forward(self, x): return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module):", "1, bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2,", "assert self.weight is not None and self.bias is not None, \"Please assign weight", "z_length, num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') #", "forward(self, x): b_size = x.size(0) down_out = self.main(x) down_out = down_out.view(b_size, -1) output", "bias=False))] #1024x7x7 # state size. 
(ndf*16) x 14 x 14 self.out = nn.Linear(self.ndf", "m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return num_adain_params class", "__init__(self, input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias = True # initialize fully", "#128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False))]", "import sys import time from models.unet_parts import * from models.blocks import * from", "self.weight = None self.bias = None # just dummy buffers, not used self.register_buffer('running_mean',", "nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, \"Unsupported activation:", "0)] self.model = nn.Sequential(*self.model) self.output_dim = dim def forward(self, x): return self.model(x) #", "#self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm =", "x = self.up1(x5, x4) x = self.up2(x, x3) x = self.up3(x, x2) x", "nn.Sequential(*self.main) def forward(self, x): b_size = x.size(0) down_out = self.main(x) down_out = down_out.view(b_size,", "self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels,", "Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels = num_output_channels self.num_classes", "__init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels = num_output_channels self.num_classes = num_classes+1", "self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self,", 
"upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\" Args: width: input width height: input", "== 'val' or script_type == 'test': z_out = self.m_encoder(x) reco = self.decoder(a_out, z_out,", "super(Decoder, self).__init__() self.model = [] # upsampling blocks for i in range(3): self.model", "- 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x def l2normalize(v,", "\"Unsupported activation: {}\".format(activation) def forward(self, x): out = self.fc(x) if self.norm: out =", "x * self.gamma.view(*shape) + self.beta.view(*shape) return x def l2normalize(v, eps=1e-12): return v /", "padding in the last conv layer self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3,", "1, norm='ln', activation=activ, pad_type=pad_type)] dim //= 2 # use reflection padding in the", "True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '('", "self.anatomy_out_channels, self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels)", "# initialize activation if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation ==", "dim def forward(self, x): return self.model(x) # decoder class Ada_Decoder(nn.Module): # AdaIN auto-encoder", "= F.softmax(out, dim=1) out = self.rounding(out) return out class UNet(nn.Module): def __init__(self, n_channels,", "forward(self, x): out = self.fc(x) if self.norm: out = self.norm(out) if self.activation: out", "not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not", "model for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features]", "super(UNet, self).__init__() 
self.n_channels = n_channels self.n_classes = n_classes self.bilinear = bilinear self.inc =", "the last conv layer self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh',", "'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm", "forward(self, x): out = self.conv1(x) out = self.conv2(out) out = self.pred(out) out =", "upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer() def forward(self, x): out", "return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) +", "Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__()", "nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, \"Unsupported padding", "* c, *x.size()[2:]) out = F.batch_norm( x_reshaped, running_mean, running_var, self.weight, self.bias, True, self.momentum,", "else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x): x =", "image images_recon = self.decode(a, z) return images_recon def decode(self, content, style): # decode", "elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, \"Unsupported padding type:", "# initialize fully connected layer if norm == 'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim,", "self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)]", "activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling self.model += [nn.Conv2d(dim, style_dim,", "+ ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm, 
self).__init__() self.num_features =", "= affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features))", "self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x): x = self.conv(self.pad(x))", "* 7, 1) self.main = nn.Sequential(*self.main) def forward(self, x): b_size = x.size(0) down_out", "layers in model for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean =", "= SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride,", "forward(self, x): x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x", "self.__class__.__name__ + '(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5,", "is (nc) x 224 x 224 self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2, 1,", "== 'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln':", "# self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] self.model +=", "pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling self.model += [nn.Conv2d(dim, style_dim, 1,", "if script_type == 'training': reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif", "self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') # MLP to generate AdaIN parameters self.mlp", "== 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU()", "n_blk, norm='none', activ='relu'): super(MLP, self).__init__() self.model = [] self.model += [LinearBlock(input_dim, dim, norm=norm,", "= nn.Conv2d(64, self.num_classes, 1, 1, 0) def forward(self, x): 
out = self.conv1(x) out", "Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self, x, mask, script_type):", "== 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, \"Unsupported padding type: {}\".format(pad_type) #", "lines run much faster in pytorch 0.4 than the two lines listed below.", "str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm, self).__init__() self.num_features", "mu_out_tilde = self.m_encoder(reco) elif script_type == 'val' or script_type == 'test': z_out =", "= num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())", "bilinear=True) self.rounding = RoundLayer() def forward(self, x): out = self.unet(x) out = F.softmax(out,", "activ='relu') def forward(self, a, z, type): # reconstruct an image images_recon = self.decode(a,", "4, 2, 1, bias=False))] #1024x7x7 # state size. 
(ndf*16) x 14 x 14", "self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim()", "forward(self, x): return self.model(x) # decoder class Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture def", "self.out(down_out) return output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def __init__(self, width, height, num_classes, ndf, z_length,", "self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2)", "'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm", "= nn.BatchNorm1d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm1d(norm_dim) elif norm == 'ln':", "b_size = x.size(0) down_out = self.main(x) down_out = down_out.view(b_size, -1) output = self.out(down_out)", "factor, bilinear) self.up2 = Up(512, 256 // factor, bilinear) self.up3 = Up(256, 128", "self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x =", "self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))] #128x56x56 self.main +=", "encoder to extract anatomical information from the image. 
\"\"\" self.width = width self.height", "class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps", "[] self.model += [Conv2dBlock(1, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] for i", "for i in range(2): self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1,", "from models.distance_corr import * from models.spade_resblk import * device = torch.device('cuda:0') # content", "x2) x = self.up4(x, x1) logits = self.outc(x) return logits # style class", "= self.outc(x) return logits # style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5, momentum=0.1):", "+= [nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)] #64x112x112 self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main", "class UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels = n_channels self.n_classes", "DoubleConv(n_channels, 64) self.down1 = Down(64, 128) self.down2 = Down(128, 256) self.down3 = Down(256,", "+= 2*m.num_features return num_adain_params class Decoder(nn.Module): def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'):", "self.norm: out = self.norm(out) if self.activation: out = self.activation(out) return out class Conv2dBlock(nn.Module):", "norm self.upsample = upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer() def", "output = self.out(down_out) return output.view(-1, 1).squeeze(1) class SDNet(nn.Module): def __init__(self, width, height, num_classes,", "and bias before calling AdaIN!\" b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b)", "norm self.upsample = upsample self.num_classes = num_classes self.decoder_type = decoder_type self.num_mask_channels = num_mask_channels", "7, 1) self.main = nn.Sequential(*self.main) 
def forward(self, x): b_size = x.size(0) down_out =", "height upsample: upsampling type (nearest | bilateral) nclasses: number of semantice segmentation classes", "super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum # weight", "an encoder to extract anatomical information from the image. \"\"\" self.width = width", "initialize normalization norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm1d(norm_dim) elif", "= self.m_encoder(reco) elif script_type == 'val' or script_type == 'test': z_out = self.m_encoder(x)", "pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding)", "+ [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape)", "self.activation: out = self.activation(out) return out class Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim, kernel_size,", "pad_type=pad_type)] dim *= 2 # for i in range(n_downsample - 2): # self.model", "for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features] std", "= self.inc(x) x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 =", "norm == 'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif norm ==", "== 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif", "if norm == 'bn': self.norm = nn.BatchNorm1d(norm_dim) elif norm == 'in': self.norm =", "# initialize normalization norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim)", "self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0,", "# upsampling blocks for i in range(3): self.model += [Conv2dBlock(dim, dim // 2,", "# no output activations self.model = nn.Sequential(*self.model) def forward(self, x): return 
self.model(x.view(x.size(0), -1))", "self.main = [] # input is (nc) x 224 x 224 self.main +=", "is not None, \"Please assign weight and bias before calling AdaIN!\" b, c", "use reflection padding in the last conv layer self.model += [Conv2dBlock(dim, output_dim, 7,", "* (x.dim() - 1) # print(x.size()) if x.size(0) == 1: # These two", "res_norm='adain', activ='relu', pad_type='reflect') # MLP to generate AdaIN parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec),", "== \"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features] std = adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1)", "self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] #", "x): x1 = self.inc(x) x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3)", "output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias = True # initialize fully connected layer", "self.norm(x) if self.activation: x = self.activation(x) return x class StyleEncoder(nn.Module): def __init__(self, style_dim,", "MLP to generate AdaIN parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu')", "mu_out, logvar_out def reconstruct(self, a_out, z_out): reco = self.decoder(a_out, z_out, self.decoder_type) return reco", "bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1,", "norm='none', activ='relu') def forward(self, a, z, type): # reconstruct an image images_recon =", "__init__(self, ndf, num_classes): super(Discriminator, self).__init__() self.ndf = ndf self.num_classes = num_classes + 1", "StyleEncoder(nn.Module): def __init__(self, style_dim, norm, activ, pad_type): super(StyleEncoder, self).__init__() dim = 64 self.model", "# AdaIN auto-encoder architecture def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels): 
super(Ada_Decoder, self).__init__() \"\"\"", "# use reflection padding in the last conv layer self.model += [Conv2dBlock(dim, output_dim,", "check again self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3, 1, 1) self.conv2 = conv_bn_relu(64, 64,", "content, style): # decode content and style codes to an image adain_params =", "= momentum # weight and bias are dynamically assigned self.weight = None self.bias", "bilinear else 1 self.down4 = Down(512, 1024 // factor) self.up1 = Up(1024, 512", "self.down4(x4) x = self.up1(x5, x4) x = self.up2(x, x3) x = self.up3(x, x2)", "14 x 14 self.out = nn.Linear(self.ndf * 8 * 7 * 7, 1)", "self.norm = norm self.upsample = upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding =", "+= [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)] self.model = nn.Sequential(*self.model) def", "\"\"\" \"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') # MLP to generate AdaIN", "{}\".format(norm) # initialize activation if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation", "def __init__(self, input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias = True # initialize", "factor, bilinear) self.up4 = Up(128, 64, bilinear) self.outc = OutConv(64, n_classes) def forward(self,", "def __init__(self, style_dim, norm, activ, pad_type): super(StyleEncoder, self).__init__() dim = 64 self.model =", "parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu') def forward(self, a, z,", "self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta =", "AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps 
=", "forward(self, x): out = self.unet(x) out = F.softmax(out, dim=1) out = self.rounding(out) return", "[Conv2dBlock(dim, dim // 2, 3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)] dim //= 2", "SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)", "Apply instance norm x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(", "* from models.spade_resblk import * device = torch.device('cuda:0') # content class Segmentor(nn.Module): def", "inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))] #128x56x56 self.main", "elif norm == 'in': self.norm = nn.InstanceNorm1d(norm_dim) elif norm == 'ln': self.norm =", "= adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features:", "AdaIN parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu') def forward(self, a,", "4, 2, 1, bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4,", "= time.time() if script_type == 'training': reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde =", "initialize padding if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate':", "+= [SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False))] #1024x7x7 #", "# content class Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels", "self.num_mask_channels = num_mask_channels self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w,", "# assign the adain_params to the AdaIN layers in model for m in", "nn.Conv2d(64, self.num_classes, 1, 1, 0) def forward(self, x): out 
= self.conv1(x) out =", "adain_params.size(1) > 2*m.num_features: adain_params = adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model): # return the", "= Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self, x, mask, script_type): # z_out =", "def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) #", "to an image adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec) images = self.dec(content) return images", "self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] dim", "kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True # initialize", "images_recon def decode(self, content, style): # decode content and style codes to an", "= LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none'", "1024 // factor) self.up1 = Up(1024, 512 // factor, bilinear) self.up2 = Up(512,", "width self.height = height self.ndf = ndf self.num_output_channels = num_output_channels self.norm = norm", "= width self.height = height self.ndf = ndf self.num_output_channels = num_output_channels self.norm =", "for i in range(n_blk - 2): self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)] self.model", "decoder class Ada_Decoder(nn.Module): # AdaIN auto-encoder architecture def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels):", "bias before calling AdaIN!\" b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var", "width, height, ndf, num_output_channels, norm, upsample): super(AEncoder, self).__init__() \"\"\" Build an encoder to", "self.running_var.repeat(b) # Apply instance norm x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out", "self.down3(x3) x5 = self.down4(x4) x = self.up1(x5, x4) x = self.up2(x, x3) x", "self.unet = 
UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer() def forward(self, x): out =", "+ self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() -", "output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model = [] # upsampling blocks for", "UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels = n_channels self.n_classes =", "convolution if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else:", "self.main += [nn.LeakyReLU(0.2, inplace=True)] self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1,", "self.num_output_channels = num_output_channels self.norm = norm self.upsample = upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels,", "= AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder =", "0 for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return", "= torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out = self.m_encoder(x) a_out = self.a_encoder(x) seg_pred =", "= self.down4(x4) x = self.up1(x5, x4) x = self.up2(x, x3) x = self.up3(x,", "m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": num_adain_params += 2*m.num_features return num_adain_params class Decoder(nn.Module): def __init__(self, dim,", "out = self.norm(out) if self.activation: out = self.activation(out) return out class Conv2dBlock(nn.Module): def", "architecture def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels): super(Ada_Decoder, self).__init__() \"\"\" \"\"\" self.dec =", "self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type ==", "elif activation == 
'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation =", "(ndf*16) x 14 x 14 self.out = nn.Linear(self.ndf * 8 * 7 *", "__init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): super(MLP, self).__init__() self.model = [] self.model", "import torch.nn as nn import torch.nn.functional as F import sys import time from", "def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LinearBlock(nn.Module): def __init__(self,", "return x class StyleEncoder(nn.Module): def __init__(self, style_dim, norm, activ, pad_type): super(StyleEncoder, self).__init__() dim", "activ, pad_type): super(StyleEncoder, self).__init__() dim = 64 self.model = [] self.model += [Conv2dBlock(1,", "= AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None", "stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x):", "= nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero':", "activation=activ)] for i in range(n_blk - 2): self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)]", "{}\".format(activation) # initialize convolution if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size,", "1) # print(x.size()) if x.size(0) == 1: # These two lines run much", "self).__init__() self.use_bias = True # initialize padding if pad_type == 'reflect': self.pad =", "bilinear) self.up4 = Up(128, 64, bilinear) self.outc = OutConv(64, n_classes) def forward(self, x):", "num_classes+1 # check again self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3, 1, 1) self.conv2 =", "# check again self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3, 1, 1) self.conv2 = conv_bn_relu(64,", "__repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self,", "'selu': 
self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation", "output_dim if norm == 'bn': self.norm = nn.BatchNorm1d(norm_dim) elif norm == 'in': self.norm", "OutConv(64, n_classes) def forward(self, x): x1 = self.inc(x) x2 = self.down1(x1) x3 =", "'training': reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif script_type == 'val'", "script_type == 'test': z_out = self.m_encoder(x) reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde =", "# just dummy buffers, not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x):", "super(Conv2dBlock, self).__init__() self.use_bias = True # initialize padding if pad_type == 'reflect': self.pad", "num_output_channels self.num_classes = num_classes+1 # check again self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3, 1,", "script_type == 'training': reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = self.m_encoder(reco) elif script_type", "4, 4, 2, 1, bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf *", "self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False))] #1024x7x7", "= Down(128, 256) self.down3 = Down(256, 512) factor = 2 if bilinear else", "= self.activation(x) return x class StyleEncoder(nn.Module): def __init__(self, style_dim, norm, activ, pad_type): super(StyleEncoder,", "Down(512, 1024 // factor) self.up1 = Up(1024, 512 // factor, bilinear) self.up2 =", "def forward(self, x): b_size = x.size(0) down_out = self.main(x) down_out = down_out.view(b_size, -1)", "def forward(self, x): return self.model(x) class MLP(nn.Module): def __init__(self, input_dim, output_dim, dim, n_blk,", "(std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim()", "= F.batch_norm( x_reshaped, running_mean, running_var, self.weight, self.bias, True, self.momentum, 
self.eps) return out.view(b, c,", "norm=norm, activation=activ)] self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations self.model", "1, 0)] self.model = nn.Sequential(*self.model) self.output_dim = dim def forward(self, x): return self.model(x)", "= nn.Sequential(*self.model) self.output_dim = dim def forward(self, x): return self.model(x) # decoder class", "x = (x - mean) / (std + self.eps) if self.affine: shape =", "= LayerNorm(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None", "nn.Sequential(*self.model) def forward(self, x): return self.model(x) class MLP(nn.Module): def __init__(self, input_dim, output_dim, dim,", "self.num_classes = num_classes self.decoder_type = decoder_type self.num_mask_channels = num_mask_channels self.m_encoder = StyleEncoder(z_length, norm='none',", "self.eps = eps self.momentum = momentum # weight and bias are dynamically assigned", "self.momentum = momentum # weight and bias are dynamically assigned self.weight = None", "= self.down3(x3) x5 = self.down4(x4) x = self.up1(x5, x4) x = self.up2(x, x3)", "64) self.down1 = Down(64, 128) self.down2 = Down(128, 256) self.down3 = Down(256, 512)", "in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\": mean = adain_params[:, :m.num_features] std = adain_params[:,", "1, bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2,", "x_reshaped, running_mean, running_var, self.weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def", "1).to(device) z_out = self.m_encoder(x) a_out = self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out = None", "self.assign_adain_params(adain_params, self.dec) images = self.dec(content) return images def assign_adain_params(self, adain_params, model): # assign", "= Down(256, 512) factor = 2 if bilinear else 1 self.down4 = Down(512,", "norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' 
or norm ==", "n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels = n_channels self.n_classes = n_classes self.bilinear = bilinear", "dim = 64 self.model = [] self.model += [Conv2dBlock(1, dim, 7, 1, 3,", "the image. \"\"\" self.width = width self.height = height self.ndf = ndf self.num_output_channels", "* 4, 4, 2, 1, bias=False))] #256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf", "* device = torch.device('cuda:0') # content class Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes): super(Segmentor,", "for i in range(n_downsample - 2): # self.model += [Conv2dBlock(dim, dim, 4, 2,", "out = self.conv2(out) out = self.pred(out) out = F.softmax(out, dim=1) return out class", "style codes to an image adain_params = self.mlp(style) self.assign_adain_params(adain_params, self.dec) images = self.dec(content)", "self.out = nn.Linear(self.ndf * 8 * 7 * 7, 1) self.main = nn.Sequential(*self.main)", "dim=1) return out class AEncoder(nn.Module): def __init__(self, width, height, ndf, num_output_channels, norm, upsample):", "self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation", "__init__(self, num_features, eps=1e-5, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum", "'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif", "initialize convolution if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias))", "n_channels, n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels = n_channels self.n_classes = n_classes self.bilinear =", "self.pad = nn.ZeroPad2d(padding) else: assert 0, \"Unsupported padding type: {}\".format(pad_type) # initialize normalization", "{}\".format(activation) def forward(self, x): out = self.fc(x) 
if self.norm: out = self.norm(out) if", "norm=norm, activation=activ, pad_type=pad_type)] for i in range(2): self.model += [Conv2dBlock(dim, 2 * dim,", "before calling AdaIN!\" b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var =", "super(Discriminator, self).__init__() self.ndf = ndf self.num_classes = num_classes + 1 self.main = []", "self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features)", "/ (v.norm() + eps) class LinearBlock(nn.Module): def __init__(self, input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock,", "(x.dim() - 1) # print(x.size()) if x.size(0) == 1: # These two lines", "* 8 * 7 * 7, 1) self.main = nn.Sequential(*self.main) def forward(self, x):", "activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation =", "# MLP to generate AdaIN parameters self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none',", "self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out = None mu_out = None #t0 = time.time()", "norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv =", "'none': self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation) def forward(self, x):", "return images_recon def decode(self, content, style): # decode content and style codes to", "def forward(self, x): out = self.fc(x) if self.norm: out = self.norm(out) if self.activation:", "x4 = self.down3(x3) x5 = self.down4(x4) x = self.up1(x5, x4) x = self.up2(x,", "activation == 'none': self.activation = None else: assert 0, \"Unsupported activation: {}\".format(activation) #", "num_mask_channels): super(SDNet, self).__init__() \"\"\" Args: width: input width height: input height upsample: upsampling", "else 1 self.down4 = Down(512, 1024 // factor) self.up1 = Up(1024, 512 //", "== 
'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim,", "self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling self.model += [nn.Conv2d(dim, style_dim, 1, 1,", "output_dim, dim, n_blk, norm='none', activ='relu'): super(MLP, self).__init__() self.model = [] self.model += [LinearBlock(input_dim,", "return reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out def reconstruct(self, a_out, z_out): reco", "self.pred(out) out = F.softmax(out, dim=1) return out class AEncoder(nn.Module): def __init__(self, width, height,", "def forward(self, x): return self.model(x.view(x.size(0), -1)) class Discriminator(nn.Module): def __init__(self, ndf, num_classes): super(Discriminator,", "= Up(128, 64, bilinear) self.outc = OutConv(64, n_classes) def forward(self, x): x1 =", "dim *= 2 # for i in range(n_downsample - 2): # self.model +=", "assigned self.weight = None self.bias = None # just dummy buffers, not used", "= [] self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] for i in range(n_blk -", "def __init__(self, n_channels, n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels = n_channels self.n_classes = n_classes", "return out class Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim, kernel_size, stride, padding=0, norm='none', activation='relu',", "if adain_params.size(1) > 2*m.num_features: adain_params = adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model): # return", "SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) # initialize normalization norm_dim", "4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling", "(x - mean) / (std + self.eps) if self.affine: shape = [1, -1]", "activation == 'prelu': self.activation = nn.PReLU() elif activation == 
'selu': self.activation = nn.SELU(inplace=True)", "range(n_downsample - 2): # self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ,", "shape = [-1] + [1] * (x.dim() - 1) # print(x.size()) if x.size(0)", "assert 0, \"Unsupported normalization: {}\".format(norm) # initialize activation if activation == 'relu': self.activation", "initialize fully connected layer if norm == 'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias))", "assign_adain_params(self, adain_params, model): # assign the adain_params to the AdaIN layers in model", "import * from models.distance_corr import * from models.spade_resblk import * device = torch.device('cuda:0')", "def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'): super(MLP, self).__init__() self.model = []", "#256x28x28 self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False))]", "affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def", "'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim,", "self.num_classes = num_classes + 1 self.main = [] # input is (nc) x", "0, \"Unsupported padding type: {}\".format(pad_type) # initialize normalization norm_dim = output_dim if norm", "num_mask_channels self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect') self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels,", "output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x): x = self.conv(self.pad(x)) if self.norm: x", "use_bias = True # initialize fully connected layer if norm == 'sn': self.fc", "64, bilinear) self.outc = OutConv(64, n_classes) def forward(self, x): x1 = self.inc(x) x2", "bilinear self.inc = DoubleConv(n_channels, 64) self.down1 = Down(64, 128) self.down2 = Down(128, 
256)", "self).__init__() use_bias = True # initialize fully connected layer if norm == 'sn':", "# initialize normalization norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm1d(norm_dim)", "== 1: # These two lines run much faster in pytorch 0.4 than", "self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation ==", "1, norm=norm, activation=activ, pad_type=pad_type)] dim *= 2 # for i in range(n_downsample -", "= mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features: adain_params = adain_params[:, 2*m.num_features:]", "x1) logits = self.outc(x) return logits # style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features,", "output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def", "nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad", "activation=activ)] self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations self.model =", "self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma", "= conv_bn_relu(64, 64, 3, 1, 1) self.pred = nn.Conv2d(64, self.num_classes, 1, 1, 0)", "self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = None return reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out,", "layer self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)] self.model =", "class LinearBlock(nn.Module): def __init__(self, input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias = True", "class SDNet(nn.Module): def __init__(self, width, height, num_classes, ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels,", "self.beta.view(*shape) return x def 
l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class", "== 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn':", "0, \"Unsupported normalization: {}\".format(norm) # initialize activation if activation == 'relu': self.activation =", "self.upsample = upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer() def forward(self,", "running_var = self.running_var.repeat(b) # Apply instance norm x_reshaped = x.contiguous().view(1, b * c,", "torch.device('cuda:0') # content class Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\"", "self.norm, self.upsample) self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def", "F.batch_norm( x_reshaped, running_mean, running_var, self.weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:])", "device = torch.device('cuda:0') # content class Segmentor(nn.Module): def __init__(self, num_output_channels, num_classes): super(Segmentor, self).__init__()", "= self.pred(out) out = F.softmax(out, dim=1) return out class AEncoder(nn.Module): def __init__(self, width,", "ndf self.z_length = z_length self.anatomy_out_channels = anatomy_out_channels self.norm = norm self.upsample = upsample", "LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine =", "eps=1e-5, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps", "= nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x): x = self.conv(self.pad(x)) if", "if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': 
self.pad =", "= self.m_encoder(x) a_out = self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out = None mu_out =", "[] self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] for i in range(n_blk - 2):", "adain_params[:, m.num_features:2*m.num_features] m.bias = mean.contiguous().view(-1) m.weight = std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features: adain_params", "= x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x =", "[nn.Conv2d(dim, style_dim, 1, 1, 0)] self.model = nn.Sequential(*self.model) self.output_dim = dim def forward(self,", "self.inc = DoubleConv(n_channels, 64) self.down1 = Down(64, 128) self.down2 = Down(128, 256) self.down3", "forward(self, x): shape = [-1] + [1] * (x.dim() - 1) # print(x.size())", "num_classes, ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\" Args: width:", "*x.size()[2:]) out = F.batch_norm( x_reshaped, running_mean, running_var, self.weight, self.bias, True, self.momentum, self.eps) return", "torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias", "in pytorch 0.4 than the two lines listed below. 
mean = x.view(-1).mean().view(*shape) std", "average pooling self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)] self.model = nn.Sequential(*self.model) self.output_dim", "[SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf *", "if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape =", "x = self.activation(x) return x class StyleEncoder(nn.Module): def __init__(self, style_dim, norm, activ, pad_type):", "model): # assign the adain_params to the AdaIN layers in model for m", "if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv", "224 x 224 self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)] #64x112x112 self.main", "\"Unsupported activation: {}\".format(activation) # initialize convolution if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim,", "number of AdaIN parameters needed by the model num_adain_params = 0 for m", "\"Unsupported normalization: {}\".format(norm) # initialize activation if activation == 'relu': self.activation = nn.ReLU(inplace=True)", "== 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'none' or norm == 'sn':", "self.num_classes) self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels) def forward(self, x, mask, script_type): #", "return images def assign_adain_params(self, adain_params, model): # assign the adain_params to the AdaIN", "+= [Conv2dBlock(dim, dim // 2, 3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)] dim //=", "res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model = [] # upsampling blocks for i", "num_output_channels, num_classes): super(Segmentor, self).__init__() \"\"\" \"\"\" self.num_output_channels = num_output_channels self.num_classes = num_classes+1 
#", "activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2,", "= [] # upsampling blocks for i in range(3): self.model += [Conv2dBlock(dim, dim", "out class Conv2dBlock(nn.Module): def __init__(self, input_dim ,output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'):", "l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LinearBlock(nn.Module): def __init__(self, input_dim,", "self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False))] #512x14x14", "== 'in': self.norm = nn.InstanceNorm1d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif", "= x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) # Apply instance norm", "else: assert 0, \"Unsupported normalization: {}\".format(norm) # initialize activation if activation == 'relu':", "self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] +", "LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or", "n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer() def forward(self, x): out = self.unet(x) out =", "self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0,", "x, mask, script_type): # z_out = torch.randn(x.shape[0], self.z_length, 1, 1).to(device) z_out = self.m_encoder(x)", "__init__(self, num_features, eps=1e-5, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps", "256 // factor, bilinear) self.up3 = Up(256, 128 // factor, bilinear) self.up4 =", "self.main(x) down_out = down_out.view(b_size, -1) output = self.out(down_out) return output.view(-1, 1).squeeze(1) class SDNet(nn.Module):", "c, *x.size()[2:]) out = F.batch_norm( x_reshaped, running_mean, 
running_var, self.weight, self.bias, True, self.momentum, self.eps)", "from models.rounding import * from models.spectral_norm import * from models.distance_corr import * from", "n_classes) def forward(self, x): x1 = self.inc(x) x2 = self.down1(x1) x3 = self.down2(x2)", "* self.gamma.view(*shape) + self.beta.view(*shape) return x def l2normalize(v, eps=1e-12): return v / (v.norm()", "logvar_out = None mu_out = None #t0 = time.time() if script_type == 'training':", "x class StyleEncoder(nn.Module): def __init__(self, style_dim, norm, activ, pad_type): super(StyleEncoder, self).__init__() dim =", "else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x -", "512) factor = 2 if bilinear else 1 self.down4 = Down(512, 1024 //", "print(x.size()) if x.size(0) == 1: # These two lines run much faster in", "self.model = [] # upsampling blocks for i in range(3): self.model += [Conv2dBlock(dim,", "pad_type=pad_type)] dim //= 2 # use reflection padding in the last conv layer", "normalization norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm1d(norm_dim) elif norm", "two lines listed below. 
mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean =", "'sn': self.norm = None else: assert 0, \"Unsupported normalization: {}\".format(norm) # initialize activation", "= self.a_encoder(x) seg_pred = self.segmentor(a_out) logvar_out = None mu_out = None #t0 =", "= Up(512, 256 // factor, bilinear) self.up3 = Up(256, 128 // factor, bilinear)", "eps) class LinearBlock(nn.Module): def __init__(self, input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias =", "8, 4, 2, 1, bias=False))] #512x14x14 self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf *", "= upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer() def forward(self, x):", "== 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else:", "# Apply instance norm x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out =", "self.model = [] self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)] for i in range(n_blk", "mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1]", "2*m.num_features:] def get_num_adain_params(self, model): # return the number of AdaIN parameters needed by", "self.dec(content) return images def assign_adain_params(self, adain_params, model): # assign the adain_params to the", "2, 1, norm=norm, activation=activ, pad_type=pad_type)] dim *= 2 # for i in range(n_downsample", "3, 1, 1) self.pred = nn.Conv2d(64, self.num_classes, 1, 1, 0) def forward(self, x):", "nn.BatchNorm2d(norm_dim) elif norm == 'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif", "buffers, not used self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is", "activation='none')] # no output activations self.model = nn.Sequential(*self.model) def 
forward(self, x): return self.model(x.view(x.size(0),", "x): return self.model(x) class MLP(nn.Module): def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'):", "not None, \"Please assign weight and bias before calling AdaIN!\" b, c =", "+= [Conv2dBlock(1, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] for i in range(2):", "stride, padding=0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True # initialize padding", "= dim def forward(self, x): return self.model(x) # decoder class Ada_Decoder(nn.Module): # AdaIN", "= Down(64, 128) self.down2 = Down(128, 256) self.down3 = Down(256, 512) factor =", "height: input height upsample: upsampling type (nearest | bilateral) nclasses: number of semantice", "-1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std +", "elif norm == 'in': #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) self.norm = nn.InstanceNorm2d(norm_dim) elif norm", "self.conv2 = conv_bn_relu(64, 64, 3, 1, 1) self.pred = nn.Conv2d(64, self.num_classes, 1, 1,", "x 224 self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)] #64x112x112 self.main +=", "layer if norm == 'sn': self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) else: self.fc =", "Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') # MLP to generate AdaIN parameters self.mlp = MLP(z_length,", "the model num_adain_params = 0 for m in model.modules(): if m.__class__.__name__ == \"AdaptiveInstanceNorm2d\":", "= nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none':", "activation=activ, pad_type=pad_type)] for i in range(2): self.model += [Conv2dBlock(dim, 2 * dim, 4,", "= 2 if bilinear else 1 self.down4 = Down(512, 1024 // factor) self.up1", "m.weight = std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features: adain_params = 
adain_params[:, 2*m.num_features:] def get_num_adain_params(self,", "= x.size(0) down_out = self.main(x) down_out = down_out.view(b_size, -1) output = self.out(down_out) return", "dim // 2, 3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)] dim //= 2 #", "= Up(1024, 512 // factor, bilinear) self.up2 = Up(512, 256 // factor, bilinear)", "self.up2(x, x3) x = self.up3(x, x2) x = self.up4(x, x1) logits = self.outc(x)", "from models.spade_resblk import * device = torch.device('cuda:0') # content class Segmentor(nn.Module): def __init__(self,", "height, num_classes, ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels): super(SDNet, self).__init__() \"\"\" Args:", "= None else: assert 0, \"Unsupported activation: {}\".format(activation) # initialize convolution if norm", "= self.segmentor(a_out) logvar_out = None mu_out = None #t0 = time.time() if script_type", "F.softmax(out, dim=1) out = self.rounding(out) return out class UNet(nn.Module): def __init__(self, n_channels, n_classes,", "mu_out_tilde = None return reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out def reconstruct(self,", "# reconstruct an image images_recon = self.decode(a, z) return images_recon def decode(self, content,", "num_adain_params class Decoder(nn.Module): def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'): super(Decoder, self).__init__() self.model", "self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm ==", "= conv_bn_relu(self.num_output_channels, 64, 3, 1, 1) self.conv2 = conv_bn_relu(64, 64, 3, 1, 1)", "= RoundLayer() def forward(self, x): out = self.unet(x) out = F.softmax(out, dim=1) out", "0.4 than the two lines listed below. 
mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape)", "1, 3, norm=norm, activation=activ, pad_type=pad_type)] for i in range(2): self.model += [Conv2dBlock(dim, 2", "= nn.Sequential(*self.model) def forward(self, x): return self.model(x) class MLP(nn.Module): def __init__(self, input_dim, output_dim,", "self.rounding = RoundLayer() def forward(self, x): out = self.unet(x) out = F.softmax(out, dim=1)", "'test': z_out = self.m_encoder(x) reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = None return", "= self.decode(a, z) return images_recon def decode(self, content, style): # decode content and", "self.up4(x, x1) logits = self.outc(x) return logits # style class AdaptiveInstanceNorm2d(nn.Module): def __init__(self,", "self.unet(x) out = F.softmax(out, dim=1) out = self.rounding(out) return out class UNet(nn.Module): def", "\"\"\" self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect') # MLP to generate AdaIN parameters", "self.z_length = z_length self.anatomy_out_channels = anatomy_out_channels self.norm = norm self.upsample = upsample self.num_classes", "for i in range(3): self.model += [Conv2dBlock(dim, dim // 2, 3, 1, 1,", "== 'bn': self.norm = nn.BatchNorm1d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm1d(norm_dim) elif", "bias=False))] #128x56x56 self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1,", "[1] * (x.dim() - 1) # print(x.size()) if x.size(0) == 1: # These", "norm == 'sn': self.norm = None else: assert 0, \"Unsupported normalization: {}\".format(norm) #", "= None self.bias = None # just dummy buffers, not used self.register_buffer('running_mean', torch.zeros(num_features))", "= std.contiguous().view(-1) if adain_params.size(1) > 2*m.num_features: adain_params = adain_params[:, 2*m.num_features:] def get_num_adain_params(self, model):", "i in range(3): self.model += [Conv2dBlock(dim, dim // 2, 3, 1, 1, norm='ln',", "self.conv1 = 
conv_bn_relu(self.num_output_channels, 64, 3, 1, 1) self.conv2 = conv_bn_relu(64, 64, 3, 1,", "self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation", "self).__init__() self.n_channels = n_channels self.n_classes = n_classes self.bilinear = bilinear self.inc = DoubleConv(n_channels,", "self.model += [Conv2dBlock(dim, dim // 2, 3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)] dim", "self.m_encoder(x) reco = self.decoder(a_out, z_out, self.decoder_type) mu_out_tilde = None return reco, z_out, mu_out_tilde,", "+ eps) class LinearBlock(nn.Module): def __init__(self, input_dim, output_dim, norm='none', activation='relu'): super(LinearBlock, self).__init__() use_bias", "get_num_adain_params(self, model): # return the number of AdaIN parameters needed by the model", "= norm self.upsample = upsample self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True) self.rounding = RoundLayer()", "7 * 7, 1) self.main = nn.Sequential(*self.main) def forward(self, x): b_size = x.size(0)", "nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu':", "self.ndf = ndf self.num_output_channels = num_output_channels self.norm = norm self.upsample = upsample self.unet", "self.width = width self.height = height self.ndf = ndf self.num_output_channels = num_output_channels self.norm", "kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self," ]
[ "AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"],", "def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"],", "loudness self.mode = mode self.speechiness = speechiness self.acousticness = acousticness self.instrumentalness = instrumentalness", "MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1 MIN_POPULARITY, MAX_POPULARITY = 0, 100 MIN_DANCEABILITY = MIN_ENERGY", "100 MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS = MIN_LIVENESS", "acousticness (float): [0,1] liveness (float): [0,1] valence (float): [0,1] tempo (float): bpm. 
duration_ms", "0, 11 MIN_TEMPO, MAX_TEMPO = 0, 500 MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS,", "= tempo self.duration_ms = duration_ms self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"],", "= time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"],", "spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values():", "[0,1] liveness (float): [0,1] valence (float): [0,1] tempo (float): bpm. duration_ms (int). time_signature", "\"\"\" Params: danceability (float): [0,1] energy (float): [0,1] key (int). 
loudness (float): decibels?", "0, 900000 MIN_LOUDNESS, MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1 MIN_POPULARITY,", "(float): [0,1] instrumentalness (float): [0,1] acousticness (float): [0,1] liveness (float): [0,1] valence (float):", "= liveness self.valence = valence self.tempo = tempo self.duration_ms = duration_ms self.time_signature =", "instrumentalness self.liveness = liveness self.valence = valence self.tempo = tempo self.duration_ms = duration_ms", "spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS,", "MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1 MIN_POPULARITY, MAX_POPULARITY = 0,", ") def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS,", "liveness, valence, tempo, duration_ms, time_signature): \"\"\" Params: danceability (float): [0,1] energy (float): [0,1]", "self.energy = energy self.key = key self.loudness = loudness self.mode = mode self.speechiness", "acousticness self.instrumentalness = instrumentalness self.liveness = liveness self.valence = valence self.tempo = tempo", "AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS,", "[0,1] instrumentalness (float): [0,1] acousticness (float): [0,1] liveness (float): [0,1] valence (float): [0,1]", "self.key = key self.loudness = loudness self.mode = mode 
self.speechiness = speechiness self.acousticness", "MIN_PERCENTAGE MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS", "self.loudness = loudness self.mode = mode self.speechiness = speechiness self.acousticness = acousticness self.instrumentalness", "[0,1] energy (float): [0,1] key (int). loudness (float): decibels? mode (int). speechiness (float):", "(int). speechiness (float): [0,1] instrumentalness (float): [0,1] acousticness (float): [0,1] liveness (float): [0,1]", "loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, duration_ms, time_signature): \"\"\" Params: danceability", "with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, AudioFeatures.MAX_SPEECHINESS, AudioFeatures.MAX_ACOUSTICNESS, AudioFeatures.MAX_INSTRUMENTALNESS, AudioFeatures.MAX_LIVENESS, AudioFeatures.MAX_VALENCE,", "tempo self.duration_ms = duration_ms self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"],", "from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"],", "AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, 
AudioFeatures.MAX_SPEECHINESS,", "= MAX_VALENCE = MAX_PERCENTAGE def __init__(self, danceability, energy, key, loudness, mode, speechiness, acousticness,", "def with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, AudioFeatures.MAX_SPEECHINESS, AudioFeatures.MAX_ACOUSTICNESS, AudioFeatures.MAX_INSTRUMENTALNESS, AudioFeatures.MAX_LIVENESS,", "= MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE", "MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE def __init__(self, danceability, energy, key, loudness,", "MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE def __init__(self, danceability, energy, key,", "AudioFeatures: MIN_KEY_VALUE, MAX_KEY_VALUE = 0, 11 MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE", "spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], )", "MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS =", "0, 11 MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO,", "MAX_PERCENTAGE def __init__(self, danceability, energy, key, loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence,", "-60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1 MIN_POPULARITY, MAX_POPULARITY = 0, 100 MIN_DANCEABILITY", "duration_ms, time_signature): \"\"\" Params: danceability (float): 
[0,1] energy (float): [0,1] key (int). loudness", "MAX_PERCENTAGE = 0, 1 MIN_POPULARITY, MAX_POPULARITY = 0, 100 MIN_DANCEABILITY = MIN_ENERGY =", "spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY,", "danceability (float): [0,1] energy (float): [0,1] key (int). loudness (float): decibels? mode (int).", "danceability self.energy = energy self.key = key self.loudness = loudness self.mode = mode", "= 0, 100 MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS", "speechiness, acousticness, instrumentalness, liveness, valence, tempo, duration_ms, time_signature): \"\"\" Params: danceability (float): [0,1]", "time_signature): \"\"\" Params: danceability (float): [0,1] energy (float): [0,1] key (int). loudness (float):", "self.speechiness = speechiness self.acousticness = acousticness self.instrumentalness = instrumentalness self.liveness = liveness self.valence", "(float): [0,1] valence (float): [0,1] tempo (float): bpm. duration_ms (int). time_signature (int). 
\"\"\"", "= MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE def __init__(self, danceability, energy,", "time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"],", "= 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO, MAX_TEMPO = 0, 500", "AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE,", "= valence self.tempo = tempo self.duration_ms = duration_ms self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features):", "instrumentalness, liveness, valence, tempo, duration_ms, time_signature): \"\"\" Params: danceability (float): [0,1] energy (float):", "__init__(self, danceability, energy, key, loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, duration_ms,", "AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS,", "key, loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, duration_ms, time_signature): \"\"\" Params:", "tempo (float): bpm. duration_ms (int). time_signature (int). 
\"\"\" self.danceability = danceability self.energy =", "AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, )", "return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, AudioFeatures.MAX_SPEECHINESS, AudioFeatures.MAX_ACOUSTICNESS, AudioFeatures.MAX_INSTRUMENTALNESS, AudioFeatures.MAX_LIVENESS, AudioFeatures.MAX_VALENCE, AudioFeatures.MAX_TEMPO,", "def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS,", "MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE def", "= 0, 500 MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS = -60, 0", "= -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1 MIN_POPULARITY, MAX_POPULARITY = 0, 100", "0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO, MAX_TEMPO = 0, 500 MIN_DURATION_MS,", "MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE =", "MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE def __init__(self, danceability, energy, key, loudness, mode, speechiness,", "AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, 
AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values():", "900000 MIN_LOUDNESS, MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1 MIN_POPULARITY, MAX_POPULARITY", "AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def", "AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, AudioFeatures.MAX_SPEECHINESS, AudioFeatures.MAX_ACOUSTICNESS, AudioFeatures.MAX_INSTRUMENTALNESS, AudioFeatures.MAX_LIVENESS, AudioFeatures.MAX_VALENCE, AudioFeatures.MAX_TEMPO, AudioFeatures.MAX_DURATION_MS,", "MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO, MAX_TEMPO = 0, 500 MIN_DURATION_MS, MAX_DURATION_MS = 0,", "AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values(): return AudioFeatures(", "MAX_VALENCE = MAX_PERCENTAGE def __init__(self, danceability, energy, key, loudness, mode, speechiness, acousticness, instrumentalness,", "= 0, 11 MIN_TEMPO, MAX_TEMPO = 0, 500 MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000", "key self.loudness = loudness self.mode = mode self.speechiness = speechiness self.acousticness = acousticness", ") def with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, AudioFeatures.MAX_SPEECHINESS, AudioFeatures.MAX_ACOUSTICNESS, AudioFeatures.MAX_INSTRUMENTALNESS,", "MIN_ACOUSTICNESS = MIN_PERCENTAGE 
MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY", "self.danceability = danceability self.energy = energy self.key = key self.loudness = loudness self.mode", "(float): [0,1] key (int). loudness (float): decibels? mode (int). speechiness (float): [0,1] instrumentalness", "MAX_KEY_VALUE = 0, 11 MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0,", "self.duration_ms = duration_ms self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"],", "= MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE", "(float): bpm. duration_ms (int). time_signature (int). \"\"\" self.danceability = danceability self.energy = energy", "= MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY =", "MAX_POPULARITY = 0, 100 MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE", "= MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS", "self.tempo = tempo self.duration_ms = duration_ms self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures(", "duration_ms self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"],", "AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def 
with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY,", "0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1 MIN_POPULARITY, MAX_POPULARITY = 0, 100 MIN_DANCEABILITY =", "= key self.loudness = loudness self.mode = mode self.speechiness = speechiness self.acousticness =", "= duration_ms self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"],", "= 0, 1 MIN_POPULARITY, MAX_POPULARITY = 0, 100 MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS", "return AudioFeatures( spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"],", "self.instrumentalness = instrumentalness self.liveness = liveness self.valence = valence self.tempo = tempo self.duration_ms", "= MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE def __init__(self,", "AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values(): return", "liveness (float): [0,1] valence (float): [0,1] tempo (float): bpm. duration_ms (int). time_signature (int).", "key (int). loudness (float): decibels? mode (int). 
speechiness (float): [0,1] instrumentalness (float): [0,1]", "valence, tempo, duration_ms, time_signature): \"\"\" Params: danceability (float): [0,1] energy (float): [0,1] key", "MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE =", "spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS,", "[0,1] key (int). loudness (float): decibels? mode (int). speechiness (float): [0,1] instrumentalness (float):", "spotify_audio_features[\"danceability\"], spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"],", "MIN_TEMPO, MAX_TEMPO = 0, 500 MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS =", "1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO, MAX_TEMPO = 0, 500 MIN_DURATION_MS, MAX_DURATION_MS", "liveness self.valence = valence self.tempo = tempo self.duration_ms = duration_ms self.time_signature = time_signature", "time_signature (int). 
\"\"\" self.danceability = danceability self.energy = energy self.key = key self.loudness", "MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY", "AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, AudioFeatures.MAX_SPEECHINESS, AudioFeatures.MAX_ACOUSTICNESS,", "instrumentalness (float): [0,1] acousticness (float): [0,1] liveness (float): [0,1] valence (float): [0,1] tempo", "tempo, duration_ms, time_signature): \"\"\" Params: danceability (float): [0,1] energy (float): [0,1] key (int).", "self.mode = mode self.speechiness = speechiness self.acousticness = acousticness self.instrumentalness = instrumentalness self.liveness", "= instrumentalness self.liveness = liveness self.valence = valence self.tempo = tempo self.duration_ms =", "spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS,", "11 MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO, MAX_TEMPO", "MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS", "MIN_POPULARITY, MAX_POPULARITY = 0, 100 MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS =", "mode (int). 
speechiness (float): [0,1] instrumentalness (float): [0,1] acousticness (float): [0,1] liveness (float):", "spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE,", "(int). \"\"\" self.danceability = danceability self.energy = energy self.key = key self.loudness =", "= MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE", "0, 1 MIN_POPULARITY, MAX_POPULARITY = 0, 100 MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS =", "MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE def __init__(self, danceability,", "= loudness self.mode = mode self.speechiness = speechiness self.acousticness = acousticness self.instrumentalness =", "mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, duration_ms, time_signature): \"\"\" Params: danceability (float):", "self.liveness = liveness self.valence = valence self.tempo = tempo self.duration_ms = duration_ms self.time_signature", "(float): [0,1] liveness (float): [0,1] valence (float): [0,1] tempo (float): bpm. duration_ms (int).", "500 MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE", "duration_ms (int). time_signature (int). 
\"\"\" self.danceability = danceability self.energy = energy self.key =", "\"\"\" self.danceability = danceability self.energy = energy self.key = key self.loudness = loudness", "MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO, MAX_TEMPO = 0,", "= danceability self.energy = energy self.key = key self.loudness = loudness self.mode =", "MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO, MAX_TEMPO =", "= MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE", "valence self.tempo = tempo self.duration_ms = duration_ms self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features): return", "AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE,", "MIN_KEY_VALUE, MAX_KEY_VALUE = 0, 11 MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE =", "MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS", "speechiness self.acousticness = acousticness self.instrumentalness = instrumentalness self.liveness = liveness self.valence = valence", "energy (float): [0,1] key (int). loudness (float): decibels? mode (int). speechiness (float): [0,1]", "MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS =", "(int). loudness (float): decibels? mode (int). speechiness (float): [0,1] instrumentalness (float): [0,1] acousticness", "(float): [0,1] energy (float): [0,1] key (int). loudness (float): decibels? mode (int). 
speechiness", "return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO,", "[0,1] acousticness (float): [0,1] liveness (float): [0,1] valence (float): [0,1] tempo (float): bpm.", "(int). time_signature (int). \"\"\" self.danceability = danceability self.energy = energy self.key = key", "(float): decibels? mode (int). speechiness (float): [0,1] instrumentalness (float): [0,1] acousticness (float): [0,1]", "Params: danceability (float): [0,1] energy (float): [0,1] key (int). loudness (float): decibels? mode", "0, 100 MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS =", "danceability, energy, key, loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, duration_ms, time_signature):", "= 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1", "self.acousticness = acousticness self.instrumentalness = instrumentalness self.liveness = liveness self.valence = valence self.tempo", "AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, AudioFeatures.MAX_SPEECHINESS, AudioFeatures.MAX_ACOUSTICNESS, AudioFeatures.MAX_INSTRUMENTALNESS, AudioFeatures.MAX_LIVENESS, AudioFeatures.MAX_VALENCE, AudioFeatures.MAX_TEMPO, AudioFeatures.MAX_DURATION_MS, AudioFeatures.MAX_TIME_SIGNATURE, )", "(float): [0,1] acousticness (float): [0,1] liveness (float): [0,1] valence (float): [0,1] tempo (float):", "[0,1] tempo (float): bpm. duration_ms (int). time_signature (int). 
\"\"\" self.danceability = danceability self.energy", "= MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE def __init__(self, danceability, energy, key, loudness, mode,", "decibels? mode (int). speechiness (float): [0,1] instrumentalness (float): [0,1] acousticness (float): [0,1] liveness", "MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS = MIN_LIVENESS =", "spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def", "energy, key, loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, duration_ms, time_signature): \"\"\"", "= MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS =", "acousticness, instrumentalness, liveness, valence, tempo, duration_ms, time_signature): \"\"\" Params: danceability (float): [0,1] energy", "= speechiness self.acousticness = acousticness self.instrumentalness = instrumentalness self.liveness = liveness self.valence =", "spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE,", "0, 500 MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE,", "energy self.key = key self.loudness = loudness self.mode = mode self.speechiness = speechiness", "MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS 
= MIN_LIVENESS = MIN_VALENCE =", "loudness (float): decibels? mode (int). speechiness (float): [0,1] instrumentalness (float): [0,1] acousticness (float):", "= energy self.key = key self.loudness = loudness self.mode = mode self.speechiness =", "mode self.speechiness = speechiness self.acousticness = acousticness self.instrumentalness = instrumentalness self.liveness = liveness", "MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE MAX_INSTRUMENTALNESS = MAX_LIVENESS =", "= 0, 11 MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11", "def __init__(self, danceability, energy, key, loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo,", "[0,1] valence (float): [0,1] tempo (float): bpm. duration_ms (int). time_signature (int). \"\"\" self.danceability", "MAX_TEMPO = 0, 500 MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS = -60,", "speechiness (float): [0,1] instrumentalness (float): [0,1] acousticness (float): [0,1] liveness (float): [0,1] valence", "MIN_LOUDNESS, MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1 MIN_POPULARITY, MAX_POPULARITY =", "AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE, ) def with_maximum_values(): return AudioFeatures( AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY,", "= MIN_PERCENTAGE MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE MAX_DANCEABILITY = MAX_ENERGY =", "class AudioFeatures: MIN_KEY_VALUE, MAX_KEY_VALUE = 0, 11 MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1 MIN_TIME_SIGNATURE,", "11 MIN_TEMPO, MAX_TEMPO = 0, 500 MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS", "self.time_signature = time_signature def from_spotify_audio_features(spotify_audio_features): return AudioFeatures( spotify_audio_features[\"danceability\"], 
spotify_audio_features[\"energy\"], spotify_audio_features[\"key\"], spotify_audio_features[\"loudness\"], spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"],", "AudioFeatures.MAX_DANCEABILITY, AudioFeatures.MAX_ENERGY, AudioFeatures.MAX_KEY_VALUE, AudioFeatures.MAX_LOUDNESS, AudioFeatures.MAX_MODE_VALUE, AudioFeatures.MAX_SPEECHINESS, AudioFeatures.MAX_ACOUSTICNESS, AudioFeatures.MAX_INSTRUMENTALNESS, AudioFeatures.MAX_LIVENESS, AudioFeatures.MAX_VALENCE, AudioFeatures.MAX_TEMPO, AudioFeatures.MAX_DURATION_MS, AudioFeatures.MAX_TIME_SIGNATURE,", "self.valence = valence self.tempo = tempo self.duration_ms = duration_ms self.time_signature = time_signature def", "with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE,", "= mode self.speechiness = speechiness self.acousticness = acousticness self.instrumentalness = instrumentalness self.liveness =", "= acousticness self.instrumentalness = instrumentalness self.liveness = liveness self.valence = valence self.tempo =", "= MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE", "bpm. duration_ms (int). time_signature (int). 
\"\"\" self.danceability = danceability self.energy = energy self.key", "AudioFeatures.MIN_DANCEABILITY, AudioFeatures.MIN_ENERGY, AudioFeatures.MIN_KEY_VALUE, AudioFeatures.MIN_LOUDNESS, AudioFeatures.MIN_MODE_VALUE, AudioFeatures.MIN_SPEECHINESS, AudioFeatures.MIN_ACOUSTICNESS, AudioFeatures.MIN_INSTRUMENTALNESS, AudioFeatures.MIN_LIVENESS, AudioFeatures.MIN_VALENCE, AudioFeatures.MIN_TEMPO, AudioFeatures.MIN_DURATION_MS, AudioFeatures.MIN_TIME_SIGNATURE,", "MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11 MIN_TEMPO, MAX_TEMPO = 0, 500 MIN_DURATION_MS, MAX_DURATION_MS =", "MAX_DURATION_MS = 0, 900000 MIN_LOUDNESS, MAX_LOUDNESS = -60, 0 MIN_PERCENTAGE, MAX_PERCENTAGE = 0,", "spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return AudioFeatures( AudioFeatures.MIN_DANCEABILITY,", "valence (float): [0,1] tempo (float): bpm. duration_ms (int). time_signature (int). \"\"\" self.danceability =", "(float): [0,1] tempo (float): bpm. duration_ms (int). time_signature (int). 
\"\"\" self.danceability = danceability", "= MAX_PERCENTAGE def __init__(self, danceability, energy, key, loudness, mode, speechiness, acousticness, instrumentalness, liveness,", "spotify_audio_features[\"mode\"], spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return", "spotify_audio_features[\"speechiness\"], spotify_audio_features[\"acousticness\"], spotify_audio_features[\"instrumentalness\"], spotify_audio_features[\"liveness\"], spotify_audio_features[\"valence\"], spotify_audio_features[\"tempo\"], spotify_audio_features[\"duration_ms\"], spotify_audio_features[\"time_signature\"], ) def with_minimum_values(): return AudioFeatures(", "1 MIN_POPULARITY, MAX_POPULARITY = 0, 100 MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS" ]
[ "file: file.write('These are the results of our experiment') file.write('\\n') # Add new line", "experiment') file.write('\\n') # Add new line with \\n file.write('Account X has N followers", "# Add new line with \\n file.write('Account X has N followers on social", "the file with open('dataresults.txt', 'a') as file: file.write('These are the results of our", "# Create the file with open('dataresults.txt', 'a') as file: file.write('These are the results", "open('dataresults.txt', 'a') as file: file.write('These are the results of our experiment') file.write('\\n') #", "of our experiment') file.write('\\n') # Add new line with \\n file.write('Account X has", "Create the file with open('dataresults.txt', 'a') as file: file.write('These are the results of", "'a') as file: file.write('These are the results of our experiment') file.write('\\n') # Add", "our experiment') file.write('\\n') # Add new line with \\n file.write('Account X has N", "file with open('dataresults.txt', 'a') as file: file.write('These are the results of our experiment')", "are the results of our experiment') file.write('\\n') # Add new line with \\n", "Add new line with \\n file.write('Account X has N followers on social Z')", "the results of our experiment') file.write('\\n') # Add new line with \\n file.write('Account", "file.write('These are the results of our experiment') file.write('\\n') # Add new line with", "with open('dataresults.txt', 'a') as file: file.write('These are the results of our experiment') file.write('\\n')", "new line with \\n file.write('Account X has N followers on social Z') file.write('\\n')", "as file: file.write('These are the results of our experiment') file.write('\\n') # Add new", "results of our experiment') file.write('\\n') # Add new line with \\n file.write('Account X", "file.write('\\n') # Add new line with \\n file.write('Account X has N followers on" ]
[ "None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None", "# LINKS = ( # ('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), #", "'pdfs'] PATH = 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y'", "'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general'", "= '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU = True THEME = './themes/attila' HEADER_COVER =", "since 10/2017.\" } } # Feed generation is usually not desired when developing", "= 10 # Uncomment following line if you want document-relative URLs when developing", "AUTHOR = 'JQ' SITENAME = 'Japan Trip 2018' SITEURL = 'http://localhost:2018' STATIC_PATHS =", "None # LINKS = ( # ('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'),", "<filename>pelicanconf.py #!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals", "vacation time since 10/2017.\" } } # Feed generation is usually not desired", "= 'read this stuff plz' AUTHORS_BIO = { \"jq\": { \"name\": \"JQ\", \"cover\":", "\"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring others to use like half their vacation", "{ \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring others to", "Feed generation is usually not desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM =", "time since 10/2017.\" } } # Feed generation is usually not desired when", "SOCIAL = ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10 # Uncomment following line if", "Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'),", 
"SITESUBTITLE = 'read this stuff plz' AUTHORS_BIO = { \"jq\": { \"name\": \"JQ\",", "Trip 2018' SITEURL = 'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs'] PATH = 'content' TIMEZONE", "\"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring others to use like half their", "False SITESUBTITLE = 'read this stuff plz' AUTHORS_BIO = { \"jq\": { \"name\":", "\"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring others to use like half", "('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan Travel Advice", ") SOCIAL = ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10 # Uncomment following line", "- Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan Travel Advice FAQ',", "'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs'] PATH = 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG =", "THEME = './themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE = 'read this", "STATIC_PATHS = ['images', 'pdfs'] PATH = 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG = 'en'", "# from __future__ import unicode_literals AUTHOR = 'JQ' SITENAME = 'Japan Trip 2018'", "use like half their vacation time since 10/2017.\" } } # Feed generation", "SITEURL = 'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs'] PATH = 'content' TIMEZONE = 'America/Chicago'", "Uncomment following line if you want document-relative URLs when developing #RELATIVE_URLS = True", "like half their vacation time since 10/2017.\" } } # Feed generation is", "= None # LINKS = ( # ('Reddit - Japan Travel Advice Wiki',", "'JQ' SITENAME = 'Japan Trip 2018' SITEURL = 'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs']", "others to use like half their vacation time since 10/2017.\" } } #", "stuff plz' AUTHORS_BIO = { 
\"jq\": { \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\",", "= None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None # LINKS", "True THEME = './themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE = 'read", "('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10 # Uncomment following line if you want document-relative", "\"bio\": \"Inspiring others to use like half their vacation time since 10/2017.\" }", "-*- coding: utf-8 -*- # from __future__ import unicode_literals AUTHOR = 'JQ' SITENAME", "= 'general' DISPLAY_PAGES_ON_MENU = True THEME = './themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE =", "= './themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE = 'read this stuff", "'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL =", "2018' SITEURL = 'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs'] PATH = 'content' TIMEZONE =", "['images', 'pdfs'] PATH = 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT =", "__future__ import unicode_literals AUTHOR = 'JQ' SITENAME = 'Japan Trip 2018' SITEURL =", "unicode_literals AUTHOR = 'JQ' SITENAME = 'Japan Trip 2018' SITEURL = 'http://localhost:2018' STATIC_PATHS", "DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU = True THEME = './themes/attila' HEADER_COVER", "= False SITESUBTITLE = 'read this stuff plz' AUTHORS_BIO = { \"jq\": {", "this stuff plz' AUTHORS_BIO = { \"jq\": { \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\":", "None AUTHOR_FEED_RSS = None # LINKS = ( # ('Reddit - Japan Travel", "# ('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL = (", "python # -*- coding: utf-8 -*- # from __future__ 
import unicode_literals AUTHOR =", "\"website\": \"http://quarl.es\", \"bio\": \"Inspiring others to use like half their vacation time since", "\"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring others to use", "\"http://quarl.es\", \"bio\": \"Inspiring others to use like half their vacation time since 10/2017.\"", "# -*- coding: utf-8 -*- # from __future__ import unicode_literals AUTHOR = 'JQ'", "( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10 # Uncomment following line if you want", "= 'Japan Trip 2018' SITEURL = 'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs'] PATH =", "is usually not desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM", "from __future__ import unicode_literals AUTHOR = 'JQ' SITENAME = 'Japan Trip 2018' SITEURL", "\"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring others to use like half their vacation time", "('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL = ( ('envelope','mailto:<EMAIL>'),", "Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), #", "= 'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs'] PATH = 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG", "utf-8 -*- # from __future__ import unicode_literals AUTHOR = 'JQ' SITENAME = 'Japan", "= 'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU =", "10/2017.\" } } # Feed generation is usually not desired when developing FEED_ALL_ATOM", "Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # )", "TRANSLATION_FEED_ATOM = None 
AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None # LINKS = (", "} } # Feed generation is usually not desired when developing FEED_ALL_ATOM =", "'general' DISPLAY_PAGES_ON_MENU = True THEME = './themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE = False", "'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE = 'read this stuff plz' AUTHORS_BIO = {", "\"Inspiring others to use like half their vacation time since 10/2017.\" } }", "FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS", "SHOW_FULL_ARTICLE = False SITESUBTITLE = 'read this stuff plz' AUTHORS_BIO = { \"jq\":", "desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM", "# Uncomment following line if you want document-relative URLs when developing #RELATIVE_URLS =", "None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None # LINKS =", "None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None # LINKS = ( # ('Reddit", "= 'JQ' SITENAME = 'Japan Trip 2018' SITEURL = 'http://localhost:2018' STATIC_PATHS = ['images',", "= { \"jq\": { \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\":", "not desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None", "{ \"jq\": { \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring", "10 # Uncomment following line if you want document-relative URLs when developing #RELATIVE_URLS", "'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU = True", "= True THEME = './themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE =", ") DEFAULT_PAGINATION = 10 # Uncomment following line if you want document-relative URLs", 
"their vacation time since 10/2017.\" } } # Feed generation is usually not", "\"jq\": { \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring others", "Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL = ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION =", "FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL = ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10 #", "SITENAME = 'Japan Trip 2018' SITEURL = 'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs'] PATH", "= None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS =", "- Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL = ( ('envelope','mailto:<EMAIL>'), )", "PATH = 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY", "CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None #", "coding: utf-8 -*- # from __future__ import unicode_literals AUTHOR = 'JQ' SITENAME =", "import unicode_literals AUTHOR = 'JQ' SITENAME = 'Japan Trip 2018' SITEURL = 'http://localhost:2018'", "DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU = True THEME", "= 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY =", "generation is usually not desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None", "AUTHOR_FEED_RSS = None # LINKS = ( # ('Reddit - Japan Travel Advice", "'read this stuff plz' AUTHORS_BIO = { \"jq\": { \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\",", "developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None", "= None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None # 
LINKS = ( #", "Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL", "DISPLAY_PAGES_ON_MENU = True THEME = './themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE", "# ('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan Travel", "plz' AUTHORS_BIO = { \"jq\": { \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\":", "= ( # ('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit -", "Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL = ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION", "# Feed generation is usually not desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM", "TIMEZONE = 'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU", "AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None # LINKS = ( # ('Reddit -", "half their vacation time since 10/2017.\" } } # Feed generation is usually", "Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL = ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10", "\"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\", \"bio\": \"Inspiring others to use like", "AUTHORS_BIO = { \"jq\": { \"name\": \"JQ\", \"cover\": \"images/arahira.jpg\", \"image\": \"images/avatar.gif\", \"website\": \"http://quarl.es\",", "to use like half their vacation time since 10/2017.\" } } # Feed", "# ) SOCIAL = ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10 # Uncomment following", "= ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10 # Uncomment following line if you", "HEADER_COVER = 
'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE = 'read this stuff plz' AUTHORS_BIO", "= 'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU = True THEME =", "usually not desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM =", "= 'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE = 'read this stuff plz' AUTHORS_BIO =", "LINKS = ( # ('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit", "DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU = True THEME = './themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE", "} # Feed generation is usually not desired when developing FEED_ALL_ATOM = None", "#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals AUTHOR", "= None AUTHOR_FEED_RSS = None # LINKS = ( # ('Reddit - Japan", "DEFAULT_PAGINATION = 10 # Uncomment following line if you want document-relative URLs when", "'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'), # ) SOCIAL = ( ('envelope','mailto:<EMAIL>'), ) DEFAULT_PAGINATION = 10 # Uncomment", "= ['images', 'pdfs'] PATH = 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG = 'en' DEFAULT_DATE_FORMAT", "( # ('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'), # ('Reddit - Japan", "'%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU = True THEME = './themes/attila' HEADER_COVER = 'images/skytree.jpg'", "'Japan Trip 2018' SITEURL = 'http://localhost:2018' STATIC_PATHS = ['images', 'pdfs'] PATH = 'content'", "'./themes/attila' HEADER_COVER = 'images/skytree.jpg' SHOW_FULL_ARTICLE = False SITESUBTITLE = 'read this stuff plz'", "'en' DEFAULT_DATE_FORMAT = '%-m/%-d/%Y' DEFAULT_CATEGORY = 'general' DISPLAY_PAGES_ON_MENU = True THEME = './themes/attila'", "when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None 
AUTHOR_FEED_ATOM =", "-*- # from __future__ import unicode_literals AUTHOR = 'JQ' SITENAME = 'Japan Trip" ]
[ "import OrderPosition from pretix.base.services.mail import SendMailException from pretix.base.services.tasks import EventTask from pretix.celery_app import", "from pretix.base.models import OrderPosition from pretix.base.services.mail import SendMailException from pretix.base.services.tasks import EventTask from", "language(op.order.locale, event.settings.region): email_template = event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order, position=op)", "else: op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except SendMailException: logger.exception(\"CWA reminder email could", "email_subject, email_template, email_context, \"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except", "op.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", )", "op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except SendMailException: logger.exception(\"CWA reminder email could not", "event.settings.region): email_template = event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order, position=op) try:", "position=op) try: if op.attendee_email: op.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject,", "email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except SendMailException: logger.exception(\"CWA reminder email could not be sent\")", "try: if op.attendee_email: op.send_mail( email_subject, email_template, email_context, 
\"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject, email_template,", "order=op.order, position=op) try: if op.attendee_email: op.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail(", "str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order, position=op) try: if op.attendee_email: op.send_mail( email_subject, email_template, email_context,", "get_email_context(event=event, order=op.order, position=op) try: if op.attendee_email: op.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.position.email.cwa\", ) else:", "= str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order, position=op) try: if op.attendee_email: op.send_mail( email_subject, email_template,", "pretix.base.services.mail import SendMailException from pretix.base.services.tasks import EventTask from pretix.celery_app import app logger =", "import logging from pretix.base.email import get_email_context from pretix.base.i18n import language from pretix.base.models import", "event, position): op = OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region): email_template = event.settings.cwa_checkin_email_body email_subject =", "pretix.base.email import get_email_context from pretix.base.i18n import language from pretix.base.models import OrderPosition from pretix.base.services.mail", "pretix.base.models import OrderPosition from pretix.base.services.mail import SendMailException from pretix.base.services.tasks import EventTask from pretix.celery_app", "@app.task(base=EventTask, bind=True) def send_email(self, event, position): op = OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region): email_template", "pretix.base.services.tasks import EventTask from pretix.celery_app import app logger = 
logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def", "def send_email(self, event, position): op = OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region): email_template = event.settings.cwa_checkin_email_body", "email_template, email_context, \"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except SendMailException:", "\"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except SendMailException: logger.exception(\"CWA reminder", "import get_email_context from pretix.base.i18n import language from pretix.base.models import OrderPosition from pretix.base.services.mail import", "event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order, position=op) try: if op.attendee_email: op.send_mail(", "pretix.celery_app import app logger = logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self, event, position): op", "from pretix.base.services.tasks import EventTask from pretix.celery_app import app logger = logging.getLogger(__name__) @app.task(base=EventTask, bind=True)", "import language from pretix.base.models import OrderPosition from pretix.base.services.mail import SendMailException from pretix.base.services.tasks import", "language from pretix.base.models import OrderPosition from pretix.base.services.mail import SendMailException from pretix.base.services.tasks import EventTask", "if op.attendee_email: op.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject, email_template, email_context,", "bind=True) def send_email(self, event, position): op = OrderPosition.objects.get(pk=position) with 
language(op.order.locale, event.settings.region): email_template =", "from pretix.celery_app import app logger = logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self, event, position):", "app logger = logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self, event, position): op = OrderPosition.objects.get(pk=position)", "import app logger = logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self, event, position): op =", "OrderPosition from pretix.base.services.mail import SendMailException from pretix.base.services.tasks import EventTask from pretix.celery_app import app", "= logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self, event, position): op = OrderPosition.objects.get(pk=position) with language(op.order.locale,", "OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region): email_template = event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event,", "import EventTask from pretix.celery_app import app logger = logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self,", "logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self, event, position): op = OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region):", "email_template = event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order, position=op) try: if", "email_subject = str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order, position=op) try: if op.attendee_email: op.send_mail( email_subject,", "op = OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region): email_template = 
event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context", ") else: op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except SendMailException: logger.exception(\"CWA reminder email", "from pretix.base.email import get_email_context from pretix.base.i18n import language from pretix.base.models import OrderPosition from", "= OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region): email_template = event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context =", "email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except SendMailException: logger.exception(\"CWA reminder email could not be", "get_email_context from pretix.base.i18n import language from pretix.base.models import OrderPosition from pretix.base.services.mail import SendMailException", "from pretix.base.services.mail import SendMailException from pretix.base.services.tasks import EventTask from pretix.celery_app import app logger", "position): op = OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region): email_template = event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject)", "EventTask from pretix.celery_app import app logger = logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self, event,", "logger = logging.getLogger(__name__) @app.task(base=EventTask, bind=True) def send_email(self, event, position): op = OrderPosition.objects.get(pk=position) with", "pretix.base.i18n import language from pretix.base.models import OrderPosition from pretix.base.services.mail import SendMailException from pretix.base.services.tasks", "email_context = get_email_context(event=event, order=op.order, position=op) try: if op.attendee_email: op.send_mail( email_subject, email_template, 
email_context, \"pretix_cwa.order.position.email.cwa\",", "send_email(self, event, position): op = OrderPosition.objects.get(pk=position) with language(op.order.locale, event.settings.region): email_template = event.settings.cwa_checkin_email_body email_subject", "from pretix.base.i18n import language from pretix.base.models import OrderPosition from pretix.base.services.mail import SendMailException from", "with language(op.order.locale, event.settings.region): email_template = event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order,", "email_context, \"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\", ) except SendMailException: logger.exception(\"CWA", "op.attendee_email: op.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.position.email.cwa\", ) else: op.order.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.email.cwa\",", "= get_email_context(event=event, order=op.order, position=op) try: if op.attendee_email: op.send_mail( email_subject, email_template, email_context, \"pretix_cwa.order.position.email.cwa\", )", "= event.settings.cwa_checkin_email_body email_subject = str(event.settings.cwa_checkin_email_subject) email_context = get_email_context(event=event, order=op.order, position=op) try: if op.attendee_email:", "logging from pretix.base.email import get_email_context from pretix.base.i18n import language from pretix.base.models import OrderPosition", "SendMailException from pretix.base.services.tasks import EventTask from pretix.celery_app import app logger = logging.getLogger(__name__) @app.task(base=EventTask,", "import SendMailException from pretix.base.services.tasks import EventTask from pretix.celery_app import app logger = logging.getLogger(__name__)" ]
[ "from dataclasses import dataclass from typing import NewType from uuid import UUID from", "OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID prefix: str", "typing import NewType from uuid import UUID from ...shop.transfer.models import ShopID OrderNumberSequenceID =", "byceps.services.shop.order.transfer.number ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2021 <NAME> :License: Revised BSD (see `LICENSE` file for details)", "OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID prefix: str value: int OrderNumber = NewType('OrderNumber', str)", "\"\"\" from dataclasses import dataclass from typing import NewType from uuid import UUID", "from typing import NewType from uuid import UUID from ...shop.transfer.models import ShopID OrderNumberSequenceID", ":License: Revised BSD (see `LICENSE` file for details) \"\"\" from dataclasses import dataclass", "NewType from uuid import UUID from ...shop.transfer.models import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID)", "UUID from ...shop.transfer.models import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence: id:", "2006-2021 <NAME> :License: Revised BSD (see `LICENSE` file for details) \"\"\" from dataclasses", "\"\"\" byceps.services.shop.order.transfer.number ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2021 <NAME> :License: Revised BSD (see `LICENSE` file for", "...shop.transfer.models import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID shop_id:", ":Copyright: 2006-2021 <NAME> :License: Revised BSD (see `LICENSE` file for details) \"\"\" from", "= NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID 
prefix: str value:", "@dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID prefix: str value: int OrderNumber =", "`LICENSE` file for details) \"\"\" from dataclasses import dataclass from typing import NewType", "import NewType from uuid import UUID from ...shop.transfer.models import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID',", "details) \"\"\" from dataclasses import dataclass from typing import NewType from uuid import", "class OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID prefix: str value: int OrderNumber = NewType('OrderNumber',", "import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID", "from uuid import UUID from ...shop.transfer.models import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True)", "UUID) @dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID prefix: str value: int OrderNumber", "ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID prefix:", "file for details) \"\"\" from dataclasses import dataclass from typing import NewType from", "import UUID from ...shop.transfer.models import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence:", "NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID shop_id: ShopID prefix: str value: int", "uuid import UUID from ...shop.transfer.models import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class", "for details) \"\"\" from dataclasses import dataclass from typing import NewType from uuid", "(see `LICENSE` file for details) \"\"\" from dataclasses import 
dataclass from typing import", "dataclass from typing import NewType from uuid import UUID from ...shop.transfer.models import ShopID", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2021 <NAME> :License: Revised BSD (see `LICENSE` file for details) \"\"\"", "import dataclass from typing import NewType from uuid import UUID from ...shop.transfer.models import", "Revised BSD (see `LICENSE` file for details) \"\"\" from dataclasses import dataclass from", "<NAME> :License: Revised BSD (see `LICENSE` file for details) \"\"\" from dataclasses import", "BSD (see `LICENSE` file for details) \"\"\" from dataclasses import dataclass from typing", "dataclasses import dataclass from typing import NewType from uuid import UUID from ...shop.transfer.models", "from ...shop.transfer.models import ShopID OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID) @dataclass(frozen=True) class OrderNumberSequence: id: OrderNumberSequenceID" ]
[ "class maintains the image database for display to clients\"\"\" import os class ImageDatabase(object):", "\"\"\"This class maintains the image database for display to clients\"\"\" import os class", "import os class ImageDatabase(object): \"\"\"container for image data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES", "\"\"\"container for image data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES = set() FILE_DATA =", "CATEGORIES = set() FILE_DATA = {} def __init__(self, root_dir): self.root_dir = root_dir self._scan_for_images()", "file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES: FILE_DATA[category].add(file_path) def categories(self): return self.CATEGORIES.copy() def files_in_category(self, category):", "f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES: FILE_DATA[category].add(file_path) def categories(self): return self.CATEGORIES.copy()", "file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES: FILE_DATA[category].add(file_path) def categories(self): return self.CATEGORIES.copy() def", "= set() FILE_DATA = {} def __init__(self, root_dir): self.root_dir = root_dir self._scan_for_images() def", "root, category, f_names in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] = [] for f_name in f_names:", "self.CATEGORIES.add(category) FILE_DATA[category] = [] for f_name in f_names: file_path = os.path.join(root, f_name) file_type", "image data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES = set() FILE_DATA = {} def", "os class ImageDatabase(object): \"\"\"container for image data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES =", "display to clients\"\"\" import os class ImageDatabase(object): \"\"\"container for image data\"\"\" VALID_IMAGE_TYPES =", "def __init__(self, root_dir): self.root_dir = root_dir self._scan_for_images() def _scan_for_images(self): for root, 
category, f_names", "self.root_dir = root_dir self._scan_for_images() def _scan_for_images(self): for root, category, f_names in os.walk(self.root_dir): self.CATEGORIES.add(category)", "maintains the image database for display to clients\"\"\" import os class ImageDatabase(object): \"\"\"container", "os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] = [] for f_name in f_names: file_path = os.path.join(root, f_name)", "root_dir self._scan_for_images() def _scan_for_images(self): for root, category, f_names in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] =", "for image data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES = set() FILE_DATA = {}", "self._scan_for_images() def _scan_for_images(self): for root, category, f_names in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] = []", "set() FILE_DATA = {} def __init__(self, root_dir): self.root_dir = root_dir self._scan_for_images() def _scan_for_images(self):", "for f_name in f_names: file_path = os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type", "data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES = set() FILE_DATA = {} def __init__(self,", "= root_dir self._scan_for_images() def _scan_for_images(self): for root, category, f_names in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category]", "for root, category, f_names in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] = [] for f_name in", "in f_names: file_path = os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES:", "ImageDatabase(object): \"\"\"container for image data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES = set() FILE_DATA", "{} def __init__(self, root_dir): self.root_dir = root_dir self._scan_for_images() def _scan_for_images(self): for root, category,", "[\"png\", \"jpg\"] CATEGORIES = 
set() FILE_DATA = {} def __init__(self, root_dir): self.root_dir =", "FILE_DATA[category] = [] for f_name in f_names: file_path = os.path.join(root, f_name) file_type =", "to clients\"\"\" import os class ImageDatabase(object): \"\"\"container for image data\"\"\" VALID_IMAGE_TYPES = [\"png\",", "= [] for f_name in f_names: file_path = os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower()", "f_name in f_names: file_path = os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type in", "database for display to clients\"\"\" import os class ImageDatabase(object): \"\"\"container for image data\"\"\"", "= {} def __init__(self, root_dir): self.root_dir = root_dir self._scan_for_images() def _scan_for_images(self): for root,", "<filename>server/image/image_controller.py<gh_stars>0 \"\"\"This class maintains the image database for display to clients\"\"\" import os", "in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] = [] for f_name in f_names: file_path = os.path.join(root,", "[] for f_name in f_names: file_path = os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if", "category, f_names in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] = [] for f_name in f_names: file_path", "file_path = os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES: FILE_DATA[category].add(file_path) def", "= os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES: FILE_DATA[category].add(file_path) def categories(self):", "_scan_for_images(self): for root, category, f_names in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] = [] for f_name", "root_dir): self.root_dir = root_dir self._scan_for_images() def _scan_for_images(self): for root, category, f_names in 
os.walk(self.root_dir):", "image database for display to clients\"\"\" import os class ImageDatabase(object): \"\"\"container for image", "VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES = set() FILE_DATA = {} def __init__(self, root_dir):", "clients\"\"\" import os class ImageDatabase(object): \"\"\"container for image data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"]", "for display to clients\"\"\" import os class ImageDatabase(object): \"\"\"container for image data\"\"\" VALID_IMAGE_TYPES", "class ImageDatabase(object): \"\"\"container for image data\"\"\" VALID_IMAGE_TYPES = [\"png\", \"jpg\"] CATEGORIES = set()", "= [\"png\", \"jpg\"] CATEGORIES = set() FILE_DATA = {} def __init__(self, root_dir): self.root_dir", "= file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES: FILE_DATA[category].add(file_path) def categories(self): return self.CATEGORIES.copy() def files_in_category(self,", "\"jpg\"] CATEGORIES = set() FILE_DATA = {} def __init__(self, root_dir): self.root_dir = root_dir", "f_names in os.walk(self.root_dir): self.CATEGORIES.add(category) FILE_DATA[category] = [] for f_name in f_names: file_path =", "FILE_DATA = {} def __init__(self, root_dir): self.root_dir = root_dir self._scan_for_images() def _scan_for_images(self): for", "__init__(self, root_dir): self.root_dir = root_dir self._scan_for_images() def _scan_for_images(self): for root, category, f_names in", "f_names: file_path = os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES: FILE_DATA[category].add(file_path)", "os.path.join(root, f_name) file_type = file_path[file_path.rindex(\".\")+1:].lower() if file_type in self.VALID_FILE_TYPES: FILE_DATA[category].add(file_path) def categories(self): return", "the image database for display to clients\"\"\" import os class ImageDatabase(object): \"\"\"container for", "def _scan_for_images(self): for root, category, f_names in os.walk(self.root_dir): 
self.CATEGORIES.add(category) FILE_DATA[category] = [] for" ]
[ "Aligned to shape and cropped by bounding boxes face images # default shape", "with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) ->", "face recognition ResNet model. .. note:: * face alignment pre-processing used with 5", "\"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented", "width = image.shape[0:2] bounding_boxes = np.array([ [ max(rect.left(), 0), max(rect.top(), 0), min(rect.right(), width),", "ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib pre-trained face recognition ResNet model. .. note:: *", "def compute_embeddings(self, image, bounding_boxes, **kwargs): shapes = dlib.full_object_detections() for bounding_box in bounding_boxes: bb", "= dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned to shape and cropped", "max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra =", "used with 5 point shape_predictor. References: 1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\"", "1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. 
http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) -> None: self._cnn_face_detector =", "height, width = image.shape[0:2] bounding_boxes = np.array([ [ max(rect.left(), 0), max(rect.top(), 0), min(rect.right(),", "if n_det < 1: raise FaceNotFoundError height, width = image.shape[:2] det_scores = list()", "min(det.rect.right(), width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra = dict(det_scores=det_scores) return", "shape (n_faces, 150, 150, 3) face_images = dlib.get_face_chips(image, shapes) embeddings = self._face_encoder.compute_face_descriptor(face_images) return", "= image.shape[:2] det_scores = list() bounding_boxes = list() for det in detections: bounding_boxes.append(", "name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\" model. .. note:: * bounding box sizes are", ".. note:: * bounding box sizes are equal for all detections. * detector", "] for rect in detections]) return bounding_boxes, dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained", "dependent models for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib'))", "class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN model with \"Max-Margin Object Detection\" loss function.", "note:: * bounding box sizes are equal for all detections. * detector does", "box sizes are equal for all detections. * detector does not provide confidence", "function. .. note:: * bounding box sizes are equal for all detections. *", "box sizes are equal for all detections. 
* to run in realtime requires", "face images # default shape (n_faces, 150, 150, 3) face_images = dlib.get_face_chips(image, shapes)", "bounding_boxes = np.array(bounding_boxes) extra = dict(det_scores=det_scores) return bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet', dim=128):", "= self._face_detector(image) n_det = len(detections) if n_det < 1: raise FaceNotFoundError height, width", "(n_faces, 150, 150, 3) face_images = dlib.get_face_chips(image, shapes) embeddings = self._face_encoder.compute_face_descriptor(face_images) return np.array(embeddings)", "raise FaceNotFoundError height, width = image.shape[0:2] bounding_boxes = np.array([ [ max(rect.left(), 0), max(rect.top(),", "0), max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(), height) ] for rect in detections]) return", "from face_engine import RESOURCES from face_engine.exceptions import FaceNotFoundError from face_engine.fetching import fetch_file from", "height, width = image.shape[:2] det_scores = list() bounding_boxes = list() for det in", "dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes, **kwargs):", "min(rect.right(), width), min(rect.bottom(), height) ] for rect in detections]) return bounding_boxes, dict() class", "* bounding box sizes are equal for all detections. 
* to run in", "import fetch_file from face_engine.models import Detector, Embedder # download dependent models for url", "Embedder # download dependent models for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]:", "= list() for det in detections: bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(),", "[ max(rect.left(), 0), max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(), height) ] for rect in", "compute_embeddings(self, image, bounding_boxes, **kwargs): shapes = dlib.full_object_detections() for bounding_box in bounding_boxes: bb =", "\"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes, **kwargs): shapes =", "bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned to shape", "detections: bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence)", "dlib import numpy as np from face_engine import RESOURCES from face_engine.exceptions import FaceNotFoundError", "detections. * to run in realtime requires high-end Nvidia GPU with CUDA/cuDNN. 
References:", "dict(det_scores=det_scores) return bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib pre-trained face recognition", "shapes = dlib.full_object_detections() for bounding_box in bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3])", "download dependent models for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES,", "as np from face_engine import RESOURCES from face_engine.exceptions import FaceNotFoundError from face_engine.fetching import", "bounding box sizes are equal for all detections. * to run in realtime", "http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) -> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1(", "face_engine.models import Detector, Embedder # download dependent models for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\",", ".. note:: * bounding box sizes are equal for all detections. * to", "in bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned to", "to shape and cropped by bounding boxes face images # default shape (n_faces,", "2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) -> None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES,", "Object Detection\" loss function. .. note:: * bounding box sizes are equal for", "recognition ResNet model. .. 
note:: * face alignment pre-processing used with 5 point", "bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes", "n_det = len(detections) if n_det < 1: raise FaceNotFoundError height, width = image.shape[0:2]", "from face_engine.models import Detector, Embedder # download dependent models for url in [", "from face_engine.exceptions import FaceNotFoundError from face_engine.fetching import fetch_file from face_engine.models import Detector, Embedder", "dim=128): \"\"\" Dlib pre-trained face recognition ResNet model. .. note:: * face alignment", "Detection\" loss function. .. note:: * bounding box sizes are equal for all", "RESOURCES from face_engine.exceptions import FaceNotFoundError from face_engine.fetching import fetch_file from face_engine.models import Detector,", "os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes, **kwargs): shapes", "are equal for all detections. 
* detector does not provide confidence scores for", "detections]) return bounding_boxes, dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN model with \"Max-Margin", "boxes face images # default shape (n_faces, 150, 150, 3) face_images = dlib.get_face_chips(image,", "import RESOURCES from face_engine.exceptions import FaceNotFoundError from face_engine.fetching import fetch_file from face_engine.models import", "self._face_detector(image) n_det = len(detections) if n_det < 1: raise FaceNotFoundError height, width =", "np.array(bounding_boxes) extra = dict(det_scores=det_scores) return bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib", "]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra = dict(det_scores=det_scores) return bounding_boxes, extra class ResNetEmbedder(Embedder,", "\"\"\" def __init__(self) -> None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor(", "References: 1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) -> None: self._face_encoder", "import Detector, Embedder # download dependent models for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\",", "for all detections. * to run in realtime requires high-end Nvidia GPU with", "model. .. note:: * bounding box sizes are equal for all detections. *", "References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. 
http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) -> None: self._cnn_face_detector", "self._cnn_face_detector(image) n_det = len(detections) if n_det < 1: raise FaceNotFoundError height, width =", "shapes.append(self._shape_predictor(image, bb)) # Aligned to shape and cropped by bounding boxes face images", "\"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\"", "FaceNotFoundError from face_engine.fetching import fetch_file from face_engine.models import Detector, Embedder # download dependent", "__init__(self) -> None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\"))", "os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes, **kwargs): shapes = dlib.full_object_detections() for bounding_box in", "det in detections: bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(), height)", "self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image,", "bounding_boxes, dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN model with \"Max-Margin Object Detection\"", "= dict(det_scores=det_scores) return bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib pre-trained face", 
"point shape_predictor. References: 1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) ->", "__init__(self): self._face_detector = dlib.get_frontal_face_detector() def detect(self, image): detections = self._face_detector(image) n_det = len(detections)", "len(detections) if n_det < 1: raise FaceNotFoundError height, width = image.shape[:2] det_scores =", "shape_predictor. References: 1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) -> None:", "import dlib import numpy as np from face_engine import RESOURCES from face_engine.exceptions import", "= self._cnn_face_detector(image) n_det = len(detections) if n_det < 1: raise FaceNotFoundError height, width", "confidence scores for detections. \"\"\" def __init__(self): self._face_detector = dlib.get_frontal_face_detector() def detect(self, image):", "= np.array(bounding_boxes) extra = dict(det_scores=det_scores) return bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\"", "MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN model with \"Max-Margin Object Detection\" loss function. ..", "\"Max-Margin Object Detection\" loss function. .. note:: * bounding box sizes are equal", "note:: * face alignment pre-processing used with 5 point shape_predictor. References: 1. http://dlib.net/python/index.html", "height) ] for rect in detections]) return bounding_boxes, dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib", "http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. 
http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) -> None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\"))", "scores for detections. \"\"\" def __init__(self): self._face_detector = dlib.get_frontal_face_detector() def detect(self, image): detections", "for rect in detections]) return bounding_boxes, dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN", "height) ]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra = dict(det_scores=det_scores) return bounding_boxes, extra class", "loss function. .. note:: * bounding box sizes are equal for all detections.", "bounding_boxes = list() for det in detections: bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(), 0),", "dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes, **kwargs): shapes = dlib.full_object_detections() for bounding_box", "1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) -> None: self._face_encoder =", "with 5 point shape_predictor. References: 1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def", "dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections = self._cnn_face_detector(image) n_det = len(detections) if", "\"\"\" Dlib pre-trained face recognition ResNet model. .. 
note:: * face alignment pre-processing", "\"\"\" def __init__(self): self._face_detector = dlib.get_frontal_face_detector() def detect(self, image): detections = self._face_detector(image) n_det", "url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'):", "2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) -> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES,", "bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned to shape and cropped by bounding boxes", "sizes are equal for all detections. * detector does not provide confidence scores", "ResNet model. .. note:: * face alignment pre-processing used with 5 point shape_predictor.", "fetch_file from face_engine.models import Detector, Embedder # download dependent models for url in", "= list() bounding_boxes = list() for det in detections: bounding_boxes.append( [ max(det.rect.left(), 0),", "bounding boxes face images # default shape (n_faces, 150, 150, 3) face_images =", "dlib.full_object_detections() for bounding_box in bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb))", "len(detections) if n_det < 1: raise FaceNotFoundError height, width = image.shape[0:2] bounding_boxes =", "shape and cropped by bounding boxes face images # default shape (n_faces, 150,", "< 1: raise FaceNotFoundError height, width = image.shape[:2] det_scores = list() bounding_boxes =", "raise FaceNotFoundError height, width = image.shape[:2] det_scores = list() bounding_boxes = list() for", "to run in realtime requires 
high-end Nvidia GPU with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html", "extra = dict(det_scores=det_scores) return bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib pre-trained", "width = image.shape[:2] det_scores = list() bounding_boxes = list() for det in detections:", ".. note:: * face alignment pre-processing used with 5 point shape_predictor. References: 1.", "detect(self, image): detections = self._cnn_face_detector(image) n_det = len(detections) if n_det < 1: raise", "n_det < 1: raise FaceNotFoundError height, width = image.shape[:2] det_scores = list() bounding_boxes", "detections = self._face_detector(image) n_det = len(detections) if n_det < 1: raise FaceNotFoundError height,", "< 1: raise FaceNotFoundError height, width = image.shape[0:2] bounding_boxes = np.array([ [ max(rect.left(),", "* bounding box sizes are equal for all detections. * detector does not", "CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) -> None:", "list() bounding_boxes = list() for det in detections: bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(),", "[ max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes =", "numpy as np from face_engine import RESOURCES from face_engine.exceptions import FaceNotFoundError from face_engine.fetching", "pre-processing used with 5 point shape_predictor. References: 1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. 
http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2", "= dlib.full_object_detections() for bounding_box in bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image,", "extra class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib pre-trained face recognition ResNet model. ..", "min(rect.bottom(), height) ] for rect in detections]) return bounding_boxes, dict() class MMODDetector(Detector, name='mmod'):", "bb)) # Aligned to shape and cropped by bounding boxes face images #", "]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\" model. ..", "3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) -> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def", "# download dependent models for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url,", "np from face_engine import RESOURCES from face_engine.exceptions import FaceNotFoundError from face_engine.fetching import fetch_file", "http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) -> None: self._face_encoder = dlib.face_recognition_model_v1(", "class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\" model. .. 
note:: * bounding box", "= dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes,", "in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib", "pre-trained CNN model with \"Max-Margin Object Detection\" loss function. .. note:: * bounding", "\"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\" model.", "dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned to shape and cropped by", "default shape (n_faces, 150, 150, 3) face_images = dlib.get_face_chips(image, shapes) embeddings = self._face_encoder.compute_face_descriptor(face_images)", "image.shape[:2] det_scores = list() bounding_boxes = list() for det in detections: bounding_boxes.append( [", "bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib pre-trained face recognition ResNet model.", "def __init__(self) -> None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES,", "for det in detections: bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(),", "class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib 
pre-trained face recognition ResNet model. .. note::", "* detector does not provide confidence scores for detections. \"\"\" def __init__(self): self._face_detector", "list() for det in detections: bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(), width),", "**kwargs): shapes = dlib.full_object_detections() for bounding_box in bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2],", "models for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class", "image.shape[0:2] bounding_boxes = np.array([ [ max(rect.left(), 0), max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(), height)", "\"\"\" def __init__(self) -> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image):", "* face alignment pre-processing used with 5 point shape_predictor. References: 1. http://dlib.net/python/index.html 2.", "equal for all detections. * detector does not provide confidence scores for detections.", "-> None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def", "bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned to shape and", "images # default shape (n_faces, 150, 150, 3) face_images = dlib.get_face_chips(image, shapes) embeddings", "equal for all detections. 
* to run in realtime requires high-end Nvidia GPU", "max(rect.left(), 0), max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(), height) ] for rect in detections])", "sizes are equal for all detections. * to run in realtime requires high-end", "self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections = self._cnn_face_detector(image) n_det =", "\"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections = self._cnn_face_detector(image) n_det = len(detections) if n_det <", "image): detections = self._cnn_face_detector(image) n_det = len(detections) if n_det < 1: raise FaceNotFoundError", "1: raise FaceNotFoundError height, width = image.shape[:2] det_scores = list() bounding_boxes = list()", "in detections]) return bounding_boxes, dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN model with", "= len(detections) if n_det < 1: raise FaceNotFoundError height, width = image.shape[0:2] bounding_boxes", "in detections: bounding_boxes.append( [ max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(), height) ])", "# default shape (n_faces, 150, 150, 3) face_images = dlib.get_face_chips(image, shapes) embeddings =", "self._face_detector = dlib.get_frontal_face_detector() def detect(self, image): detections = self._face_detector(image) n_det = len(detections) if", "FaceNotFoundError height, width = image.shape[0:2] bounding_boxes = np.array([ [ max(rect.left(), 0), max(rect.top(), 0),", "max(det.rect.left(), 0), max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes)", "width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra = dict(det_scores=det_scores) return bounding_boxes,", "not provide 
confidence scores for detections. \"\"\" def __init__(self): self._face_detector = dlib.get_frontal_face_detector() def", "None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections = self._cnn_face_detector(image) n_det", "Detector, Embedder # download dependent models for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\"", "does not provide confidence scores for detections. \"\"\" def __init__(self): self._face_detector = dlib.get_frontal_face_detector()", "requires high-end Nvidia GPU with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2", "GPU with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self)", "n_det = len(detections) if n_det < 1: raise FaceNotFoundError height, width = image.shape[:2]", "all detections. * detector does not provide confidence scores for detections. \"\"\" def", "detector does not provide confidence scores for detections. 
\"\"\" def __init__(self): self._face_detector =", "0), min(det.rect.right(), width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra = dict(det_scores=det_scores)", "image): detections = self._face_detector(image) n_det = len(detections) if n_det < 1: raise FaceNotFoundError", "from face_engine.fetching import fetch_file from face_engine.models import Detector, Embedder # download dependent models", "det_scores = list() bounding_boxes = list() for det in detections: bounding_boxes.append( [ max(det.rect.left(),", "for all detections. * detector does not provide confidence scores for detections. \"\"\"", "detections. * detector does not provide confidence scores for detections. \"\"\" def __init__(self):", "rect in detections]) return bounding_boxes, dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN model", "face_engine.fetching import fetch_file from face_engine.models import Detector, Embedder # download dependent models for", "import os import dlib import numpy as np from face_engine import RESOURCES from", "in realtime requires high-end Nvidia GPU with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html", "5 point shape_predictor. References: 1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self)", "import FaceNotFoundError from face_engine.fetching import fetch_file from face_engine.models import Detector, Embedder # download", "alignment pre-processing used with 5 point shape_predictor. References: 1. http://dlib.net/python/index.html 2. 
http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2 3.", "image, bounding_boxes, **kwargs): shapes = dlib.full_object_detections() for bounding_box in bounding_boxes: bb = dlib.rectangle(bounding_box[0],", "detections. \"\"\" def __init__(self): self._face_detector = dlib.get_frontal_face_detector() def detect(self, image): detections = self._face_detector(image)", "name='resnet', dim=128): \"\"\" Dlib pre-trained face recognition ResNet model. .. note:: * face", "FaceNotFoundError height, width = image.shape[:2] det_scores = list() bounding_boxes = list() for det", "# Aligned to shape and cropped by bounding boxes face images # default", "os import dlib import numpy as np from face_engine import RESOURCES from face_engine.exceptions", "[ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram", "def detect(self, image): detections = self._cnn_face_detector(image) n_det = len(detections) if n_det < 1:", "for bounding_box in bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) #", "Dlib pre-trained face recognition ResNet model. .. note:: * face alignment pre-processing used", "fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\" model. .. 
note::", "return bounding_boxes, dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN model with \"Max-Margin Object", "dlib.get_frontal_face_detector() def detect(self, image): detections = self._face_detector(image) n_det = len(detections) if n_det <", "None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self,", "and cropped by bounding boxes face images # default shape (n_faces, 150, 150,", "for detections. \"\"\" def __init__(self): self._face_detector = dlib.get_frontal_face_detector() def detect(self, image): detections =", "provide confidence scores for detections. \"\"\" def __init__(self): self._face_detector = dlib.get_frontal_face_detector() def detect(self,", "__init__(self) -> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections =", "n_det < 1: raise FaceNotFoundError height, width = image.shape[0:2] bounding_boxes = np.array([ [", "\"Histogram Oriented Gradients\" model. .. 
note:: * bounding box sizes are equal for", "max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(), height) ] for rect in detections]) return bounding_boxes,", "= len(detections) if n_det < 1: raise FaceNotFoundError height, width = image.shape[:2] det_scores", "bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned to shape and cropped by bounding", "bounding_boxes, **kwargs): shapes = dlib.full_object_detections() for bounding_box in bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1],", "= np.array([ [ max(rect.left(), 0), max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(), height) ] for", "face_engine.exceptions import FaceNotFoundError from face_engine.fetching import fetch_file from face_engine.models import Detector, Embedder #", "det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra = dict(det_scores=det_scores) return bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet',", "Gradients\" model. .. note:: * bounding box sizes are equal for all detections.", "if n_det < 1: raise FaceNotFoundError height, width = image.shape[0:2] bounding_boxes = np.array([", "with \"Max-Margin Object Detection\" loss function. .. note:: * bounding box sizes are", "HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\" model. .. note:: * bounding box sizes", "dict() class MMODDetector(Detector, name='mmod'): \"\"\"Dlib pre-trained CNN model with \"Max-Margin Object Detection\" loss", "pre-trained face recognition ResNet model. .. note:: * face alignment pre-processing used with", "for url in [ \"http://dlib.net/files/mmod_human_face_detector.dat.bz2\", \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\", \"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\" ]: fetch_file(url, os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector,", "all detections. 
* to run in realtime requires high-end Nvidia GPU with CUDA/cuDNN.", "run in realtime requires high-end Nvidia GPU with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2.", "http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) -> None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor =", "cropped by bounding boxes face images # default shape (n_faces, 150, 150, 3)", "detections = self._cnn_face_detector(image) n_det = len(detections) if n_det < 1: raise FaceNotFoundError height,", "import numpy as np from face_engine import RESOURCES from face_engine.exceptions import FaceNotFoundError from", "def detect(self, image): detections = self._face_detector(image) n_det = len(detections) if n_det < 1:", "def __init__(self): self._face_detector = dlib.get_frontal_face_detector() def detect(self, image): detections = self._face_detector(image) n_det =", "= dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes, **kwargs): shapes = dlib.full_object_detections() for", "os.path.join(RESOURCES, 'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\" model. .. note:: *", "\"\"\"Dlib \"Histogram Oriented Gradients\" model. .. note:: * bounding box sizes are equal", "bounding_boxes = np.array([ [ max(rect.left(), 0), max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(), height) ]", "are equal for all detections. * to run in realtime requires high-end Nvidia", "1: raise FaceNotFoundError height, width = image.shape[0:2] bounding_boxes = np.array([ [ max(rect.left(), 0),", "width), min(rect.bottom(), height) ] for rect in detections]) return bounding_boxes, dict() class MMODDetector(Detector,", "bounding box sizes are equal for all detections. 
* detector does not provide", "detect(self, image): detections = self._face_detector(image) n_det = len(detections) if n_det < 1: raise", "face alignment pre-processing used with 5 point shape_predictor. References: 1. http://dlib.net/python/index.html 2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2", "* to run in realtime requires high-end Nvidia GPU with CUDA/cuDNN. References: 1.", "Nvidia GPU with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def", "-> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections = self._cnn_face_detector(image)", "= dlib.get_frontal_face_detector() def detect(self, image): detections = self._face_detector(image) n_det = len(detections) if n_det", "face_engine import RESOURCES from face_engine.exceptions import FaceNotFoundError from face_engine.fetching import fetch_file from face_engine.models", "Oriented Gradients\" model. .. note:: * bounding box sizes are equal for all", "CNN model with \"Max-Margin Object Detection\" loss function. .. 
note:: * bounding box", "np.array([ [ max(rect.left(), 0), max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(), height) ] for rect", "min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra = dict(det_scores=det_scores) return bounding_boxes, extra", "<gh_stars>1-10 import os import dlib import numpy as np from face_engine import RESOURCES", "= dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections = self._cnn_face_detector(image) n_det = len(detections)", "0), min(rect.right(), width), min(rect.bottom(), height) ] for rect in detections]) return bounding_boxes, dict()", "realtime requires high-end Nvidia GPU with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3.", "model. .. note:: * face alignment pre-processing used with 5 point shape_predictor. References:", "by bounding boxes face images # default shape (n_faces, 150, 150, 3) face_images", "def __init__(self) -> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections", "self._shape_predictor = dlib.shape_predictor( os.path.join(RESOURCES, \"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes, **kwargs): shapes = dlib.full_object_detections()", "note:: * bounding box sizes are equal for all detections. * to run", "\"\"\"Dlib pre-trained CNN model with \"Max-Margin Object Detection\" loss function. .. 
note:: *", "http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) -> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self,", "return bounding_boxes, extra class ResNetEmbedder(Embedder, name='resnet', dim=128): \"\"\" Dlib pre-trained face recognition ResNet", "bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned to shape and cropped by bounding boxes face", "= image.shape[0:2] bounding_boxes = np.array([ [ max(rect.left(), 0), max(rect.top(), 0), min(rect.right(), width), min(rect.bottom(),", "https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\" def __init__(self) -> None: self._cnn_face_detector = dlib.cnn_face_detection_model_v1( os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\"))", "name='mmod'): \"\"\"Dlib pre-trained CNN model with \"Max-Margin Object Detection\" loss function. .. note::", "\"models/dlib/shape_predictor_5_face_landmarks.dat\")) def compute_embeddings(self, image, bounding_boxes, **kwargs): shapes = dlib.full_object_detections() for bounding_box in bounding_boxes:", "bounding_box in bounding_boxes: bb = dlib.rectangle(bounding_box[0], bounding_box[1], bounding_box[2], bounding_box[3]) shapes.append(self._shape_predictor(image, bb)) # Aligned", "3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2 \"\"\" def __init__(self) -> None: self._face_encoder = dlib.face_recognition_model_v1( os.path.join(RESOURCES, \"models/dlib/dlib_face_recognition_resnet_model_v1.dat\")) self._shape_predictor", "'models/dlib')) class HOGDetector(Detector, name='hog'): \"\"\"Dlib \"Histogram Oriented Gradients\" model. .. note:: * bounding", "model with \"Max-Margin Object Detection\" loss function. .. 
note:: * bounding box sizes", "0), max(det.rect.top(), 0), min(det.rect.right(), width), min(det.rect.bottom(), height) ]) det_scores.append(det.confidence) bounding_boxes = np.array(bounding_boxes) extra", "high-end Nvidia GPU with CUDA/cuDNN. References: 1. http://dlib.net/python/index.html 2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html 3. http://dlib.net/files/mmod_human_face_detector.dat.bz2 \"\"\"", "os.path.join(RESOURCES, \"models/dlib/mmod_human_face_detector.dat\")) def detect(self, image): detections = self._cnn_face_detector(image) n_det = len(detections) if n_det" ]
[ "scc(graph) == {'g': ['g', 'i', 'h'], 'd': [ 'd', 'e', 'f'], 'b': ['b',", "['d', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_no_edges(): graph = { 'a':", "['c'], 'b': ['b']} def test_scc_single_node(): graph = { 'a': [] } assert scc(graph)", "['i'], 'h': ['g'], 'i': [] } assert scc(graph) == {'i': ['i'], 'g': ['g'],", "{'i': ['i'], 'g': ['g'], 'h': [ 'h'], 'd': ['d', 'e', 'f'], 'b': ['b',", "['g'], 'h': [ 'h'], 'd': ['d', 'e', 'f'], 'b': ['b', 'c', 'a']} def", "'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': ['h'] } assert", "'i': [] } assert scc(graph) == {'i': ['i'], 'g': ['g'], 'h': [ 'h'],", "[], 'c': [] } assert scc(graph) == {'a': ['a'], 'c': ['c'], 'b': ['b']}", "{ 'a': [] } assert scc(graph) == {'a': ['a']} def test_scc_single_edge(): graph =", "'f', 'e'], 'g': ['g', 'h', 'i']} def test_scc_reversed_graph(): graph = { 'a': ['b'],", "'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': ['h'] } assert scc(graph) ==", "'a': [] } assert scc(graph) == {'a': ['a']} def test_scc_single_edge(): graph = {", "'d': [ 'd', 'f', 'e'], 'g': ['g', 'h', 'i']} def test_scc_reversed_graph(): graph =", "'c', 'a']} def test_scc_no_edges(): graph = { 'a': [], 'b': [], 'c': []", "= { 'a': [], 'b': [], 'c': [] } assert scc(graph) == {'a':", "def test_scc_node_no_outbound_edges(): graph = { 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'], 'd':", "'h': [ 'h'], 'd': ['d', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_no_edges():", "'d': [ 'd', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_node_no_outbound_edges(): graph =", "['i'], 'h': ['g'], 'i': ['h'] } assert scc(graph) == {'g': ['g', 'i', 'h'],", "assert scc(graph) == {'a': ['a'], 'c': ['c'], 'b': ['b']} def test_scc_single_node(): graph =", "'a']} def test_scc_no_edges(): graph = { 'a': [], 'b': [], 'c': [] }", "{ 'a': [], 'b': [], 'c': [] } assert scc(graph) == {'a': ['a'],", "graph = { 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'], 'd': ['e'], 'e':", "'a': ['c'], 'b': ['a'], 'c': ['b'], 'd': ['b', 'f'], 'e': ['d'], 'f': ['e'],", 
"scc(graph) == {'a': ['a']} def test_scc_single_edge(): graph = { 'a': ['b'], 'b': []", "['c'], 'b': ['a'], 'c': ['b'], 'd': ['b', 'f'], 'e': ['d'], 'f': ['e'], 'g':", "scc(graph) == {'c': ['c', 'b', 'a'], 'd': [ 'd', 'f', 'e'], 'g': ['g',", "== {'g': ['g', 'i', 'h'], 'd': [ 'd', 'e', 'f'], 'b': ['b', 'c',", "['g'], 'i': [] } assert scc(graph) == {'i': ['i'], 'g': ['g'], 'h': [", "{'g': ['g', 'i', 'h'], 'd': [ 'd', 'e', 'f'], 'b': ['b', 'c', 'a']}", "def test_scc_no_edges(): graph = { 'a': [], 'b': [], 'c': [] } assert", "'f'], 'b': ['b', 'c', 'a']} def test_scc_node_no_outbound_edges(): graph = { 'a': ['b'], 'b':", "'c': ['b'], 'd': ['b', 'f'], 'e': ['d'], 'f': ['e'], 'g': ['e', 'h'], 'h':", "= { 'a': ['c'], 'b': ['a'], 'c': ['b'], 'd': ['b', 'f'], 'e': ['d'],", "[ 'd', 'f', 'e'], 'g': ['g', 'h', 'i']} def test_scc_reversed_graph(): graph = {", "{'a': ['a'], 'c': ['c'], 'b': ['b']} def test_scc_single_node(): graph = { 'a': []", "test_scc(): graph = { 'a': ['c'], 'b': ['a'], 'c': ['b'], 'd': ['b', 'f'],", "['a'], 'c': ['c'], 'b': ['b']} def test_scc_single_node(): graph = { 'a': [] }", "Test Cases: Strongly Connected Components\"\"\" from week1.scc import scc def test_scc(): graph =", "'i', 'h'], 'd': [ 'd', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_node_no_outbound_edges():", "'f'], 'e': ['d'], 'f': ['e'], 'g': ['e', 'h'], 'h': ['i'], 'i': ['g'] }", "'g': ['e', 'h'], 'h': ['i'], 'i': ['g'] } assert scc(graph) == {'c': ['c',", "test_scc_reversed_graph(): graph = { 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'], 'd': ['e'],", "'h', 'i']} def test_scc_reversed_graph(): graph = { 'a': ['b'], 'b': ['c', 'd'], 'c':", "'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': [] } assert scc(graph) ==", "Connected Components\"\"\" from week1.scc import scc def test_scc(): graph = { 'a': ['c'],", "'b': ['b', 'c', 'a']} def test_scc_no_edges(): graph = { 'a': [], 'b': [],", "'b': ['b']} def test_scc_single_node(): graph = { 'a': [] } assert scc(graph) ==", "['b', 'f'], 'e': ['d'], 'f': 
['e'], 'g': ['e', 'h'], 'h': ['i'], 'i': ['g']", "'d': ['e'], 'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': []", "[] } assert scc(graph) == {'a': ['a'], 'c': ['c'], 'b': ['b']} def test_scc_single_node():", "assert scc(graph) == {'c': ['c', 'b', 'a'], 'd': [ 'd', 'f', 'e'], 'g':", "week1.scc import scc def test_scc(): graph = { 'a': ['c'], 'b': ['a'], 'c':", "scc def test_scc(): graph = { 'a': ['c'], 'b': ['a'], 'c': ['b'], 'd':", "[ 'h'], 'd': ['d', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_no_edges(): graph", "'b': ['a'], 'c': ['b'], 'd': ['b', 'f'], 'e': ['d'], 'f': ['e'], 'g': ['e',", "'f'], 'b': ['b', 'c', 'a']} def test_scc_no_edges(): graph = { 'a': [], 'b':", "'d': ['e'], 'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': ['h']", "graph = { 'a': [] } assert scc(graph) == {'a': ['a']} def test_scc_single_edge():", "'e': ['d'], 'f': ['e'], 'g': ['e', 'h'], 'h': ['i'], 'i': ['g'] } assert", "['i'], 'i': ['g'] } assert scc(graph) == {'c': ['c', 'b', 'a'], 'd': [", "[ 'd', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_node_no_outbound_edges(): graph = {", "'h': ['g'], 'i': ['h'] } assert scc(graph) == {'g': ['g', 'i', 'h'], 'd':", "'a': ['b'], 'b': ['c', 'd'], 'c': ['a'], 'd': ['e'], 'e': ['f', 'g'], 'f':", "'e'], 'g': ['g', 'h', 'i']} def test_scc_reversed_graph(): graph = { 'a': ['b'], 'b':", "'a']} def test_scc_node_no_outbound_edges(): graph = { 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'],", "'f': ['e'], 'g': ['e', 'h'], 'h': ['i'], 'i': ['g'] } assert scc(graph) ==", "['a']} def test_scc_single_edge(): graph = { 'a': ['b'], 'b': [] } assert scc(graph)", "'i': ['g'] } assert scc(graph) == {'c': ['c', 'b', 'a'], 'd': [ 'd',", "['c', 'd'], 'c': ['a'], 'd': ['e'], 'e': ['f', 'g'], 'f': ['d'], 'g': ['i'],", "def test_scc_single_node(): graph = { 'a': [] } assert scc(graph) == {'a': ['a']}", "'h'], 'd': ['d', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_no_edges(): graph =", "} assert scc(graph) == {'i': ['i'], 'g': ['g'], 'h': [ 'h'], 'd': 
['d',", "'h'], 'h': ['i'], 'i': ['g'] } assert scc(graph) == {'c': ['c', 'b', 'a'],", "'h': ['g'], 'i': [] } assert scc(graph) == {'i': ['i'], 'g': ['g'], 'h':", "'b': [], 'c': [] } assert scc(graph) == {'a': ['a'], 'c': ['c'], 'b':", "'c': [] } assert scc(graph) == {'a': ['a'], 'c': ['c'], 'b': ['b']} def", "'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': ['h'] } assert scc(graph) == {'g':", "} assert scc(graph) == {'a': ['a'], 'c': ['c'], 'b': ['b']} def test_scc_single_node(): graph", "['d'], 'g': ['i'], 'h': ['g'], 'i': [] } assert scc(graph) == {'i': ['i'],", "== {'c': ['c', 'b', 'a'], 'd': [ 'd', 'f', 'e'], 'g': ['g', 'h',", "'c': ['a'], 'd': ['e'], 'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'],", "['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': [] } assert scc(graph)", "scc(graph) == {'a': ['a'], 'c': ['c'], 'b': ['b']} def test_scc_single_node(): graph = {", "['e', 'h'], 'h': ['i'], 'i': ['g'] } assert scc(graph) == {'c': ['c', 'b',", "'g': ['g'], 'h': [ 'h'], 'd': ['d', 'e', 'f'], 'b': ['b', 'c', 'a']}", "['e'], 'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': [] }", "'i']} def test_scc_reversed_graph(): graph = { 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'],", "['d'], 'g': ['i'], 'h': ['g'], 'i': ['h'] } assert scc(graph) == {'g': ['g',", "['a'], 'd': ['e'], 'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i':", "['i'], 'g': ['g'], 'h': [ 'h'], 'd': ['d', 'e', 'f'], 'b': ['b', 'c',", "'a': [], 'b': [], 'c': [] } assert scc(graph) == {'a': ['a'], 'c':", "def test_scc_reversed_graph(): graph = { 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'], 'd':", "['b']} def test_scc_single_node(): graph = { 'a': [] } assert scc(graph) == {'a':", "{ 'a': ['c'], 'b': ['a'], 'c': ['b'], 'd': ['b', 'f'], 'e': ['d'], 'f':", "'g': ['i'], 'h': ['g'], 'i': [] } assert scc(graph) == {'i': ['i'], 'g':", "test_scc_single_edge(): graph = { 'a': ['b'], 'b': [] } assert scc(graph) == {'a':", "} assert scc(graph) == {'a': ['a']} def test_scc_single_edge(): graph = { 'a': 
['b'],", "Strongly Connected Components\"\"\" from week1.scc import scc def test_scc(): graph = { 'a':", "'h': ['i'], 'i': ['g'] } assert scc(graph) == {'c': ['c', 'b', 'a'], 'd':", "} assert scc(graph) == {'g': ['g', 'i', 'h'], 'd': [ 'd', 'e', 'f'],", "def test_scc(): graph = { 'a': ['c'], 'b': ['a'], 'c': ['b'], 'd': ['b',", "'c': ['c'], 'b': ['b']} def test_scc_single_node(): graph = { 'a': [] } assert", "[], 'b': [], 'c': [] } assert scc(graph) == {'a': ['a'], 'c': ['c'],", "'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_no_edges(): graph = { 'a': [],", "'b', 'a'], 'd': [ 'd', 'f', 'e'], 'g': ['g', 'h', 'i']} def test_scc_reversed_graph():", "'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_node_no_outbound_edges(): graph = { 'a': ['b'],", "['h'] } assert scc(graph) == {'g': ['g', 'i', 'h'], 'd': [ 'd', 'e',", "def test_scc_single_edge(): graph = { 'a': ['b'], 'b': [] } assert scc(graph) ==", "= { 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'], 'd': ['e'], 'e': ['f',", "assert scc(graph) == {'g': ['g', 'i', 'h'], 'd': [ 'd', 'e', 'f'], 'b':", "'d', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_node_no_outbound_edges(): graph = { 'a':", "'b': ['b', 'c', 'a']} def test_scc_node_no_outbound_edges(): graph = { 'a': ['b'], 'b': ['c',", "assert scc(graph) == {'a': ['a']} def test_scc_single_edge(): graph = { 'a': ['b'], 'b':", "test_scc_node_no_outbound_edges(): graph = { 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'], 'd': ['e'],", "\"\"\"Week1 Test Cases: Strongly Connected Components\"\"\" from week1.scc import scc def test_scc(): graph", "scc(graph) == {'i': ['i'], 'g': ['g'], 'h': [ 'h'], 'd': ['d', 'e', 'f'],", "'b': ['c', 'd'], 'c': ['a'], 'd': ['e'], 'e': ['f', 'g'], 'f': ['d'], 'g':", "{'c': ['c', 'b', 'a'], 'd': [ 'd', 'f', 'e'], 'g': ['g', 'h', 'i']}", "['b'], 'b': ['c', 'd'], 'c': ['a'], 'd': ['e'], 'e': ['f', 'g'], 'f': ['d'],", "['b', 'c', 'a']} def test_scc_no_edges(): graph = { 'a': [], 'b': [], 'c':", "['b', 'c', 'a']} def test_scc_node_no_outbound_edges(): graph = { 
'a': ['b'], 'b': ['c', 'd'],", "'g': ['g', 'h', 'i']} def test_scc_reversed_graph(): graph = { 'a': ['b'], 'b': ['c',", "'c', 'a']} def test_scc_node_no_outbound_edges(): graph = { 'a': ['b'], 'b': ['c', 'd'], 'c':", "['e'], 'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': ['h'] }", "test_scc_no_edges(): graph = { 'a': [], 'b': [], 'c': [] } assert scc(graph)", "'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': [] } assert", "test_scc_single_node(): graph = { 'a': [] } assert scc(graph) == {'a': ['a']} def", "['f', 'g'], 'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': ['h'] } assert scc(graph)", "{'a': ['a']} def test_scc_single_edge(): graph = { 'a': ['b'], 'b': [] } assert", "from week1.scc import scc def test_scc(): graph = { 'a': ['c'], 'b': ['a'],", "'d': ['b', 'f'], 'e': ['d'], 'f': ['e'], 'g': ['e', 'h'], 'h': ['i'], 'i':", "'g': ['i'], 'h': ['g'], 'i': ['h'] } assert scc(graph) == {'g': ['g', 'i',", "== {'a': ['a'], 'c': ['c'], 'b': ['b']} def test_scc_single_node(): graph = { 'a':", "graph = { 'a': ['b'], 'b': [] } assert scc(graph) == {'a': ['a'],", "= { 'a': ['b'], 'b': [] } assert scc(graph) == {'a': ['a'], 'b':", "[] } assert scc(graph) == {'a': ['a']} def test_scc_single_edge(): graph = { 'a':", "assert scc(graph) == {'i': ['i'], 'g': ['g'], 'h': [ 'h'], 'd': ['d', 'e',", "['c', 'b', 'a'], 'd': [ 'd', 'f', 'e'], 'g': ['g', 'h', 'i']} def", "import scc def test_scc(): graph = { 'a': ['c'], 'b': ['a'], 'c': ['b'],", "{ 'a': ['b'], 'b': [] } assert scc(graph) == {'a': ['a'], 'b': ['b']}", "'d': ['d', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_no_edges(): graph = {", "['g'] } assert scc(graph) == {'c': ['c', 'b', 'a'], 'd': [ 'd', 'f',", "graph = { 'a': [], 'b': [], 'c': [] } assert scc(graph) ==", "[] } assert scc(graph) == {'i': ['i'], 'g': ['g'], 'h': [ 'h'], 'd':", "['a'], 'c': ['b'], 'd': ['b', 'f'], 'e': ['d'], 'f': ['e'], 'g': ['e', 'h'],", "['e'], 'g': ['e', 'h'], 'h': ['i'], 'i': ['g'] } assert scc(graph) == {'c':", "= { 'a': [] } 
assert scc(graph) == {'a': ['a']} def test_scc_single_edge(): graph", "['g'], 'i': ['h'] } assert scc(graph) == {'g': ['g', 'i', 'h'], 'd': [", "'h'], 'd': [ 'd', 'e', 'f'], 'b': ['b', 'c', 'a']} def test_scc_node_no_outbound_edges(): graph", "} assert scc(graph) == {'c': ['c', 'b', 'a'], 'd': [ 'd', 'f', 'e'],", "== {'i': ['i'], 'g': ['g'], 'h': [ 'h'], 'd': ['d', 'e', 'f'], 'b':", "['g', 'h', 'i']} def test_scc_reversed_graph(): graph = { 'a': ['b'], 'b': ['c', 'd'],", "['b'], 'd': ['b', 'f'], 'e': ['d'], 'f': ['e'], 'g': ['e', 'h'], 'h': ['i'],", "'f': ['d'], 'g': ['i'], 'h': ['g'], 'i': [] } assert scc(graph) == {'i':", "'a'], 'd': [ 'd', 'f', 'e'], 'g': ['g', 'h', 'i']} def test_scc_reversed_graph(): graph", "'d', 'f', 'e'], 'g': ['g', 'h', 'i']} def test_scc_reversed_graph(): graph = { 'a':", "{ 'a': ['b'], 'b': ['c', 'd'], 'c': ['a'], 'd': ['e'], 'e': ['f', 'g'],", "['g', 'i', 'h'], 'd': [ 'd', 'e', 'f'], 'b': ['b', 'c', 'a']} def", "Cases: Strongly Connected Components\"\"\" from week1.scc import scc def test_scc(): graph = {", "'d'], 'c': ['a'], 'd': ['e'], 'e': ['f', 'g'], 'f': ['d'], 'g': ['i'], 'h':", "'i': ['h'] } assert scc(graph) == {'g': ['g', 'i', 'h'], 'd': [ 'd',", "== {'a': ['a']} def test_scc_single_edge(): graph = { 'a': ['b'], 'b': [] }", "Components\"\"\" from week1.scc import scc def test_scc(): graph = { 'a': ['c'], 'b':", "['d'], 'f': ['e'], 'g': ['e', 'h'], 'h': ['i'], 'i': ['g'] } assert scc(graph)", "graph = { 'a': ['c'], 'b': ['a'], 'c': ['b'], 'd': ['b', 'f'], 'e':" ]
[ "len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def", "['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ] def test_apply(self):", "// len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod", "base_folder = tcfg.resource(\"masks\") out_img = np.zeros((512, 512 * 9), dtype=np.uint8) r = 0", "as np import cfg_test as tcfg from skimage import io from image.mask import", "PrimeIdMasMerger() for iter in self.iterations: # load files masks = np.zeros((512, 512, len(iter)),", "# 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self):", "skimage import io from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def", "'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ] def test_apply(self): base_folder", "self.assertEqual(3, len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod", "+= 1 # comprobamos el número de instancias clean_instaces(out_img) ids, counts = np.unique(out_img,", "iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'],", "in self.iterations: # load files masks = np.zeros((512, 512, 
len(iter)), dtype=np.uint8) for i", "import os import unittest import numpy as np import cfg_test as tcfg from", "['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ] def test_apply(self): base_folder = tcfg.resource(\"masks\") out_img", "os import unittest import numpy as np import cfg_test as tcfg from skimage", "for i in range(0, len(iter)): masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos =", "'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ] def test_apply(self): base_folder = tcfg.resource(\"masks\") out_img =", "r = 0 merger = MaskMerger() for iter in self.iterations: # load files", "range(0, len(iter)): masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse()", "import unittest import numpy as np import cfg_test as tcfg from skimage import", "def test_apply(self): base_folder = tcfg.resource(\"masks\") out_img = np.zeros((512, 512 * 9), dtype=np.uint8) r", "# comprobamos el número de instancias clean_instaces(out_img) ids, counts = np.unique(out_img, return_counts=True) factor", "['image_0_800_0.png'] ] def test_apply(self): base_folder = tcfg.resource(\"masks\") out_img = np.zeros((512, 512 * 9),", "r += 1 # comprobamos el número de instancias clean_instaces(out_img) ids, counts =", "['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ] def test_apply(self): base_folder = tcfg.resource(\"masks\") out_img = np.zeros((512,", "9), dtype=np.uint8) r = 0 merger = MaskMerger() for iter in self.iterations: #", "import io from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls):", "files masks = np.zeros((512, 512, len(iter)), dtype=np.uint8) for i in range(0, len(iter)): masks[:,", 
"= 0 merger = PrimeIdMasMerger() for iter in self.iterations: # load files masks", "512 * 9), dtype=np.uint8) r = 0 merger = MaskMerger() for iter in", "= 0 merger = MaskMerger() for iter in self.iterations: # load files masks", "el número de instancias clean_instaces(out_img) ids, counts = np.unique(out_img, return_counts=True) factor = 255", "image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images()", "out_img = np.zeros((512, 512 * 9), dtype=np.uint8) r = 0 merger = MaskMerger()", "ids, counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img *", "= PrimeIdMasMerger() for iter in self.iterations: # load files masks = np.zeros((512, 512,", "MaskMerger() for iter in self.iterations: # load files masks = np.zeros((512, 512, len(iter)),", "= 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 if", "512, len(iter)), dtype=np.uint8) for i in range(0, len(iter)): masks[:, :, i] = io.imread(os.path.join(base_folder,", "return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r += 1", "['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ] def", "] def test_apply(self): base_folder = tcfg.resource(\"masks\") out_img = np.zeros((512, 512 * 9), dtype=np.uint8)", "from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations =", "iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos) ids, counts = np.unique(out_img, return_counts=True) factor = 255", "512 * 9), 
dtype=np.uint8) r = 0 merger = PrimeIdMasMerger() for iter in", "out_img * factor) r += 1 # comprobamos el número de instancias clean_instaces(out_img)", "io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r += 1 # comprobamos el número de instancias", "= np.zeros((512, 512 * 9), dtype=np.uint8) r = 0 merger = MaskMerger() for", "_load_images(self): # iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'],", "de instancias clean_instaces(out_img) ids, counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids)", "iter in self.iterations: # load files masks = np.zeros((512, 512, len(iter)), dtype=np.uint8) for", "* factor) self.assertEqual(3, len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations =", "// len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 if __name__ ==", "cfg_test as tcfg from skimage import io from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger", "@classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): # iterations return [", "np.zeros((512, 512 * 9), dtype=np.uint8) r = 0 merger = PrimeIdMasMerger() for iter", "len(iter)), dtype=np.uint8) for i in range(0, len(iter)): masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8)", "MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def", "ids, counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img *", "io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): 
@classmethod def setUpClass(cls):", "import numpy as np import cfg_test as tcfg from skimage import io from", "return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png',", "= np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r", "= cls._load_images() @classmethod def _load_images(self): # iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'],", "masks = np.zeros((512, 512, len(iter)), dtype=np.uint8) for i in range(0, len(iter)): masks[:, :,", "0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): #", "masks, pos) ids, counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r),", "255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r += 1 # comprobamos el", "io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos) ids, counts =", "['image_0_700_0.png'], ['image_0_800_0.png'] ] def test_apply(self): base_folder = tcfg.resource(\"masks\") out_img = np.zeros((512, 512 *", "clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self):", "def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): # iterations return [ ['image_0_0_0.png'],", "comprobamos el número de instancias clean_instaces(out_img) ids, counts = np.unique(out_img, return_counts=True) factor =", "= io.imread(os.path.join(base_folder, 
iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos) ids, counts", "iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos) ids, counts = np.unique(out_img,", "len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 if __name__ == \"__main__\":", "cls.iterations = cls._load_images() @classmethod def _load_images(self): # iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'],", "factor) r += 1 # comprobamos el número de instancias clean_instaces(out_img) ids, counts", "len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r += 1 # comprobamos el número de", "tcfg.resource(\"masks\") out_img = np.zeros((512, 512 * 9), dtype=np.uint8) r = 0 merger =", "r = 0 merger = PrimeIdMasMerger() for iter in self.iterations: # load files", "= MaskMerger() for iter in self.iterations: # load files masks = np.zeros((512, 512,", "factor) self.assertEqual(3, len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images()", "unittest import numpy as np import cfg_test as tcfg from skimage import io", "class TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): # iterations", "<reponame>gusriobr/vineyard-sketcher import os import unittest import numpy as np import cfg_test as tcfg", "return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) #", "['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ] def test_apply(self): base_folder =", "'image_0_600_1.png'], ['image_0_700_0.png'], 
['image_0_800_0.png'] ] def test_apply(self): base_folder = tcfg.resource(\"masks\") out_img = np.zeros((512, 512", "from skimage import io from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod", "'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ] def test_apply(self): base_folder = tcfg.resource(\"masks\")", "dtype=np.uint8) r = 0 merger = PrimeIdMasMerger() for iter in self.iterations: # load", "import cfg_test as tcfg from skimage import io from image.mask import MaskMerger, clean_instaces,", "np import cfg_test as tcfg from skimage import io from image.mask import MaskMerger,", "i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos) ids,", "0 merger = MaskMerger() for iter in self.iterations: # load files masks =", "for iter in self.iterations: # load files masks = np.zeros((512, 512, len(iter)), dtype=np.uint8)", "import MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod", "clean_instaces(out_img) ids, counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img", "número de instancias clean_instaces(out_img) ids, counts = np.unique(out_img, return_counts=True) factor = 255 //", "merger = PrimeIdMasMerger() for iter in self.iterations: # load files masks = np.zeros((512,", "list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos) ids, counts = np.unique(out_img, return_counts=True) factor =", "['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], 
['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'],", "np.zeros((512, 512, len(iter)), dtype=np.uint8) for i in range(0, len(iter)): masks[:, :, i] =", "masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks,", "np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r +=", "= np.zeros((512, 512, len(iter)), dtype=np.uint8) for i in range(0, len(iter)): masks[:, :, i]", "= tcfg.resource(\"masks\") out_img = np.zeros((512, 512 * 9), dtype=np.uint8) r = 0 merger", "= np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3,", "TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): # iterations return", "io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 if __name__ == \"__main__\": unittest.main()", "* 9), dtype=np.uint8) r = 0 merger = PrimeIdMasMerger() for iter in self.iterations:", "in range(0, len(iter)): masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3]))", "class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): # iterations", "dtype=np.uint8) r = 0 merger = MaskMerger() for iter in self.iterations: # load", "1 # comprobamos el número de instancias clean_instaces(out_img) ids, counts = np.unique(out_img, return_counts=True)", "factor = 255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r += 1 #", "* factor) r += 1 # comprobamos el número de instancias clean_instaces(out_img) ids,", "out_img = 
np.zeros((512, 512 * 9), dtype=np.uint8) r = 0 merger = PrimeIdMasMerger()", "= list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos) ids, counts = np.unique(out_img, return_counts=True) factor", "len(iter)): masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img,", "out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations", "0 merger = PrimeIdMasMerger() for iter in self.iterations: # load files masks =", "// len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r += 1 # comprobamos el número", "merger = MaskMerger() for iter in self.iterations: # load files masks = np.zeros((512,", "= 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 class", "np.zeros((512, 512 * 9), dtype=np.uint8) r = 0 merger = MaskMerger() for iter", "merger.apply(out_img, masks, pos) ids, counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids)", "instancias clean_instaces(out_img) ids, counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r),", ":, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos)", "factor = 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2", "counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor)", "dtype=np.uint8) for i in range(0, len(iter)): masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos", "= np.zeros((512, 512 * 9), dtype=np.uint8) r = 0 merger = PrimeIdMasMerger() for", "= 
255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor) r += 1 # comprobamos", "pos = list(map(int, iter[0].split(\"_\")[1:3])) pos.reverse() merger.apply(out_img, masks, pos) ids, counts = np.unique(out_img, return_counts=True)", "as tcfg from skimage import io from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger class", "@classmethod def _load_images(self): # iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'],", "i in range(0, len(iter)): masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8) pos = list(map(int,", "cls._load_images() @classmethod def _load_images(self): # iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png',", "io from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations", "def _load_images(self): # iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png',", "pos.reverse() merger.apply(out_img, masks, pos) ids, counts = np.unique(out_img, return_counts=True) factor = 255 //", "9), dtype=np.uint8) r = 0 merger = PrimeIdMasMerger() for iter in self.iterations: #", "255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase):", "tcfg from skimage import io from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger class TestMaskMerger(unittest.TestCase):", "counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img * factor)", "['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], 
['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png'] ]", "TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): # iterations return", "setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): # iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'],", "PrimeIdMasMerger class TestMaskMerger(unittest.TestCase): @classmethod def setUpClass(cls): cls.iterations = cls._load_images() @classmethod def _load_images(self): #", "# load files masks = np.zeros((512, 512, len(iter)), dtype=np.uint8) for i in range(0,", "numpy as np import cfg_test as tcfg from skimage import io from image.mask", "# iterations return [ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png',", "pos) ids, counts = np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/salida_{}.png\".format(r), out_img", "load files masks = np.zeros((512, 512, len(iter)), dtype=np.uint8) for i in range(0, len(iter)):", "[ ['image_0_0_0.png'], ['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'],", "* 9), dtype=np.uint8) r = 0 merger = MaskMerger() for iter in self.iterations:", "self.iterations: # load files masks = np.zeros((512, 512, len(iter)), dtype=np.uint8) for i in", "np.unique(out_img, return_counts=True) factor = 255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids))", "255 // len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * 
factor) self.assertEqual(3, len(ids)) # 0,1,2 if __name__", "len(ids) io.imsave(\"/tmp/output_image.png\".format(r), out_img * factor) self.assertEqual(3, len(ids)) # 0,1,2 class TestPrimeMaskMerger(unittest.TestCase): @classmethod def", "['image_0_100_0.png'], ['image_0_200_0.png'], ['image_0_300_0.png'], ['image_0_300_1.png', 'image_0_300_2.png'], ['image_0_400_0.png', 'image_0_400_1.png'], ['image_0_500_0.png', 'image_0_500_1.png'], ['image_0_600_0.png', 'image_0_600_1.png'], ['image_0_700_0.png'], ['image_0_800_0.png']", "test_apply(self): base_folder = tcfg.resource(\"masks\") out_img = np.zeros((512, 512 * 9), dtype=np.uint8) r =" ]
[ "+ \".json\")] if relfile in substitutions: thelist += list(substitutions[relfile].values()) for newfile in thelist:", "in filestowrite: newfileobj = filestowrite[newfilename] else: try: with open(newfilename, \"r\") as f: newfileobj", "= relpath(rlfile, \"assets\") relfile = rlfile thelist = [join(\"texts\", relfile + \".json\")] if", "= obj[\"Files\"] for rlfile in relfiles.keys(): #relfile = relpath(rlfile, \"assets\") relfile = rlfile", "from os.path import join, relpath, normpath from json import load, dump from multiprocessing", "open(newfilename, \"r\") as f: newfileobj = load(f) except: pass #print(\"Cann't read: \" +", "filestowrite[newfilename] = newfileobj for newfilename, newfileobj in filestowrite.items(): with open(newfilename, \"w\") as f:", "len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if \"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"]", "join, relpath, normpath from json import load, dump from multiprocessing import Pool oldpath", "json import load, dump from multiprocessing import Pool oldpath = \"./experimental/translations\" newpath =", "+ oldfile) continue denied = obj[\"DeniedAlternatives\"] if len(denied) == 0: continue entext =", "thefile) objlist = {} try: with open(oldfile, \"r\") as f: objlist = load(f)", "from os import walk from os.path import join, relpath, normpath from json import", "join(subdir, thefile) objlist = {} try: with open(oldfile, \"r\") as f: objlist =", "if alt in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if changed: filestowrite[newfilename] =", "[join(\"texts\", relfile + \".json\")] if relfile in substitutions: thelist += list(substitutions[relfile].values()) for newfile", "f: newfileobj = load(f) except: pass #print(\"Cann't read: \" + newfilename) #raise changed", "= newfileobj for newfilename, newfileobj in filestowrite.items(): with open(newfilename, \"w\") as f: 
dump(newfileobj,", "in obj: #print(\"No alternatives for: \" + oldfile) continue denied = obj[\"DeniedAlternatives\"] if", "for i in range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if \"DeniedAlternatives\"", "not (newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if \"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list()", "= \"./experimental/translations\" newpath = \"./translations\" substitutions = dict() with open(join(newpath, \"substitutions.json\"),\"r\") as f:", "dict() for subdir, dirs, files in walk(oldpath): for thefile in files: if (not", "files in walk(oldpath): for thefile in files: if (not thefile.endswith(\".json\")) or thefile ==", "\"r\") as f: newfileobj = load(f) except: pass #print(\"Cann't read: \" + newfilename)", "\"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list() for alt in denied: if alt", "walk(oldpath): for thefile in files: if (not thefile.endswith(\".json\")) or thefile == \"substitutions.json\": continue", "if (not thefile.endswith(\".json\")) or thefile == \"substitutions.json\": continue oldfile = join(subdir, thefile) objlist", "for newfilename, newfileobj in filestowrite.items(): with open(newfilename, \"w\") as f: dump(newfileobj, f, ensure_ascii", "in files: if (not thefile.endswith(\".json\")) or thefile == \"substitutions.json\": continue oldfile = join(subdir,", "{} try: with open(oldfile, \"r\") as f: objlist = load(f) except: print(\"Cann't load:", "multiprocessing import Pool oldpath = \"./experimental/translations\" newpath = \"./translations\" substitutions = dict() with", "in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list() for alt in denied: if alt in newfileobj[i][\"DeniedAlternatives\"]:", "newfileobj for newfilename, newfileobj in filestowrite.items(): with open(newfilename, \"w\") as f: dump(newfileobj, f,", "relfiles.keys(): #relfile = relpath(rlfile, \"assets\") relfile = 
rlfile thelist = [join(\"texts\", relfile +", "\"./translations\" substitutions = dict() with open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions = load(f) filestowrite", "for alt in denied: if alt in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True", "= load(f) except: print(\"Cann't load: \" + oldfile) continue for obj in objlist:", "objlist: if \"DeniedAlternatives\" not in obj: #print(\"No alternatives for: \" + oldfile) continue", "import walk from os.path import join, relpath, normpath from json import load, dump", "oldfile) continue denied = obj[\"DeniedAlternatives\"] if len(denied) == 0: continue entext = obj[\"Texts\"][\"Eng\"]", "continue entext = obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for rlfile in relfiles.keys(): #relfile =", "\"assets\") relfile = rlfile thelist = [join(\"texts\", relfile + \".json\")] if relfile in", "+= list(substitutions[relfile].values()) for newfile in thelist: newfileobj = {} newfilename = normpath(join(newpath, newfile))", "as f: substitutions = load(f) filestowrite = dict() for subdir, dirs, files in", "walk from os.path import join, relpath, normpath from json import load, dump from", "print(\"Cann't load: \" + oldfile) continue for obj in objlist: if \"DeniedAlternatives\" not", "#print(\"Cann't read: \" + newfilename) #raise changed = False for i in range(0,", "obj in objlist: if \"DeniedAlternatives\" not in obj: #print(\"No alternatives for: \" +", "alt in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if changed: filestowrite[newfilename] = newfileobj", "\" + oldfile) continue denied = obj[\"DeniedAlternatives\"] if len(denied) == 0: continue entext", "= \"./translations\" substitutions = dict() with open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions = load(f)", "= join(subdir, thefile) objlist = {} try: with open(oldfile, \"r\") as f: objlist", 
"in filestowrite.items(): with open(newfilename, \"w\") as f: dump(newfileobj, f, ensure_ascii = False, indent", "or thefile == \"substitutions.json\": continue oldfile = join(subdir, thefile) objlist = {} try:", "i in range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if \"DeniedAlternatives\" not", "for rlfile in relfiles.keys(): #relfile = relpath(rlfile, \"assets\") relfile = rlfile thelist =", "newfileobj = load(f) except: pass #print(\"Cann't read: \" + newfilename) #raise changed =", "relfiles = obj[\"Files\"] for rlfile in relfiles.keys(): #relfile = relpath(rlfile, \"assets\") relfile =", "= filestowrite[newfilename] else: try: with open(newfilename, \"r\") as f: newfileobj = load(f) except:", "entext = obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for rlfile in relfiles.keys(): #relfile = relpath(rlfile,", "0: continue entext = obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for rlfile in relfiles.keys(): #relfile", "= normpath(join(newpath, newfile)) if newfilename in filestowrite: newfileobj = filestowrite[newfilename] else: try: with", "for subdir, dirs, files in walk(oldpath): for thefile in files: if (not thefile.endswith(\".json\"))", "open(oldfile, \"r\") as f: objlist = load(f) except: print(\"Cann't load: \" + oldfile)", "newfilename) #raise changed = False for i in range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"]", "filestowrite[newfilename] else: try: with open(newfilename, \"r\") as f: newfileobj = load(f) except: pass", "import Pool oldpath = \"./experimental/translations\" newpath = \"./translations\" substitutions = dict() with open(join(newpath,", "{} newfilename = normpath(join(newpath, newfile)) if newfilename in filestowrite: newfileobj = filestowrite[newfilename] else:", "oldfile = join(subdir, thefile) objlist = {} try: with open(oldfile, \"r\") as f:", "= obj[\"DeniedAlternatives\"] if len(denied) == 0: continue entext = obj[\"Texts\"][\"Eng\"] relfiles = 
obj[\"Files\"]", "False for i in range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if", "not in obj: #print(\"No alternatives for: \" + oldfile) continue denied = obj[\"DeniedAlternatives\"]", "dump from multiprocessing import Pool oldpath = \"./experimental/translations\" newpath = \"./translations\" substitutions =", "continue denied = obj[\"DeniedAlternatives\"] if len(denied) == 0: continue entext = obj[\"Texts\"][\"Eng\"] relfiles", "\"./experimental/translations\" newpath = \"./translations\" substitutions = dict() with open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions", "+ oldfile) continue for obj in objlist: if \"DeniedAlternatives\" not in obj: #print(\"No", "alternatives for: \" + oldfile) continue denied = obj[\"DeniedAlternatives\"] if len(denied) == 0:", "continue oldfile = join(subdir, thefile) objlist = {} try: with open(oldfile, \"r\") as", "in thelist: newfileobj = {} newfilename = normpath(join(newpath, newfile)) if newfilename in filestowrite:", "list(substitutions[relfile].values()) for newfile in thelist: newfileobj = {} newfilename = normpath(join(newpath, newfile)) if", "#print(\"No alternatives for: \" + oldfile) continue denied = obj[\"DeniedAlternatives\"] if len(denied) ==", "open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions = load(f) filestowrite = dict() for subdir, dirs,", "try: with open(oldfile, \"r\") as f: objlist = load(f) except: print(\"Cann't load: \"", "changed = True if changed: filestowrite[newfilename] = newfileobj for newfilename, newfileobj in filestowrite.items():", "except: print(\"Cann't load: \" + oldfile) continue for obj in objlist: if \"DeniedAlternatives\"", "obj[\"DeniedAlternatives\"] if len(denied) == 0: continue entext = obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for", "with open(oldfile, \"r\") as f: objlist = load(f) except: print(\"Cann't load: \" +", "in substitutions: thelist += list(substitutions[relfile].values()) 
for newfile in thelist: newfileobj = {} newfilename", "thefile.endswith(\".json\")) or thefile == \"substitutions.json\": continue oldfile = join(subdir, thefile) objlist = {}", "alt in denied: if alt in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if", "range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if \"DeniedAlternatives\" not in newfileobj[i]:", "list() for alt in denied: if alt in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed =", "pass #print(\"Cann't read: \" + newfilename) #raise changed = False for i in", "f: objlist = load(f) except: print(\"Cann't load: \" + oldfile) continue for obj", "filestowrite: newfileobj = filestowrite[newfilename] else: try: with open(newfilename, \"r\") as f: newfileobj =", "if changed: filestowrite[newfilename] = newfileobj for newfilename, newfileobj in filestowrite.items(): with open(newfilename, \"w\")", "as f: objlist = load(f) except: print(\"Cann't load: \" + oldfile) continue for", "as f: newfileobj = load(f) except: pass #print(\"Cann't read: \" + newfilename) #raise", "relfile in substitutions: thelist += list(substitutions[relfile].values()) for newfile in thelist: newfileobj = {}", "True if changed: filestowrite[newfilename] = newfileobj for newfilename, newfileobj in filestowrite.items(): with open(newfilename,", "import join, relpath, normpath from json import load, dump from multiprocessing import Pool", "obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for rlfile in relfiles.keys(): #relfile = relpath(rlfile, \"assets\") relfile", "= True if changed: filestowrite[newfilename] = newfileobj for newfilename, newfileobj in filestowrite.items(): with", "newfileobj = filestowrite[newfilename] else: try: with open(newfilename, \"r\") as f: newfileobj = load(f)", "= obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for rlfile in relfiles.keys(): 
#relfile = relpath(rlfile, \"assets\")", "\"DeniedAlternatives\" not in obj: #print(\"No alternatives for: \" + oldfile) continue denied =", "load(f) except: pass #print(\"Cann't read: \" + newfilename) #raise changed = False for", "thefile == \"substitutions.json\": continue oldfile = join(subdir, thefile) objlist = {} try: with", "if \"DeniedAlternatives\" not in obj: #print(\"No alternatives for: \" + oldfile) continue denied", "read: \" + newfilename) #raise changed = False for i in range(0, len(newfileobj)):", "newfileobj[i][\"DeniedAlternatives\"] = list() for alt in denied: if alt in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt)", "oldfile) continue for obj in objlist: if \"DeniedAlternatives\" not in obj: #print(\"No alternatives", "os import walk from os.path import join, relpath, normpath from json import load,", "for newfile in thelist: newfileobj = {} newfilename = normpath(join(newpath, newfile)) if newfilename", "if newfilename in filestowrite: newfileobj = filestowrite[newfilename] else: try: with open(newfilename, \"r\") as", "= list() for alt in denied: if alt in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed", "rlfile thelist = [join(\"texts\", relfile + \".json\")] if relfile in substitutions: thelist +=", "dict() with open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions = load(f) filestowrite = dict() for", "newfileobj = {} newfilename = normpath(join(newpath, newfile)) if newfilename in filestowrite: newfileobj =", "relpath(rlfile, \"assets\") relfile = rlfile thelist = [join(\"texts\", relfile + \".json\")] if relfile", "if \"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list() for alt in denied: if", "else: try: with open(newfilename, \"r\") as f: newfileobj = load(f) except: pass #print(\"Cann't", "not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list() for alt in 
denied: if alt in", "newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if changed: filestowrite[newfilename] = newfileobj for newfilename, newfileobj in", "substitutions = dict() with open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions = load(f) filestowrite =", "except: pass #print(\"Cann't read: \" + newfilename) #raise changed = False for i", "\"substitutions.json\": continue oldfile = join(subdir, thefile) objlist = {} try: with open(oldfile, \"r\")", "entext): continue if \"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list() for alt in", "load: \" + oldfile) continue for obj in objlist: if \"DeniedAlternatives\" not in", "if relfile in substitutions: thelist += list(substitutions[relfile].values()) for newfile in thelist: newfileobj =", "== entext): continue if \"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list() for alt", "f: substitutions = load(f) filestowrite = dict() for subdir, dirs, files in walk(oldpath):", "obj[\"Files\"] for rlfile in relfiles.keys(): #relfile = relpath(rlfile, \"assets\") relfile = rlfile thelist", "newfilename in filestowrite: newfileobj = filestowrite[newfilename] else: try: with open(newfilename, \"r\") as f:", "changed: filestowrite[newfilename] = newfileobj for newfilename, newfileobj in filestowrite.items(): with open(newfilename, \"w\") as", "in relfiles.keys(): #relfile = relpath(rlfile, \"assets\") relfile = rlfile thelist = [join(\"texts\", relfile", "substitutions: thelist += list(substitutions[relfile].values()) for newfile in thelist: newfileobj = {} newfilename =", "#!/bin/python from os import walk from os.path import join, relpath, normpath from json", "relpath, normpath from json import load, dump from multiprocessing import Pool oldpath =", "#raise changed = False for i in range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] ==", "(newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if 
\"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list() for", "newpath = \"./translations\" substitutions = dict() with open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions =", "substitutions = load(f) filestowrite = dict() for subdir, dirs, files in walk(oldpath): for", "thelist: newfileobj = {} newfilename = normpath(join(newpath, newfile)) if newfilename in filestowrite: newfileobj", "+ newfilename) #raise changed = False for i in range(0, len(newfileobj)): if not", "with open(newfilename, \"w\") as f: dump(newfileobj, f, ensure_ascii = False, indent = 2)", "= rlfile thelist = [join(\"texts\", relfile + \".json\")] if relfile in substitutions: thelist", "with open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions = load(f) filestowrite = dict() for subdir,", "newfilename = normpath(join(newpath, newfile)) if newfilename in filestowrite: newfileobj = filestowrite[newfilename] else: try:", "newfile in thelist: newfileobj = {} newfilename = normpath(join(newpath, newfile)) if newfilename in", "Pool oldpath = \"./experimental/translations\" newpath = \"./translations\" substitutions = dict() with open(join(newpath, \"substitutions.json\"),\"r\")", "for thefile in files: if (not thefile.endswith(\".json\")) or thefile == \"substitutions.json\": continue oldfile", "obj: #print(\"No alternatives for: \" + oldfile) continue denied = obj[\"DeniedAlternatives\"] if len(denied)", "= [join(\"texts\", relfile + \".json\")] if relfile in substitutions: thelist += list(substitutions[relfile].values()) for", "newfilename, newfileobj in filestowrite.items(): with open(newfilename, \"w\") as f: dump(newfileobj, f, ensure_ascii =", "continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if changed: filestowrite[newfilename] = newfileobj for newfilename, newfileobj", "subdir, dirs, files in walk(oldpath): for thefile in files: if (not thefile.endswith(\".json\")) or", "newfileobj[i]: 
newfileobj[i][\"DeniedAlternatives\"] = list() for alt in denied: if alt in newfileobj[i][\"DeniedAlternatives\"]: continue", "import load, dump from multiprocessing import Pool oldpath = \"./experimental/translations\" newpath = \"./translations\"", "if len(denied) == 0: continue entext = obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for rlfile", "thefile in files: if (not thefile.endswith(\".json\")) or thefile == \"substitutions.json\": continue oldfile =", "for obj in objlist: if \"DeniedAlternatives\" not in obj: #print(\"No alternatives for: \"", "newfile)) if newfilename in filestowrite: newfileobj = filestowrite[newfilename] else: try: with open(newfilename, \"r\")", "= load(f) filestowrite = dict() for subdir, dirs, files in walk(oldpath): for thefile", "thelist = [join(\"texts\", relfile + \".json\")] if relfile in substitutions: thelist += list(substitutions[relfile].values())", "\"substitutions.json\"),\"r\") as f: substitutions = load(f) filestowrite = dict() for subdir, dirs, files", "for: \" + oldfile) continue denied = obj[\"DeniedAlternatives\"] if len(denied) == 0: continue", "normpath(join(newpath, newfile)) if newfilename in filestowrite: newfileobj = filestowrite[newfilename] else: try: with open(newfilename,", "try: with open(newfilename, \"r\") as f: newfileobj = load(f) except: pass #print(\"Cann't read:", "with open(newfilename, \"r\") as f: newfileobj = load(f) except: pass #print(\"Cann't read: \"", "== \"substitutions.json\": continue oldfile = join(subdir, thefile) objlist = {} try: with open(oldfile,", "relfile = rlfile thelist = [join(\"texts\", relfile + \".json\")] if relfile in substitutions:", "= load(f) except: pass #print(\"Cann't read: \" + newfilename) #raise changed = False", "= False for i in range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue", "== 0: continue entext = obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for rlfile in relfiles.keys():", "if not 
(newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if \"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] =", "in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if changed: filestowrite[newfilename] = newfileobj for", "filestowrite.items(): with open(newfilename, \"w\") as f: dump(newfileobj, f, ensure_ascii = False, indent =", "dirs, files in walk(oldpath): for thefile in files: if (not thefile.endswith(\".json\")) or thefile", "\" + newfilename) #raise changed = False for i in range(0, len(newfileobj)): if", "in range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] == entext): continue if \"DeniedAlternatives\" not in", "len(denied) == 0: continue entext = obj[\"Texts\"][\"Eng\"] relfiles = obj[\"Files\"] for rlfile in", "rlfile in relfiles.keys(): #relfile = relpath(rlfile, \"assets\") relfile = rlfile thelist = [join(\"texts\",", "denied: if alt in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if changed: filestowrite[newfilename]", "denied = obj[\"DeniedAlternatives\"] if len(denied) == 0: continue entext = obj[\"Texts\"][\"Eng\"] relfiles =", "continue for obj in objlist: if \"DeniedAlternatives\" not in obj: #print(\"No alternatives for:", "= dict() with open(join(newpath, \"substitutions.json\"),\"r\") as f: substitutions = load(f) filestowrite = dict()", "oldpath = \"./experimental/translations\" newpath = \"./translations\" substitutions = dict() with open(join(newpath, \"substitutions.json\"),\"r\") as", "objlist = {} try: with open(oldfile, \"r\") as f: objlist = load(f) except:", "objlist = load(f) except: print(\"Cann't load: \" + oldfile) continue for obj in", "load(f) except: print(\"Cann't load: \" + oldfile) continue for obj in objlist: if", "changed = False for i in range(0, len(newfileobj)): if not (newfileobj[i][\"Texts\"][\"Eng\"] == entext):", "in denied: if alt 
in newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if changed:", "load, dump from multiprocessing import Pool oldpath = \"./experimental/translations\" newpath = \"./translations\" substitutions", "= {} try: with open(oldfile, \"r\") as f: objlist = load(f) except: print(\"Cann't", "\" + oldfile) continue for obj in objlist: if \"DeniedAlternatives\" not in obj:", "= {} newfilename = normpath(join(newpath, newfile)) if newfilename in filestowrite: newfileobj = filestowrite[newfilename]", "#relfile = relpath(rlfile, \"assets\") relfile = rlfile thelist = [join(\"texts\", relfile + \".json\")]", "= dict() for subdir, dirs, files in walk(oldpath): for thefile in files: if", "in objlist: if \"DeniedAlternatives\" not in obj: #print(\"No alternatives for: \" + oldfile)", "filestowrite = dict() for subdir, dirs, files in walk(oldpath): for thefile in files:", "files: if (not thefile.endswith(\".json\")) or thefile == \"substitutions.json\": continue oldfile = join(subdir, thefile)", "normpath from json import load, dump from multiprocessing import Pool oldpath = \"./experimental/translations\"", "os.path import join, relpath, normpath from json import load, dump from multiprocessing import", "relfile + \".json\")] if relfile in substitutions: thelist += list(substitutions[relfile].values()) for newfile in", "\".json\")] if relfile in substitutions: thelist += list(substitutions[relfile].values()) for newfile in thelist: newfileobj", "continue if \"DeniedAlternatives\" not in newfileobj[i]: newfileobj[i][\"DeniedAlternatives\"] = list() for alt in denied:", "newfileobj[i][\"DeniedAlternatives\"]: continue newfileobj[i][\"DeniedAlternatives\"].append(alt) changed = True if changed: filestowrite[newfilename] = newfileobj for newfilename,", "newfileobj in filestowrite.items(): with open(newfilename, \"w\") as f: dump(newfileobj, f, ensure_ascii = False,", "thelist += list(substitutions[relfile].values()) for 
newfile in thelist: newfileobj = {} newfilename = normpath(join(newpath,", "\"r\") as f: objlist = load(f) except: print(\"Cann't load: \" + oldfile) continue", "from json import load, dump from multiprocessing import Pool oldpath = \"./experimental/translations\" newpath", "from multiprocessing import Pool oldpath = \"./experimental/translations\" newpath = \"./translations\" substitutions = dict()", "load(f) filestowrite = dict() for subdir, dirs, files in walk(oldpath): for thefile in", "in walk(oldpath): for thefile in files: if (not thefile.endswith(\".json\")) or thefile == \"substitutions.json\":", "(not thefile.endswith(\".json\")) or thefile == \"substitutions.json\": continue oldfile = join(subdir, thefile) objlist =" ]
[ "Generator -------------------------------------- (c) 2021 - Stanley Solutions - <NAME> This application serves an", "Application Base app = FastAPI() # Mount the Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"),", "response_class=HTMLResponse) async def load_playlist(request: Request, playlist: str = Form(...)): print(playlist) return page(request=request, url=playlist)", "(c) 2021 - Stanley Solutions - <NAME> This application serves an interface to", "On Domain Name domain = urlparse(url).netloc if 'music.apple' in domain: client = apple_music_client.ApplePlaylister(url)", "# Main Application Response @app.get(\"/\", response_class=HTMLResponse) async def root(request: Request): return page(request=request) #", "spotify_client.SpotifyPlaylister(url) playlist, tracks = client() data = playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", )", "Website Playlist File Generator -------------------------------------- (c) 2021 - Stanley Solutions - <NAME> This", "page(request=request) # Redirect for Playlist Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect(): return RedirectResponse(\"/\") #", "HTMLResponse, RedirectResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates # Locals import", "apple_music_client from formatter import playlist_html_table # Application Base app = FastAPI() # Mount", "Name domain = urlparse(url).netloc if 'music.apple' in domain: client = apple_music_client.ApplePlaylister(url) elif 'spotify'", "Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async def load_playlist(request: Request, playlist: str = Form(...)): print(playlist)", "# Return Template Response Using Data return templates.TemplateResponse( \"index.html\", { \"request\": request, \"playlist_table\":", "async def load_playlist_redirect(): return RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\", 
response_class=HTMLResponse) async def load_playlist(request:", "JOE Website Playlist File Generator -------------------------------------- (c) 2021 - Stanley Solutions - <NAME>", "= None): \"\"\"Generate the HTML Page Content Using any Provided Playlist URL\"\"\" data", "from fastapi.templating import Jinja2Templates # Locals import spotify_client import apple_music_client from formatter import", "Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\") def page(request: Request, url:", "None: # \"Switch\" On Domain Name domain = urlparse(url).netloc if 'music.apple' in domain:", "StaticFiles from fastapi.templating import Jinja2Templates # Locals import spotify_client import apple_music_client from formatter", "Content Using any Provided Playlist URL\"\"\" data = \"\" if url != None:", "################################################################################ \"\"\" DJ JOE Website Playlist File Generator -------------------------------------- (c) 2021 - Stanley", "'music.apple' in domain: client = apple_music_client.ApplePlaylister(url) elif 'spotify' in domain: client = spotify_client.SpotifyPlaylister(url)", "= spotify_client.SpotifyPlaylister(url) playlist, tracks = client() data = playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\",", "<NAME> This application serves an interface to allow the recording of Apple Music", "urlparse from fastapi import FastAPI, Request, Form from fastapi.responses import HTMLResponse, RedirectResponse from", "any Provided Playlist URL\"\"\" data = \"\" if url != None: # \"Switch\"", "File Generator -------------------------------------- (c) 2021 - Stanley Solutions - <NAME> This application serves", "from formatter import playlist_html_table # Application Base app = FastAPI() # Mount the", "# Redirect for Playlist Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect(): return 
RedirectResponse(\"/\") # Load", "in domain: client = apple_music_client.ApplePlaylister(url) elif 'spotify' in domain: client = spotify_client.SpotifyPlaylister(url) playlist,", "'spotify' in domain: client = spotify_client.SpotifyPlaylister(url) playlist, tracks = client() data = playlist_html_table(", "= client() data = playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", ) # Return Template", "Locals import spotify_client import apple_music_client from formatter import playlist_html_table # Application Base app", "templates.TemplateResponse( \"index.html\", { \"request\": request, \"playlist_table\": data, }, ) # Main Application Response", "allow the recording of Apple Music or Spotify playlists. \"\"\" ################################################################################ # Requirements", "name=\"static\") templates = Jinja2Templates(directory=\"templates\") def page(request: Request, url: str = None): \"\"\"Generate the", "url: str = None): \"\"\"Generate the HTML Page Content Using any Provided Playlist", "Form from fastapi.responses import HTMLResponse, RedirectResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import", "Mount the Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\") def page(request:", "fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates # Locals import spotify_client import apple_music_client", "def page(request: Request, url: str = None): \"\"\"Generate the HTML Page Content Using", "request, \"playlist_table\": data, }, ) # Main Application Response @app.get(\"/\", response_class=HTMLResponse) async def", "Apple Music or Spotify playlists. 
\"\"\" ################################################################################ # Requirements from urllib.parse import urlparse", "DJ JOE Website Playlist File Generator -------------------------------------- (c) 2021 - Stanley Solutions -", "response_class=HTMLResponse) async def root(request: Request): return page(request=request) # Redirect for Playlist Endpoint @app.get(\"/load_playlist\")", "in domain: client = spotify_client.SpotifyPlaylister(url) playlist, tracks = client() data = playlist_html_table( playlist=playlist,", "recording of Apple Music or Spotify playlists. \"\"\" ################################################################################ # Requirements from urllib.parse", "playlist, tracks = client() data = playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", ) #", "Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\") def page(request: Request, url: str =", "data, }, ) # Main Application Response @app.get(\"/\", response_class=HTMLResponse) async def root(request: Request):", "RedirectResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates # Locals import spotify_client", "classes=\"\", ) # Return Template Response Using Data return templates.TemplateResponse( \"index.html\", { \"request\":", "{ \"request\": request, \"playlist_table\": data, }, ) # Main Application Response @app.get(\"/\", response_class=HTMLResponse)", "Response Using Data return templates.TemplateResponse( \"index.html\", { \"request\": request, \"playlist_table\": data, }, )", "table_id=\"playlist\", classes=\"\", ) # Return Template Response Using Data return templates.TemplateResponse( \"index.html\", {", "if 'music.apple' in domain: client = apple_music_client.ApplePlaylister(url) elif 'spotify' in domain: client =", "fastapi.responses import HTMLResponse, RedirectResponse from fastapi.staticfiles 
import StaticFiles from fastapi.templating import Jinja2Templates #", "data = \"\" if url != None: # \"Switch\" On Domain Name domain", "= \"\" if url != None: # \"Switch\" On Domain Name domain =", "Jinja2Templates # Locals import spotify_client import apple_music_client from formatter import playlist_html_table # Application", "spotify_client import apple_music_client from formatter import playlist_html_table # Application Base app = FastAPI()", "-------------------------------------- (c) 2021 - Stanley Solutions - <NAME> This application serves an interface", "str = None): \"\"\"Generate the HTML Page Content Using any Provided Playlist URL\"\"\"", "= urlparse(url).netloc if 'music.apple' in domain: client = apple_music_client.ApplePlaylister(url) elif 'spotify' in domain:", "or Spotify playlists. \"\"\" ################################################################################ # Requirements from urllib.parse import urlparse from fastapi", ") # Return Template Response Using Data return templates.TemplateResponse( \"index.html\", { \"request\": request,", "\"\"\" ################################################################################ # Requirements from urllib.parse import urlparse from fastapi import FastAPI, Request,", "2021 - Stanley Solutions - <NAME> This application serves an interface to allow", "# Application Base app = FastAPI() # Mount the Static File Path app.mount(\"/static\",", "\"\" if url != None: # \"Switch\" On Domain Name domain = urlparse(url).netloc", "return page(request=request) # Redirect for Playlist Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect(): return RedirectResponse(\"/\")", "urlparse(url).netloc if 'music.apple' in domain: client = apple_music_client.ApplePlaylister(url) elif 'spotify' in domain: client", "RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async def load_playlist(request: Request, playlist: str =", "# Locals import 
spotify_client import apple_music_client from formatter import playlist_html_table # Application Base", "of Apple Music or Spotify playlists. \"\"\" ################################################################################ # Requirements from urllib.parse import", "Jinja2Templates(directory=\"templates\") def page(request: Request, url: str = None): \"\"\"Generate the HTML Page Content", "# Requirements from urllib.parse import urlparse from fastapi import FastAPI, Request, Form from", "################################################################################ # Requirements from urllib.parse import urlparse from fastapi import FastAPI, Request, Form", "app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\") def page(request: Request, url: str = None):", "page(request: Request, url: str = None): \"\"\"Generate the HTML Page Content Using any", "import FastAPI, Request, Form from fastapi.responses import HTMLResponse, RedirectResponse from fastapi.staticfiles import StaticFiles", "# Mount the Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\") def", "\"request\": request, \"playlist_table\": data, }, ) # Main Application Response @app.get(\"/\", response_class=HTMLResponse) async", "}, ) # Main Application Response @app.get(\"/\", response_class=HTMLResponse) async def root(request: Request): return", "@app.get(\"/load_playlist\") async def load_playlist_redirect(): return RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async def", "\"\"\" DJ JOE Website Playlist File Generator -------------------------------------- (c) 2021 - Stanley Solutions", "the recording of Apple Music or Spotify playlists. 
\"\"\" ################################################################################ # Requirements from", "tracks = client() data = playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", ) # Return", "FastAPI() # Mount the Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\")", "Playlist File Generator -------------------------------------- (c) 2021 - Stanley Solutions - <NAME> This application", "import Jinja2Templates # Locals import spotify_client import apple_music_client from formatter import playlist_html_table #", "urllib.parse import urlparse from fastapi import FastAPI, Request, Form from fastapi.responses import HTMLResponse,", "tracks=tracks, table_id=\"playlist\", classes=\"\", ) # Return Template Response Using Data return templates.TemplateResponse( \"index.html\",", "fastapi import FastAPI, Request, Form from fastapi.responses import HTMLResponse, RedirectResponse from fastapi.staticfiles import", "return templates.TemplateResponse( \"index.html\", { \"request\": request, \"playlist_table\": data, }, ) # Main Application", "Main Application Response @app.get(\"/\", response_class=HTMLResponse) async def root(request: Request): return page(request=request) # Redirect", "= Jinja2Templates(directory=\"templates\") def page(request: Request, url: str = None): \"\"\"Generate the HTML Page", "client() data = playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", ) # Return Template Response", "- <NAME> This application serves an interface to allow the recording of Apple", "client = apple_music_client.ApplePlaylister(url) elif 'spotify' in domain: client = spotify_client.SpotifyPlaylister(url) playlist, tracks =", "domain = urlparse(url).netloc if 'music.apple' in domain: client = apple_music_client.ApplePlaylister(url) elif 'spotify' in", "= apple_music_client.ApplePlaylister(url) elif 
'spotify' in domain: client = spotify_client.SpotifyPlaylister(url) playlist, tracks = client()", "Music or Spotify playlists. \"\"\" ################################################################################ # Requirements from urllib.parse import urlparse from", "\"\"\"Generate the HTML Page Content Using any Provided Playlist URL\"\"\" data = \"\"", "# Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async def load_playlist(request: Request, playlist: str = Form(...)):", "apple_music_client.ApplePlaylister(url) elif 'spotify' in domain: client = spotify_client.SpotifyPlaylister(url) playlist, tracks = client() data", "import HTMLResponse, RedirectResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates # Locals", "@app.get(\"/\", response_class=HTMLResponse) async def root(request: Request): return page(request=request) # Redirect for Playlist Endpoint", "fastapi.templating import Jinja2Templates # Locals import spotify_client import apple_music_client from formatter import playlist_html_table", "Redirect for Playlist Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect(): return RedirectResponse(\"/\") # Load Playlist", "= FastAPI() # Mount the Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates =", "URL\"\"\" data = \"\" if url != None: # \"Switch\" On Domain Name", "import StaticFiles from fastapi.templating import Jinja2Templates # Locals import spotify_client import apple_music_client from", "return RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async def load_playlist(request: Request, playlist: str", "import spotify_client import apple_music_client from formatter import playlist_html_table # Application Base app =", "Spotify playlists. 
\"\"\" ################################################################################ # Requirements from urllib.parse import urlparse from fastapi import", "!= None: # \"Switch\" On Domain Name domain = urlparse(url).netloc if 'music.apple' in", "root(request: Request): return page(request=request) # Redirect for Playlist Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect():", "Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect(): return RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async", "import apple_music_client from formatter import playlist_html_table # Application Base app = FastAPI() #", "\"playlist_table\": data, }, ) # Main Application Response @app.get(\"/\", response_class=HTMLResponse) async def root(request:", "@app.post(\"/load_playlist\", response_class=HTMLResponse) async def load_playlist(request: Request, playlist: str = Form(...)): print(playlist) return page(request=request,", "application serves an interface to allow the recording of Apple Music or Spotify", "interface to allow the recording of Apple Music or Spotify playlists. 
\"\"\" ################################################################################", "StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\") def page(request: Request, url: str = None): \"\"\"Generate", "Domain Name domain = urlparse(url).netloc if 'music.apple' in domain: client = apple_music_client.ApplePlaylister(url) elif", "\"index.html\", { \"request\": request, \"playlist_table\": data, }, ) # Main Application Response @app.get(\"/\",", "url != None: # \"Switch\" On Domain Name domain = urlparse(url).netloc if 'music.apple'", "Template Response Using Data return templates.TemplateResponse( \"index.html\", { \"request\": request, \"playlist_table\": data, },", "client = spotify_client.SpotifyPlaylister(url) playlist, tracks = client() data = playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\",", "elif 'spotify' in domain: client = spotify_client.SpotifyPlaylister(url) playlist, tracks = client() data =", "Playlist URL\"\"\" data = \"\" if url != None: # \"Switch\" On Domain", "None): \"\"\"Generate the HTML Page Content Using any Provided Playlist URL\"\"\" data =", "Request): return page(request=request) # Redirect for Playlist Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect(): return", "formatter import playlist_html_table # Application Base app = FastAPI() # Mount the Static", "to allow the recording of Apple Music or Spotify playlists. 
\"\"\" ################################################################################ #", "app = FastAPI() # Mount the Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates", "- Stanley Solutions - <NAME> This application serves an interface to allow the", "def load_playlist_redirect(): return RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async def load_playlist(request: Request,", "from urllib.parse import urlparse from fastapi import FastAPI, Request, Form from fastapi.responses import", "data = playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", ) # Return Template Response Using", "Response @app.get(\"/\", response_class=HTMLResponse) async def root(request: Request): return page(request=request) # Redirect for Playlist", "Page Content Using any Provided Playlist URL\"\"\" data = \"\" if url !=", "the Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\") def page(request: Request,", "Using Data return templates.TemplateResponse( \"index.html\", { \"request\": request, \"playlist_table\": data, }, ) #", "domain: client = spotify_client.SpotifyPlaylister(url) playlist, tracks = client() data = playlist_html_table( playlist=playlist, tracks=tracks,", "import urlparse from fastapi import FastAPI, Request, Form from fastapi.responses import HTMLResponse, RedirectResponse", "playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", ) # Return Template Response Using Data return templates.TemplateResponse(", "<gh_stars>1-10 ################################################################################ \"\"\" DJ JOE Website Playlist File Generator -------------------------------------- (c) 2021 -", "playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", ) # Return Template Response Using 
Data return", "= playlist_html_table( playlist=playlist, tracks=tracks, table_id=\"playlist\", classes=\"\", ) # Return Template Response Using Data", "serves an interface to allow the recording of Apple Music or Spotify playlists.", "File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") templates = Jinja2Templates(directory=\"templates\") def page(request: Request, url: str", "load_playlist_redirect(): return RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async def load_playlist(request: Request, playlist:", "an interface to allow the recording of Apple Music or Spotify playlists. \"\"\"", "for Playlist Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect(): return RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\",", "if url != None: # \"Switch\" On Domain Name domain = urlparse(url).netloc if", "def root(request: Request): return page(request=request) # Redirect for Playlist Endpoint @app.get(\"/load_playlist\") async def", "This application serves an interface to allow the recording of Apple Music or", "\"Switch\" On Domain Name domain = urlparse(url).netloc if 'music.apple' in domain: client =", "FastAPI, Request, Form from fastapi.responses import HTMLResponse, RedirectResponse from fastapi.staticfiles import StaticFiles from", "HTML Page Content Using any Provided Playlist URL\"\"\" data = \"\" if url", "async def root(request: Request): return page(request=request) # Redirect for Playlist Endpoint @app.get(\"/load_playlist\") async", "templates = Jinja2Templates(directory=\"templates\") def page(request: Request, url: str = None): \"\"\"Generate the HTML", "the HTML Page Content Using any Provided Playlist URL\"\"\" data = \"\" if", "Provided Playlist URL\"\"\" data = \"\" if url != None: # \"Switch\" On", "Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse) async def load_playlist(request: Request, playlist: str = 
Form(...)): print(playlist) return", "Application Response @app.get(\"/\", response_class=HTMLResponse) async def root(request: Request): return page(request=request) # Redirect for", "Playlist Endpoint @app.get(\"/load_playlist\") async def load_playlist_redirect(): return RedirectResponse(\"/\") # Load Playlist @app.post(\"/load_playlist\", response_class=HTMLResponse)", "Solutions - <NAME> This application serves an interface to allow the recording of", "Stanley Solutions - <NAME> This application serves an interface to allow the recording", "Request, url: str = None): \"\"\"Generate the HTML Page Content Using any Provided", "from fastapi import FastAPI, Request, Form from fastapi.responses import HTMLResponse, RedirectResponse from fastapi.staticfiles", "Base app = FastAPI() # Mount the Static File Path app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")", "Return Template Response Using Data return templates.TemplateResponse( \"index.html\", { \"request\": request, \"playlist_table\": data,", "Data return templates.TemplateResponse( \"index.html\", { \"request\": request, \"playlist_table\": data, }, ) # Main", "from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates # Locals import spotify_client import", "domain: client = apple_music_client.ApplePlaylister(url) elif 'spotify' in domain: client = spotify_client.SpotifyPlaylister(url) playlist, tracks", "Request, Form from fastapi.responses import HTMLResponse, RedirectResponse from fastapi.staticfiles import StaticFiles from fastapi.templating", "playlist_html_table # Application Base app = FastAPI() # Mount the Static File Path", "import playlist_html_table # Application Base app = FastAPI() # Mount the Static File", "Using any Provided Playlist URL\"\"\" data = \"\" if url != None: #", "# \"Switch\" On Domain Name domain = urlparse(url).netloc if 'music.apple' in domain: client", ") # Main Application Response @app.get(\"/\", 
response_class=HTMLResponse) async def root(request: Request): return page(request=request)", "playlists. \"\"\" ################################################################################ # Requirements from urllib.parse import urlparse from fastapi import FastAPI,", "from fastapi.responses import HTMLResponse, RedirectResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates", "Requirements from urllib.parse import urlparse from fastapi import FastAPI, Request, Form from fastapi.responses" ]
[ "= preprocessing.scale(x_data) models = [ ('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit',", "parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file containing movie features, input to model\", default=[]) parser.add_argument(\"-e\",", "= data[:,:-1] y_data = data[:,-1] # scaled x_data = preprocessing.scale(x_data) models = [", "windows from a single emotion. ''' np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str,", "can distinguish between # happy/sad data = np.delete(data, np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1)", "k_folds: x_train, x_test = x_data[train], x_data[test] y_train, y_test = y_data[train], y_data[test] clf.fit(x_train, y_train)", "sklearn import preprocessing from sklearn.dummy import DummyClassifier from sklearn.ensemble import RandomForestClassifier from permute.core", "linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc generates", "of estimators for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle results\", type=str)", "axis=0) group_a = np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) 
b_folds = folds", "includes single class for key, clf in models: scores = {'f1':[], 'acc':[]} for", "args.output_file N_ESTIMATORS = args.estimators def process_condition(fnames, condition): print 'condition', condition results = {'labels':[],", "y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred = clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred, average='weighted') _acc", "x_data[test] y_train, y_test = y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred = clf.predict(x_test) _f1 =", "'w')) # end of function #--------- if args.mu: process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw,", "data = np.loadtxt(fname, delimiter=',') # delete neutral to see if we can distinguish", "movie features, input to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators for meta-classifiers\",", "meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle results\", type=str) args = parser.parse_args()", "args = parser.parse_args() output_file = args.output_file N_ESTIMATORS = args.estimators def process_condition(fnames, condition): print", "from sklearn import linear_model from sklearn import metrics from sklearn import preprocessing from", "for fname in fnames: print 'classifying: %s' % fname label = fname.split('/')[-1] data", "'classifying: %s' % fname label = fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',') # delete", "= [ ('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators", "= np.delete(data, np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.)", "we can distinguish between # happy/sad data = np.delete(data, np.where(data[:,-1]==0), axis=0) group_a =", "range(folds): test = split_groups[i] 
train = np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test)) x_data =", "if args.mw: process_condition(args.mw, 'mw') if args.mo: process_condition(args.mo, 'mo') if __name__ == \"__main__\": main()", "delimiter=',') # delete neutral to see if we can distinguish between # happy/sad", "main(): ''' Computes cross-validation by holding out a contiguous block of windows from", "test = split_groups[i] train = np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test)) x_data = data[:,:-1]", "Computes cross-validation by holding out a contiguous block of windows from a single", "contiguous block of windows from a single emotion. ''' np.random.seed(1) parser = argparse.ArgumentParser()", "condition): print 'condition', condition results = {'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)}", "of function #--------- if args.mu: process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw, 'mw') if args.mo:", "for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle results\", type=str) args =", "emotion. ''' np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+', help=\"file containing music", "sklearn import metrics from sklearn import preprocessing from sklearn.dummy import DummyClassifier from sklearn.ensemble", "metavar='mu', type=str, nargs='+', help=\"file containing music features, input to model\", default=[]) parser.add_argument(\"-mw\", metavar='mw',", "np.delete(data, np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) 
b_folds", "parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+', help=\"file containing music features, input to model\", default=[]) parser.add_argument(\"-mw\",", "linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc generates error because test", "class for key, clf in models: scores = {'f1':[], 'acc':[]} for (train, test)", "y_train) y_pred = clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred, average='weighted') _acc = metrics.accuracy_score(y_test, y_pred)", "= metrics.f1_score(y_test, y_pred, average='weighted') _acc = metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc)", "metrics from sklearn import preprocessing from sklearn.dummy import DummyClassifier from sklearn.ensemble import RandomForestClassifier", "'condition', condition results = {'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)} folds =", "N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc generates error because test includes single class for", "metavar='mw', type=str, nargs='+', help=\"file containing music+walking features, input to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo',", "model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file containing movie features, input to model\",", "for (train, test) in k_folds: x_train, x_test = x_data[train], x_data[test] y_train, y_test =", "numpy as np from collections import defaultdict from sklearn import linear_model from sklearn", "models = [ ('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()), ('rf',", "x_data = data[:,:-1] y_data = data[:,-1] # scaled x_data = preprocessing.scale(x_data) models =", "'logit': defaultdict(list), 'rf': 
defaultdict(list)} folds = 10 for fname in fnames: print 'classifying:", "a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = [] for i in range(folds): test = split_groups[i]", "{'f1':[], 'acc':[]} for (train, test) in k_folds: x_train, x_test = x_data[train], x_data[test] y_train,", "y_test = y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred = clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred,", "music+walking features, input to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file containing movie", "type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle results\", type=str) args = parser.parse_args() output_file", "yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of function #--------- if args.mu: process_condition(args.mu, 'mu') if", "features, input to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file containing movie features,", "= x_data[train], x_data[test] y_train, y_test = y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred = clf.predict(x_test)", "linear_model from sklearn import metrics from sklearn import preprocessing from sklearn.dummy import DummyClassifier", "help=\"file containing music+walking features, input to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file", "input to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file containing movie features, input", "split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = [] for i in range(folds): test = split_groups[i] train", "= data[:,-1] # scaled x_data = preprocessing.scale(x_data) models = [ ('baseline', DummyClassifier(strategy =", "= y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred = clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred, average='weighted')", 
"y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end", "from collections import defaultdict from sklearn import linear_model from sklearn import metrics from", "np from collections import defaultdict from sklearn import linear_model from sklearn import metrics", "b_folds)) k_folds = [] for i in range(folds): test = split_groups[i] train =", "# delete neutral to see if we can distinguish between # happy/sad data", "(train, test) in k_folds: x_train, x_test = x_data[train], x_data[test] y_train, y_test = y_data[train],", "scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of function #--------- if args.mu:", "import RandomForestClassifier from permute.core import one_sample def main(): ''' Computes cross-validation by holding", "print 'condition', condition results = {'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)} folds", "containing music features, input to model\", default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file containing", "in k_folds: x_train, x_test = x_data[train], x_data[test] y_train, y_test = y_data[train], y_data[test] clf.fit(x_train,", "split_groups[i+1:])) k_folds.append((train, test)) x_data = data[:,:-1] y_data = data[:,-1] # scaled x_data =", "import defaultdict from sklearn import linear_model from sklearn import metrics from sklearn import", "group_a = np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) 
b_folds = folds -", "import math import yaml import numpy as np from collections import defaultdict from", "np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+', help=\"file containing music features, input", "clf in models: scores = {'f1':[], 'acc':[]} for (train, test) in k_folds: x_train,", "single emotion. ''' np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+', help=\"file containing", "containing movie features, input to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators for", "condition results = {'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)} folds = 10", "y_data[test] clf.fit(x_train, y_train) y_pred = clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred, average='weighted') _acc =", "'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)} folds = 10 for fname in fnames:", "import preprocessing from sklearn.dummy import DummyClassifier from sklearn.ensemble import RandomForestClassifier from permute.core import", "collections import defaultdict from sklearn import linear_model from sklearn import metrics from sklearn", "split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = [] for i in range(folds): test =", "= split_groups[i] train = np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test)) x_data = data[:,:-1] y_data", "results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of function #--------- if args.mu: process_condition(args.mu, 'mu')", "folds - a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = []", "music features, input to 
model\", default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file containing music+walking", "help=\"output with pickle results\", type=str) args = parser.parse_args() output_file = args.output_file N_ESTIMATORS =", "args.mu: process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw, 'mw') if args.mo: process_condition(args.mo, 'mo') if __name__", "out a contiguous block of windows from a single emotion. ''' np.random.seed(1) parser", "import yaml import numpy as np from collections import defaultdict from sklearn import", "default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file containing music+walking features, input to model\", default=[])", "permute.core import one_sample def main(): ''' Computes cross-validation by holding out a contiguous", "happy/sad data = np.delete(data, np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds", "= {'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)} folds = 10 for fname", "parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle results\", type=str) args = parser.parse_args() output_file = args.output_file", "test)) x_data = data[:,:-1] y_data = data[:,-1] # scaled x_data = preprocessing.scale(x_data) models", "cv=10)), ('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc generates error", "models: scores = {'f1':[], 'acc':[]} for (train, test) in k_folds: x_train, x_test =", "# end of function #--------- if args.mu: process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw, 'mw')", "features, input to model\", default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file containing music+walking features,", "single class for key, clf in models: scores = {'f1':[], 'acc':[]} for (train,", "b_folds = 
folds - a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds", "- a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = [] for", "a single emotion. ''' np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+', help=\"file", "args.estimators def process_condition(fnames, condition): print 'condition', condition results = {'labels':[], 'baseline': defaultdict(list), 'logit':", "y_train, y_test = y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred = clf.predict(x_test) _f1 = metrics.f1_score(y_test,", "= clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred, average='weighted') _acc = metrics.accuracy_score(y_test, y_pred) y_proba =", "metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) #", "features, input to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators for meta-classifiers\", type=int,", "scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of function #--------- if", "data = np.delete(data, np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds =", "RandomForestClassifier from permute.core import one_sample def main(): ''' Computes cross-validation by holding out", "type=str, nargs='+', help=\"file containing music features, input to model\", default=[]) 
parser.add_argument(\"-mw\", metavar='mw', type=str,", "in range(folds): test = split_groups[i] train = np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test)) x_data", "parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with", "print 'classifying: %s' % fname label = fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',') #", "\"--estimators\", help=\"number of estimators for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle", "if we can distinguish between # happy/sad data = np.delete(data, np.where(data[:,-1]==0), axis=0) group_a", "if args.mu: process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw, 'mw') if args.mo: process_condition(args.mo, 'mo') if", "model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\",", "preprocessing from sklearn.dummy import DummyClassifier from sklearn.ensemble import RandomForestClassifier from permute.core import one_sample", "metavar='mo', type=str, nargs='+', help=\"file containing movie features, input to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\",", "[] for i in range(folds): test = split_groups[i] train = np.concatenate((split_groups[:i] + split_groups[i+1:]))", "k_folds = [] for i in range(folds): test = split_groups[i] train = np.concatenate((split_groups[:i]", "process_condition(fnames, condition): print 'condition', condition results = {'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf':", "_f1 = metrics.f1_score(y_test, y_pred, average='weighted') _acc = metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1)", "sklearn.dummy import DummyClassifier from 
sklearn.ensemble import RandomForestClassifier from permute.core import one_sample def main():", "= args.output_file N_ESTIMATORS = args.estimators def process_condition(fnames, condition): print 'condition', condition results =", "scores = {'f1':[], 'acc':[]} for (train, test) in k_folds: x_train, x_test = x_data[train],", "np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) b_folds =", "type=str) args = parser.parse_args() output_file = args.output_file N_ESTIMATORS = args.estimators def process_condition(fnames, condition):", "from sklearn.ensemble import RandomForestClassifier from permute.core import one_sample def main(): ''' Computes cross-validation", "= np.loadtxt(fname, delimiter=',') # delete neutral to see if we can distinguish between", "% fname label = fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',') # delete neutral to", "because test includes single class for key, clf in models: scores = {'f1':[],", "model\", default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file containing music+walking features, input to model\",", "('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc generates error because test includes", "results\", type=str) args = parser.parse_args() output_file = args.output_file N_ESTIMATORS = args.estimators def process_condition(fnames,", "results = {'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)} folds = 10 for", "'rf': defaultdict(list)} folds = 10 for fname in fnames: print 'classifying: %s' %", "output_file = args.output_file N_ESTIMATORS = args.estimators def process_condition(fnames, condition): print 'condition', condition results", "split_groups[i] train = np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test)) x_data = data[:,:-1] y_data =", "i in range(folds): test = 
split_groups[i] train = np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test))", "split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = [] for i in", "preprocessing.scale(x_data) models = [ ('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()),", "x_test = x_data[train], x_data[test] y_train, y_test = y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred =", "_acc = metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml',", "math import yaml import numpy as np from collections import defaultdict from sklearn", "scaled x_data = preprocessing.scale(x_data) models = [ ('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20,", "x_data = preprocessing.scale(x_data) models = [ ('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)),", "for key, clf in models: scores = {'f1':[], 'acc':[]} for (train, test) in", "DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ]", "as np from collections import defaultdict from sklearn import linear_model from sklearn import", "import DummyClassifier from sklearn.ensemble import RandomForestClassifier from permute.core import one_sample def main(): '''", "''' Computes cross-validation by holding out a contiguous block of windows from a", "argparse.ArgumentParser() parser.add_argument(\"-mu\", 
metavar='mu', type=str, nargs='+', help=\"file containing music features, input to model\", default=[])", "to model\", default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file containing music+walking features, input to", "'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label) #", "containing music+walking features, input to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file containing", "= 10 for fname in fnames: print 'classifying: %s' % fname label =", "N_ESTIMATORS = args.estimators def process_condition(fnames, condition): print 'condition', condition results = {'labels':[], 'baseline':", "np.loadtxt(fname, delimiter=',') # delete neutral to see if we can distinguish between #", "x_train, x_test = x_data[train], x_data[test] y_train, y_test = y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred", "= argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+', help=\"file containing music features, input to model\",", "results['labels'].append(label) # roc_auc generates error because test includes single class for key, clf", "yaml import numpy as np from collections import defaultdict from sklearn import linear_model", "k_folds.append((train, test)) x_data = data[:,:-1] y_data = data[:,-1] # scaled x_data = preprocessing.scale(x_data)", "clf.fit(x_train, y_train) y_pred = clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred, average='weighted') _acc = metrics.accuracy_score(y_test,", "import argparse import math import yaml import numpy as np from collections import", "to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\",", "distinguish between # happy/sad data = 
np.delete(data, np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1) group_b", "sklearn.ensemble import RandomForestClassifier from permute.core import one_sample def main(): ''' Computes cross-validation by", "parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+', help=\"file containing music features, input to", "in fnames: print 'classifying: %s' % fname label = fname.split('/')[-1] data = np.loadtxt(fname,", "of windows from a single emotion. ''' np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu',", "defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)} folds = 10 for fname in fnames: print", "default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output", "10 for fname in fnames: print 'classifying: %s' % fname label = fname.split('/')[-1]", "= np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) b_folds = folds - a_folds split_groups = []", "math.floor(folds/2.) 
b_folds = folds - a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds))", "= [] for i in range(folds): test = split_groups[i] train = np.concatenate((split_groups[:i] +", "label = fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',') # delete neutral to see if", "help=\"file containing movie features, input to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators", "estimators for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle results\", type=str) args", "fname label = fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',') # delete neutral to see", "# roc_auc generates error because test includes single class for key, clf in", "import one_sample def main(): ''' Computes cross-validation by holding out a contiguous block", "] results['labels'].append(label) # roc_auc generates error because test includes single class for key,", "#--------- if args.mu: process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw, 'mw') if args.mo: process_condition(args.mo, 'mo')", "in models: scores = {'f1':[], 'acc':[]} for (train, test) in k_folds: x_train, x_test", "to see if we can distinguish between # happy/sad data = np.delete(data, np.where(data[:,-1]==0),", "clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred, average='weighted') _acc = metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test)", "= math.floor(folds/2.) b_folds = folds - a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0],", "np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) 
b_folds = folds - a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0],", "= folds - a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds =", "y_data = data[:,-1] # scaled x_data = preprocessing.scale(x_data) models = [ ('baseline', DummyClassifier(strategy", "('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc generates error because", "RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc generates error because test includes single", "[] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = [] for i in range(folds): test", "a_folds = math.floor(folds/2.) b_folds = folds - a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds))", "y_pred, average='weighted') _acc = metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc']))", "np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) b_folds = folds - a_folds split_groups", "from a single emotion. 
''' np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+',", "default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle results\", type=str) args = parser.parse_args() output_file =", "nargs='+', help=\"file containing music+walking features, input to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+',", "+ split_groups[i+1:])) k_folds.append((train, test)) x_data = data[:,:-1] y_data = data[:,-1] # scaled x_data", "defaultdict(list)} folds = 10 for fname in fnames: print 'classifying: %s' % fname", "test includes single class for key, clf in models: scores = {'f1':[], 'acc':[]}", "import metrics from sklearn import preprocessing from sklearn.dummy import DummyClassifier from sklearn.ensemble import", "with pickle results\", type=str) args = parser.parse_args() output_file = args.output_file N_ESTIMATORS = args.estimators", "def main(): ''' Computes cross-validation by holding out a contiguous block of windows", "y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of", "defaultdict from sklearn import linear_model from sklearn import metrics from sklearn import preprocessing", "folds = 10 for fname in fnames: print 'classifying: %s' % fname label", "import numpy as np from collections import defaultdict from sklearn import linear_model from", "by holding out a contiguous block of windows from a single emotion. 
'''", "%s' % fname label = fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',') # delete neutral", "roc_auc generates error because test includes single class for key, clf in models:", "from permute.core import one_sample def main(): ''' Computes cross-validation by holding out a", "end of function #--------- if args.mu: process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw, 'mw') if", "input to model\", default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file containing music+walking features, input", "results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of function #--------- if args.mu: process_condition(args.mu,", "parser.parse_args() output_file = args.output_file N_ESTIMATORS = args.estimators def process_condition(fnames, condition): print 'condition', condition", "cross-validation by holding out a contiguous block of windows from a single emotion.", "between # happy/sad data = np.delete(data, np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1) group_b =", "input to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of estimators for meta-classifiers\", type=int, default=100)", "parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file containing music+walking features, input to model\", default=[]) parser.add_argument(\"-mo\",", "help=\"number of estimators for meta-classifiers\", type=int, default=100) parser.add_argument(\"-o\", \"--output_file\", help=\"output with pickle results\",", "from sklearn import preprocessing from sklearn.dummy import DummyClassifier from sklearn.ensemble import RandomForestClassifier from", "DummyClassifier from sklearn.ensemble import RandomForestClassifier from permute.core import one_sample def main(): ''' Computes", "fname in fnames: print 'classifying: %s' % fname label = 
fname.split('/')[-1] data =", "import linear_model from sklearn import metrics from sklearn import preprocessing from sklearn.dummy import", "block of windows from a single emotion. ''' np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\",", "fnames: print 'classifying: %s' % fname label = fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',')", "x_data[train], x_data[test] y_train, y_test = y_data[train], y_data[test] clf.fit(x_train, y_train) y_pred = clf.predict(x_test) _f1", "{'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list), 'rf': defaultdict(list)} folds = 10 for fname in", "see if we can distinguish between # happy/sad data = np.delete(data, np.where(data[:,-1]==0), axis=0)", "= clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of function", "open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of function #--------- if args.mu: process_condition(args.mu, 'mu') if args.mw:", "function #--------- if args.mu: process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw, 'mw') if args.mo: process_condition(args.mo,", "a_folds split_groups = [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = [] for i", "'acc':[]} for (train, test) in k_folds: x_train, x_test = x_data[train], x_data[test] y_train, y_test", "np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test)) x_data = data[:,:-1] y_data = data[:,-1] # scaled", "metrics.f1_score(y_test, y_pred, average='weighted') _acc = metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1']))", "delete neutral to see if we 
can distinguish between # happy/sad data =", "= [] split_groups.extend(np.array_split(group_a[0], a_folds)) split_groups.extend(np.array_split(group_b[0], b_folds)) k_folds = [] for i in range(folds):", "generates error because test includes single class for key, clf in models: scores", "nargs='+', help=\"file containing music features, input to model\", default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+',", "error because test includes single class for key, clf in models: scores =", "# happy/sad data = np.delete(data, np.where(data[:,-1]==0), axis=0) group_a = np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1)", "clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w')) # end of function #---------", "argparse import math import yaml import numpy as np from collections import defaultdict", "for i in range(folds): test = split_groups[i] train = np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train,", "one_sample def main(): ''' Computes cross-validation by holding out a contiguous block of", "= parser.parse_args() output_file = args.output_file N_ESTIMATORS = args.estimators def process_condition(fnames, condition): print 'condition',", "default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file containing movie features, input to model\", default=[])", "# scaled x_data = preprocessing.scale(x_data) models = [ ('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit',", "holding out a contiguous block of windows from a single emotion. 
''' np.random.seed(1)", "to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str, nargs='+', help=\"file containing movie features, input to", "nargs='+', help=\"file containing movie features, input to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number of", "type=str, nargs='+', help=\"file containing music+walking features, input to model\", default=[]) parser.add_argument(\"-mo\", metavar='mo', type=str,", "test) in k_folds: x_train, x_test = x_data[train], x_data[test] y_train, y_test = y_data[train], y_data[test]", "'mu') if args.mw: process_condition(args.mw, 'mw') if args.mo: process_condition(args.mo, 'mo') if __name__ == \"__main__\":", "from sklearn import metrics from sklearn import preprocessing from sklearn.dummy import DummyClassifier from", "''' np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument(\"-mu\", metavar='mu', type=str, nargs='+', help=\"file containing music features,", "= {'f1':[], 'acc':[]} for (train, test) in k_folds: x_train, x_test = x_data[train], x_data[test]", "('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)),", "group_b = np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) 
b_folds = folds - a_folds split_groups =", "\"--output_file\", help=\"output with pickle results\", type=str) args = parser.parse_args() output_file = args.output_file N_ESTIMATORS", "key, clf in models: scores = {'f1':[], 'acc':[]} for (train, test) in k_folds:", "from sklearn.dummy import DummyClassifier from sklearn.ensemble import RandomForestClassifier from permute.core import one_sample def", "data[:,-1] # scaled x_data = preprocessing.scale(x_data) models = [ ('baseline', DummyClassifier(strategy = 'most_frequent')),", "#('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc", "average='weighted') _acc = metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results,", "data[:,:-1] y_data = data[:,-1] # scaled x_data = preprocessing.scale(x_data) models = [ ('baseline',", "= fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',') # delete neutral to see if we", "= np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test)) x_data = data[:,:-1] y_data = data[:,-1] #", "defaultdict(list), 'rf': defaultdict(list)} folds = 10 for fname in fnames: print 'classifying: %s'", "sklearn import linear_model from sklearn import metrics from sklearn import preprocessing from sklearn.dummy", "help=\"file containing music features, input to model\", default=[]) parser.add_argument(\"-mw\", metavar='mw', type=str, nargs='+', help=\"file", "a contiguous block of windows from a single emotion. 
''' np.random.seed(1) parser =", "= 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)), ] results['labels'].append(label)", "= N_ESTIMATORS)), ] results['labels'].append(label) # roc_auc generates error because test includes single class", "def process_condition(fnames, condition): print 'condition', condition results = {'labels':[], 'baseline': defaultdict(list), 'logit': defaultdict(list),", "= np.where(data[:,-1]==1) group_b = np.where(data[:,-1]==-1) a_folds = math.floor(folds/2.) b_folds = folds - a_folds", "train = np.concatenate((split_groups[:i] + split_groups[i+1:])) k_folds.append((train, test)) x_data = data[:,:-1] y_data = data[:,-1]", "fname.split('/')[-1] data = np.loadtxt(fname, delimiter=',') # delete neutral to see if we can", "y_pred = clf.predict(x_test) _f1 = metrics.f1_score(y_test, y_pred, average='weighted') _acc = metrics.accuracy_score(y_test, y_pred) y_proba", "neutral to see if we can distinguish between # happy/sad data = np.delete(data,", "= metrics.accuracy_score(y_test, y_pred) y_proba = clf.predict_proba(x_test) scores['f1'].append(_f1) scores['acc'].append(_acc) results[key]['f1'].append(np.mean(scores['f1'])) results[key]['acc'].append(np.mean(scores['acc'])) yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w'))", "= args.estimators def process_condition(fnames, condition): print 'condition', condition results = {'labels':[], 'baseline': defaultdict(list),", "type=str, nargs='+', help=\"file containing movie features, input to model\", default=[]) parser.add_argument(\"-e\", \"--estimators\", help=\"number", "process_condition(args.mu, 'mu') if args.mw: process_condition(args.mw, 'mw') if args.mo: process_condition(args.mo, 'mo') if __name__ ==", "[ ('baseline', DummyClassifier(strategy = 'most_frequent')), #('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)), ('logit', 
linear_model.LogisticRegression()), ('rf', RandomForestClassifier(n_estimators =", "pickle results\", type=str) args = parser.parse_args() output_file = args.output_file N_ESTIMATORS = args.estimators def" ]
[ "def shortcode(self): \"Returns abbreviation -- text between parenthesis -- if there is any\"", "other methodologies does this particular methodology address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed next", "SimpleArticleForm(ModelForm): class Meta: model = Article fields = ['title', 'image', 'date', 'head', 'includes_form',", "= models.CharField(max_length=255) def __str__(self): return self.name class MethodCategory(models.Model): name = models.CharField(max_length=255) description =", "'Not worked on'), ('ip', 'In progress'), ('dr', 'Draft ready for review'), ('rv', 'Reviewed", "field - be sure to fill this out\") image = models.ImageField(null=True, blank=True, upload_to='articles')", "blank=True, upload_to='organizations') processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description", "choices=METHOD_SCORING, null=True, blank=True) impacts = models.CharField(\"quantitative weighting of impacts of material flows\", max_length=1,", "blank=True) date = models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True,", "HTMLField(null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of the source website/article --", "if this is not linked to a publication\") class Meta: ordering = [\"date\"]", "to 350x350 pixels\") PEOPLE_STATUS = ( ('active', 'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive',", "= ['title', 'image', 'date', 'head', 'includes_form', 'slug', 'active','content'] class Event(models.Model): article = models.OneToOneField(", "blank=True) CITYLOOPS = ( ('no', 'No'), ('pending', 'Yes - pending'), ('yes', 'Yes -", "= ( ('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = 
models.CharField(max_length=20, choices=TYPES)", "cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments about the importing process') url = models.CharField(max_length=500,", "StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb': (300,", "models.CharField(max_length=255, null=True, blank=True) references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup, blank=True)", "\"(\" in self.name: s = self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class Meta:", "'event', 'authors', 'spaces', 'tags', 'materials'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn':", "= models.ManyToManyField(Organization, blank=True) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return '%s", "('deleted', 'Deleted'), ) status = models.CharField(max_length=8, choices=STATUS, db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations", "related_name='children' ) hidden = models.BooleanField(db_index=True, default=False, help_text=\"Mark if tag is superseded/not yet approved/deactivated\")", "__str__(self): return self.title class Meta: verbose_name_plural = \"case studies\" class UserAction(models.Model): name =", "def source(self): \"Return details of where this reference was published at/in\" if self.journal:", "= models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return self.name class Meta: ordering = [\"name\"] class", "url = models.CharField(max_length=255, null=True, blank=True) references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups =", "blank=True) head = models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False) 
content = HTMLField('Content', help_text=\"The content", "'slug', 'active','content'] class Event(models.Model): article = models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE", "ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = ( ('funder',", "class DataViz(models.Model): title = models.CharField(max_length=255) image = StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024,", "models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site = CurrentSiteManager() show_in_list = models.BooleanField(default=True) def __str__(self): return", "class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = (", "= models.BooleanField(default=True) def __str__(self): return self.title class Meta: ordering = [\"position\"] class VideoCollectionForm(ModelForm):", "null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) email_public = models.BooleanField() city = models.ForeignKey(ReferenceSpace,", "= models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self): return", "StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE) objects", "('inactive', 'Inactive'), ('pending', 'Pending Review'), ) status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site =", "<reponame>metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3<filename>src/core/models.py<gh_stars>0 from django.db import models from multiplicity.models import ReferenceSpace, 
License from django.forms import", "= models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True, blank=True) GROUP = ( ('academic', 'Academic'), ('theses',", "= [\"name\"] class MethodClassification(models.Model): name = models.CharField(max_length=255) description = models.TextField() def __str__(self): return", "blank=True) collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License,", "null=True, blank=True) energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs = models.CharField(\"outputs to environment\",", "= models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of", "'spaces', 'tags', 'materials'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url':", "models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True,", "this object was created. 
created_at = models.DateTimeField(auto_now_add=True) # A timestamp reprensenting when this", "help_text=\"Mark if tag is superseded/not yet approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True, default=False) is_accounting_method =", "ready for review'), ('rv', 'Reviewed - DONE'), ('ec', 'External copy'), ('sk', 'Skip -", "models.BooleanField() projects = models.BooleanField() theses = models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site =", "updated_at = models.DateTimeField(auto_now=True) class Meta: abstract = True # By default, any model", "blank=True) parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE = ( ('academic', 'Research Institution'),", "db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True,", "'license'] labels = { 'primary_space': 'Reference space', 'url': 'Video URL' } class Tag(models.Model):", "(7, 'Time Horizon'), (9, 'Methodologies'), (10, 'Other'), ) parent = models.CharField(max_length=2, choices=PARENTS, null=True,", "basis as needed, but reverse-chronological is a good # default ordering for most", "'Active'), ('deleted', 'Deleted'), ) status = models.CharField(max_length=8, choices=STATUS, db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors')", "'Video URL' } class Tag(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True)", "families\" verbose_name = \"method family\" class Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318},", "title = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) link = models.CharField(max_length=255, null=True, blank=True) date", "blank=True, 
related_name='people_country', limit_choices_to={'type': 2}) profile = models.TextField(null=True, blank=True) research_interests = models.TextField(null=True, blank=True) website", "timestamp reprensenting when this object was last updated. updated_at = models.DateTimeField(auto_now=True) class Meta:", "= models.DateField(blank=True, null=True) STATUS = ( ('planned', 'Planned'), ('ongoing', 'In progress'), ('finished', 'Finished'),", "Video fields = ['title', 'website', 'url', 'primary_space', 'description', 'author', 'date', 'thumbnail', 'license'] labels", "address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed next steps to further develop/improve this methodology\")", "blank=True) print_aim = models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True, blank=True) RELEVANCE = ( ('u',", "Events'), ) section = models.CharField(max_length=20, choices=SECTIONS, default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects", "is superseded/not yet approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS", "= models.TextField(null=True, blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering = [\"date\"] class", "collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE,", "800)}, null=True, blank=True) image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True)", "a partial or conditional way'), ('0', '0 - Not included at all'), )", "null=True, blank=True) isbn = models.CharField(max_length=255, null=True, blank=True) comments = models.TextField(null=True, blank=True) STATUS =", "url = 
models.CharField(max_length=255) description = models.TextField() author = models.CharField(max_length=255) date = models.DateField(null=True) people", "from django.contrib.sites.managers import CurrentSiteManager from django.conf import settings # Used for image resizing", "= ( ('3', '3 - The item is a defining feature of the", "null=True, blank=True, help_text=\"URL of the source website/article -- ONLY enter if this is", "= models.TextField(null=True, blank=True, help_text=\"Name of the source website/article -- ONLY enter if this", "from multiplicity.models import ReferenceSpace, License from django.forms import ModelForm from django.template.defaultfilters import slugify", "(1600,1600)}, null=True, blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True)", "models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True,", "'Private Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'), ('other', 'Other'), ) type = models.CharField(max_length=20, choices=ORG_TYPE)", "= models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they continue to implement it?\") consideration = models.TextField(null=True,", "self.lastname) class Meta: ordering = [\"firstname\", \"lastname\"] class PeopleForm(ModelForm): class Meta: model =", "default=False) PARENTS = ( (1, 'Publication Types'), (2, 'Metabolism Studies'), (3, 'Countries'), (4,", "('NL', 'Dutch'), ('OT', 'Other'), ) language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True,", "max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling = models.CharField(\"recyling of material and energy\", max_length=1, choices=METHOD_SCORING,", "blank=True, limit_choices_to={'type': 3}) 
budget = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True) print_aim = models.TextField(null=True, blank=True)", "name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True)", "models.Manager() on_site = CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections = models.ManyToManyField(VideoCollection,", "__str__(self): return self.name class Meta: ordering = [\"name\"] class Organization(models.Model): name = models.CharField(max_length=255)", "class Meta: ordering = [\"name\"] class People(models.Model): firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255)", "- \" + self.type + \" - \" + self.project.name class Timeline(models.Model): title", "blank=True) image = models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self): return self.name class Meta: ordering", "models.DateField(null=True) people = models.ManyToManyField(People, blank=True) VIDEOSITES = ( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia',", "= ( ('EN', 'English'), ('ES', 'Spanish'), ('CH', 'Chinese'), ('FR', 'French'), ('GE', 'German'), ('NL',", "stock_changes = models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) specific = models.CharField(\"specific goods and", "= models.TextField(null=True, blank=True, help_text=\"Purpose of the study\") def __str__(self): return self.title class Meta:", "= models.DateField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return self.title", "def __str__(self): return self.name class Meta: ordering = [\"name\"] class Organization(models.Model): name =", "models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) materials = 
models.ManyToManyField('staf.Material', blank=True) spaces = models.ManyToManyField(ReferenceSpace, blank=True) def", "on_delete=models.CASCADE, null=True, blank=True) collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license", "OrganizationForm(ModelForm): class Meta: model = Organization exclude = ['id', 'processes'] class Publisher(models.Model): name", "null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) member_since =", "blank=True) ORG_TYPE = ( ('academic', 'Research Institution'), ('universities', 'Universities'), ('city_government', 'City Government'), ('regional_government',", "= models.TextField(null=True, blank=True) head = models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False) content = HTMLField('Content',", "'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True ) authors = models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True)", "a previous classification - can be left empty\") def __str__(self): return self.name @property", "of the approach'), ('2', '2 - The feature is typically included in the", "'Deceased'), ('inactive', 'Inactive'), ('pending', 'Pending Review'), ) status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site", "class ReferenceFormAdmin(ModelForm): class Meta: model = Reference exclude = ['id', 'organizations', 'processes', 'date_added',", "can be left empty\") def __str__(self): return self.name @property def shortcode(self): \"Returns abbreviation", "null=True, blank=True) STATUS = ( ('nw', 'Not worked on'), ('ip', 'In progress'), ('dr',", "url = models.CharField(max_length=255, null=True, blank=True) def __str__(self): return self.article.title class EventForm(ModelForm): class Meta:", "('deceased', 'Deceased'), 
('inactive', 'Inactive'), ('pending', 'Pending Review'), ) status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active')", "+ self.type + \" - \" + self.reference.title class MaterialGroup(models.Model): name = models.CharField(max_length=255)", ") website = models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager()", "= models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False) def __str__(self): return self.tag.name class Meta: ordering", "website = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) google_scholar = models.CharField(max_length=255,", "blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) member_since = models.DateField(null=True,", "on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table = 'core_reference_authors' class ReferenceForm(ModelForm): class", "models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget = models.DecimalField(max_digits=10,", "file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the file if you are the", "article = models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE = ( ('conference', 'Conference'),", "gaps does in other methodologies does this particular methodology address?\") next_steps = models.TextField(null=True,", "= models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process', 
blank=True,", "title = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)", "models.CharField(max_length=20) css = models.CharField(max_length=20) def __str__(self): return self.name class Project(models.Model): name = models.CharField(max_length=255,", "entity = models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional entity (driving force boundary for induced", "blank=True) full_name = models.CharField(max_length=255, null=True, blank=True) institution = models.CharField(max_length=255, null=True, blank=True) organizations =", "blank=True) def __str__(self): return self.title class VideoForm(ModelForm): class Meta: model = Video exclude", "'tags', 'materials'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL',", "of flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could also be considered as consumption-based", "models.TextField(null=True, blank=True, help_text='Additional comments about the importing process') url = models.CharField(max_length=500, null=True, blank=True)", "blank=True) research_interests = models.TextField(null=True, blank=True) website = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255,", "type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60, null=True, blank=True) location = models.CharField(max_length=255, null=True,", "Horizon'), (9, 'Methodologies'), (10, 'Other'), ) parent = models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This", "on a # per-model basis as needed, but reverse-chronological is a good #", "'Medium'), ('h', 'High'), ) relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS = (", "thesistype = models.CharField(max_length=20, 
choices=THESISTYPE, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) references =", "= ['title', 'website', 'url', 'primary_space', 'description', 'author', 'date', 'thumbnail', 'license'] labels = {", "\"Returns abbreviation -- text between parenthesis -- if there is any\" if \"(\"", "blank=True) url = models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of the source website/article -- ONLY", "(self.firstname, self.lastname) class Meta: ordering = [\"firstname\", \"lastname\"] class PeopleForm(ModelForm): class Meta: model", "upload_to='articles') parent = models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True ) authors = models.ManyToManyField(People,", "choices=STATUS, db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag,", "= { 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) /", "models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255, null=True, blank=True)", "blank=True, related_name='people_city', limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2})", "models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True) url =", "return self.title class Meta: verbose_name_plural = \"case studies\" class UserAction(models.Model): name = models.CharField(max_length=255)", "__str__(self): return self.organization.name + \" - \" + self.type + \" - \"", "= 
Organization exclude = ['id', 'processes'] class Publisher(models.Model): name = models.CharField(max_length=255) def __str__(self):", "'Methodologies'), (10, 'Other'), ) parent = models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This was a", "are best - please resize to 350x350 pixels\") PEOPLE_STATUS = ( ('active', 'Active'),", "print_relevance = models.TextField(null=True, blank=True) RELEVANCE = ( ('u', 'Unknown'), ('l', 'Low'), ('m', 'Medium'),", "exclude = ['id'] class PeopleNote(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note", "['-created_at', '-updated_at'] class ReferenceType(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True, blank=True) GROUP", "site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site =", "blank=True) cityloops = models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional", "blank=True, null=True) authorlist = models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE,", "status = models.CharField(max_length=20, choices=STATUS, default='ongoing') active = models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE =", "= models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True)", "on_site = CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections = models.ManyToManyField(VideoCollection, 
blank=True)", "all'), ) substances = models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and", "or other system components\") classification = models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320},", "__str__(self): return self.name @property def shortcode(self): \"Returns abbreviation -- text between parenthesis --", "blank=True) def __str__(self): return self.name class Meta: verbose_name_plural = \"Method families\" verbose_name =", "based_on = models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What gaps does in other", "models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What gaps does in other methodologies does", "this reference was published at/in\" if self.journal: return self.journal.name elif self.event: return self.event.name", "model = models.CharField(max_length=255, null=True, blank=True) model_id = models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True, blank=True)", "import settings # Used for image resizing from stdimage.models import StdImageField import re", "= models.TextField(null=True, blank=True, help_text=\"Indicators\") cons = models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True,", "= models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by = models.CharField(max_length=255, null=True,", "blank=True) comments = models.TextField(null=True, blank=True) STATUS = ( ('pending', 'Pending'), ('active', 'Active'), ('deleted',", "also_known_as = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) complete = 
models.NullBooleanField(null=True, blank=True) include_in_list", "choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "review'), ('rv', 'Reviewed - DONE'), ('ec', 'External copy'), ('sk', 'Skip - will not", "(6, 'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'), (10, 'Other'), ) parent = models.CharField(max_length=2,", "def __str__(self): return self.name class CaseStudy(models.Model): title = models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE,", "blank=True) main_measurement_unit = models.CharField(max_length=255, null=True, blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting", "abstract = models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True)", "ordering for most models. 
ordering = ['-created_at', '-updated_at'] class ReferenceType(models.Model): name = models.CharField(max_length=255)", "title = models.CharField(max_length=255) url = models.CharField(max_length=255) description = models.TextField() author = models.CharField(max_length=255) date", "default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True)", "shortcode(self): \"Returns abbreviation -- text between parenthesis -- if there is any\" if", "models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS = ( ('no', 'No'), ('pending', 'Yes - pending'),", "consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts for hidden flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts", "class Meta: ordering = [\"firstname\", \"lastname\"] class PeopleForm(ModelForm): class Meta: model = People", "return self.name class Project(models.Model): name = models.CharField(max_length=255, null=True, blank=True) full_name = models.CharField(max_length=255, null=True,", "3}) budget = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True) print_aim = models.TextField(null=True, blank=True) print_relevance =", "models.CharField(\"materials / bulk materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True,", "# By default, any model that inherits from `TimestampedModel` should # be ordered", "choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60, null=True, blank=True) location = models.CharField(max_length=255, null=True, blank=True) url =", "models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table = 'core_reference_authors' class ReferenceForm(ModelForm):", "class Meta: verbose_name_plural = \"Method 
families\" verbose_name = \"method family\" class Method(models.Model): tag", "elif self.event: return self.event.name else: return self.type.name def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class", "models.TextField(null=True, blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering = [\"date\"] class Article(models.Model):", "ONLY enter if this is not linked to a publication\") source = models.TextField(null=True,", "= [\"date\"] class Article(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True)", "can override this on a # per-model basis as needed, but reverse-chronological is", "model = Video fields = ['title', 'website', 'url', 'primary_space', 'description', 'author', 'date', 'thumbnail',", "model = Organization exclude = ['id', 'processes'] class Publisher(models.Model): name = models.CharField(max_length=255) def", "models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True, blank=True) GROUP = ( ('academic', 'Academic'), ('theses', 'Theses'),", "= models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) logo = models.ImageField(null=True, blank=True,", "= models.TextField() def __str__(self): return self.name class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def __str__(self):", "= ['name'] class ProjectForm(ModelForm): class Meta: model = Project exclude = ['id', 'site',", "override this on a # per-model basis as needed, but reverse-chronological is a", "'Statistical Agency'), ('private_sector', 'Private Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'), ('other', 'Other'), ) type", "max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs = 
models.CharField(\"outputs", "['title', 'website', 'url', 'primary_space', 'description', 'author', 'date', 'thumbnail', 'license'] labels = { 'primary_space':", "Color(models.Model): name = models.CharField(max_length=20) css = models.CharField(max_length=20) def __str__(self): return self.name class Project(models.Model):", "'Draft ready for review'), ('rv', 'Reviewed - DONE'), ('ec', 'External copy'), ('sk', 'Skip", "'includes_form', 'slug', 'active','content'] class Event(models.Model): article = models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True, )", "objects = models.Manager() on_site = CurrentSiteManager() show_in_list = models.BooleanField(default=True) def __str__(self): return self.title", "(1024, 1024)}) uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference", "type = models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return self.name class Meta: ordering = [\"name\"]", "models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points =", "help_text=\"If the journal does not appear in the list, please leave empty and", "= models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE = ( ('theses', 'Theses'), ('projects', 'Projects'), ('applied',", "case study using this methodology?\") materials_catalog_used = models.TextField(null=True, blank=True) also_known_as = models.TextField(null=True, blank=True)", "import Site from django.contrib.sites.managers import CurrentSiteManager from django.conf import settings # Used for", "organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = ( ('funder', 'Funder'),", "'large': (1024, 
1024)}) uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True)", "models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True, blank=True) parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE", "django.urls import reverse class TimestampedModel(models.Model): # A timestamp representing when this object was", "way'), ('0', '0 - Not included at all'), ) substances = models.CharField(\"selected specific", "ordering = ['name'] class ProjectForm(ModelForm): class Meta: model = Project exclude = ['id',", "People exclude = ['id'] class PeopleNote(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True)", "models.CharField(max_length=255) LANGUAGES = ( ('EN', 'English'), ('ES', 'Spanish'), ('CH', 'Chinese'), ('FR', 'French'), ('GE',", "models.CharField(max_length=255, null=True, blank=True) def __str__(self): return self.article.title class EventForm(ModelForm): class Meta: model =", "name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def __str__(self): return self.name class CaseStudy(models.Model):", "from django.urls import reverse class TimestampedModel(models.Model): # A timestamp representing when this object", "from `TimestampedModel` should # be ordered in reverse-chronological order. 
We can override this", "image resizing from stdimage.models import StdImageField import re from django.urls import reverse class", "on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True,", "= models.NullBooleanField(default=False) def __str__(self): return self.tag.name class Meta: ordering = [\"position\", \"tag__name\"] class", "models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE = ( ('academic', 'Research Institution'), ('universities', 'Universities'), ('city_government',", "and Events'), ) section = models.CharField(max_length=20, choices=SECTIONS, default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)", ") section = models.CharField(max_length=20, choices=SECTIONS, default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects =", "= models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True,", "= [\"name\"] class OrganizationForm(ModelForm): class Meta: model = Organization exclude = ['id', 'processes']", "Project exclude = ['id', 'site', 'references', 'organizations'] labels = { 'name': 'Project title',", "hidden_flows = models.CharField(\"accounts for hidden flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts = models.CharField(\"quantitative", "website/article -- ONLY enter if this is not linked to a publication\") source", "models.CharField(max_length=8, choices=STATUS, db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags =", "blank=True, help_text=\"Year of the data being 
visualized -- ONLY enter if this is", "get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id]) class Meta: ordering = ['name'] class ProjectForm(ModelForm): class", "class SimpleArticleForm(ModelForm): class Meta: model = Article fields = ['title', 'image', 'date', 'head',", "'Multimedia'), ) group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def __str__(self): return self.name class", "= models.TextField(null=True, blank=True) output_articles = models.TextField(null=True, blank=True) funding_program = models.CharField(max_length=255, null=True, blank=True) methodologies", "from django.contrib.auth import get_user_model User = get_user_model() from django.contrib.sites.models import Site from django.contrib.sites.managers", "STATUS = ( ('nw', 'Not worked on'), ('ip', 'In progress'), ('dr', 'Draft ready", "to a publication\") class Meta: ordering = [\"date\"] def __str__(self): return self.title class", "publications = models.BooleanField() dataviz = models.BooleanField() multimedia = models.BooleanField() projects = models.BooleanField() theses", "'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'), ('other', 'Other'), ) thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True,", "= models.DateTimeField(auto_now_add=True) # A timestamp reprensenting when this object was last updated. 
updated_at", "the importing process') url = models.CharField(max_length=500, null=True, blank=True) doi = models.CharField(max_length=255, null=True, blank=True)", "django.contrib.auth.models import User from django.contrib.auth import get_user_model User = get_user_model() from django.contrib.sites.models import", "class People(models.Model): firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True, blank=True)", "'URL', } class ReferenceFormAdmin(ModelForm): class Meta: model = Reference exclude = ['id', 'organizations',", "choices=TYPES) def __str__(self): return self.organization.name + \" - \" + self.type + \"", "the name in the comments\") event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year =", "choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True, null=True) authorlist = models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE)", "__str__(self): return self.name class Meta: ordering = [\"name\"] class People(models.Model): firstname = models.CharField(max_length=255)", "# A timestamp reprensenting when this object was last updated. 
updated_at = models.DateTimeField(auto_now=True)", "( ('EN', 'English'), ('ES', 'Spanish'), ('CH', 'Chinese'), ('FR', 'French'), ('GE', 'German'), ('NL', 'Dutch'),", "considered as consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts for hidden flows\", max_length=1, choices=METHOD_SCORING, null=True,", "models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave sources of flows\", max_length=1, choices=METHOD_SCORING, null=True,", "'Academic'), ('theses', 'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'), ) group = models.CharField(max_length=20, choices=GROUP, null=True,", "'url': 'Video URL' } class Tag(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True,", "= models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True, null=True) authorlist = models.TextField() type =", "class Journal(models.Model): name = models.CharField(max_length=255) website = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True,", "null=True, blank=True) date = models.DateField() def __str__(self): return self.title class DataViz(models.Model): title =", "blank=True) model_id = models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True, blank=True) class Meta: ordering =", "Types'), (2, 'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'), (5, 'Scales'), (6, 'Flows'), (7,", "models.TextField(null=True, blank=True) research_interests = models.TextField(null=True, blank=True) website = models.CharField(max_length=255, null=True, blank=True) twitter =", "is any\" if \"(\" in self.name: s = self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return", "'end_date', 'status', 'url'] labels = { 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher':", "def __str__(self): return 
self.title class Meta: ordering = [\"title\"] class ArticleForm(ModelForm): class Meta:", "= HTMLField('Content', help_text=\"The content field is a required field - be sure to", "320}, related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional entity (driving force", "help_text=\"Target audience of results\") indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\") pros = models.TextField(null=True, blank=True,", "blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255, null=True, blank=True,", "class Article(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction =", "['id', 'site'] labels = { 'primary_space': 'Reference space (optional)' } class VideoUploadForm(ModelForm): class", "superseded/not yet approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS =", "TimestampedModel(models.Model): # A timestamp representing when this object was created. 
created_at = models.DateTimeField(auto_now_add=True)", "'date', 'thumbnail', 'license'] labels = { 'primary_space': 'Reference space', 'url': 'Video URL' }", "and in a partial or conditional way'), ('0', '0 - Not included at", ") hidden = models.BooleanField(db_index=True, default=False, help_text=\"Mark if tag is superseded/not yet approved/deactivated\") include_in_glossary", "null=True, blank=True) based_on = models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What gaps does", "date = models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model = models.CharField(max_length=255,", "('private_sector', 'Private Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'), ('other', 'Other'), ) type = models.CharField(max_length=20,", "(300, 300), 'large': (1600,1600)}, null=True, blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large':", "sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by = models.CharField(max_length=255, null=True, blank=True) based_on =", "blank=True) logo = models.ImageField(null=True, blank=True, upload_to='organizations') processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces", "= models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3}) country =", "accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People,", "= models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) 
description = models.TextField(null=True, blank=True)", "blank=True, limit_choices_to={'type__id': 3}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255, null=True, blank=True, help_text=\"Do", "internal_notes = models.TextField(null=True, blank=True) output_tools = models.TextField(null=True, blank=True) output_reports = models.TextField(null=True, blank=True) output_articles", "models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position =", "models.CharField(max_length=255, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) def __str__(self): return self.article.title class", "help_text=\"Circular economy / closing loop consideration\") target_audience = models.TextField(null=True, blank=True, help_text=\"Target audience of", "blank=True) references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes =", "project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = ( ('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'),", "self.journal.name elif self.event: return self.event.name else: return self.type.name def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False)", "ordering = [\"position\"] class VideoCollectionForm(ModelForm): class Meta: model = VideoCollection exclude = ['id',", "\" + self.project.name class Timeline(models.Model): title = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) link", "= models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments about", "= 
models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True, blank=True)", "null=True, blank=True) specific = models.CharField(\"specific goods and services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) production", "= models.TextField(null=True, blank=True) complete = models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False) def __str__(self): return", "= models.ImageField(null=True, blank=True, upload_to='organizations') processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace,", "CurrentSiteManager from django.conf import settings # Used for image resizing from stdimage.models import", "ordering = [\"-year\", \"title\"] def source(self): \"Return details of where this reference was", "models.TextField(null=True, blank=True) output_tools = models.TextField(null=True, blank=True) output_reports = models.TextField(null=True, blank=True) output_articles = models.TextField(null=True,", "- can be left empty\") def __str__(self): return self.name @property def shortcode(self): \"Returns", "'site'] labels = { 'primary_space': 'Reference space (optional)' } class VideoUploadForm(ModelForm): class Meta:", "VideoForm(ModelForm): class Meta: model = Video exclude = ['id', 'site'] labels = {", "+ self.type + \" - \" + self.project.name class Timeline(models.Model): title = models.CharField(max_length=255)", "as consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts for hidden flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "return self.event.name else: return self.type.name def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference", "('whatwedo', 
'What We Do'), ('newsevents', 'News and Events'), ) section = models.CharField(max_length=20, choices=SECTIONS,", "date = models.DateField() def __str__(self): return self.title class DataViz(models.Model): title = models.CharField(max_length=255) image", "null=True, blank=True) def __str__(self): return self.name class Meta: verbose_name_plural = \"Method families\" verbose_name", "HTMLField('Content', help_text=\"The content field is a required field - be sure to fill", "= models.TextField(null=True, blank=True) STATUS = ( ('pending', 'Pending'), ('active', 'Active'), ('deleted', 'Deleted'), )", "supervisor = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) description = HTMLField('description',", "blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by = models.CharField(max_length=255, null=True, blank=True) based_on", "ModelForm from django.template.defaultfilters import slugify from tinymce import HTMLField from django.contrib.auth.models import User", "references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes = models.TextField(null=True,", "strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) STATUS = (", "null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) STATUS", "= ['-created_at', '-updated_at'] class ReferenceType(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True, blank=True)", "'External copy'), ('sk', 'Skip - will not be published'), ) status = models.CharField(max_length=2,", "ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, 
on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = ( ('publisher',", "model = Reference fields = ['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year', 'abstract',", "results\") indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\") pros = models.TextField(null=True, blank=True, help_text=\"Indicators\") cons =", "'url'] labels = { 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 'supervisor':", "__str__(self): return self.name class Meta: verbose_name_plural = \"Method families\" verbose_name = \"method family\"", "Meta: model = Video exclude = ['id', 'site'] labels = { 'primary_space': 'Reference", "- \" + self.type + \" - \" + self.reference.title class MaterialGroup(models.Model): name", "'primary_space': 'Reference space (optional)' } class VideoUploadForm(ModelForm): class Meta: model = Video fields", "\"lastname\"] class PeopleForm(ModelForm): class Meta: model = People exclude = ['id'] class PeopleNote(models.Model):", "models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True) file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only", "limit_choices_to={'hidden': False}, related_name='children' ) hidden = models.BooleanField(db_index=True, default=False, help_text=\"Mark if tag is superseded/not", "default ordering for most models. 
ordering = ['-created_at', '-updated_at'] class ReferenceType(models.Model): name =", "to environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling = models.CharField(\"recyling of material and energy\",", "models.ManyToManyField(Organization, blank=True) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return '%s %s'", "'gps', 'parent', 'hidden'] class Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES = ( ('EN', 'English'),", "models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def __str__(self): return self.people.firstname + \"", "\" - \" + self.reference.title class MaterialGroup(models.Model): name = models.CharField(max_length=255) description = models.TextField(null=True,", "'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectUserForm(ModelForm): class Meta: model =", "blank=True) email_public = models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3})", "blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces =", "limit_choices_to={'type': 3}) budget = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True) print_aim = models.TextField(null=True, blank=True) print_relevance", "CaseStudy(models.Model): title = models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference,", "class OrganizationForm(ModelForm): class Meta: model = Organization exclude = ['id', 'processes'] class Publisher(models.Model):", "DataViz(models.Model): title = models.CharField(max_length=255) image = 
StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024, 1024)})", "= models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table = 'core_reference_authors' class ReferenceForm(ModelForm): class Meta: model", "blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) logo = models.ImageField(null=True, blank=True, upload_to='organizations') processes =", "( ('academic', 'Research Institution'), ('universities', 'Universities'), ('city_government', 'City Government'), ('regional_government', 'Regional Government'), ('national_government',", "models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True) objects = models.Manager() on_site = CurrentSiteManager()", "+ \" - \" + self.type + \" - \" + self.project.name class", "models.TextField(null=True, blank=True) class Meta: ordering = [\"-date\"] class Color(models.Model): name = models.CharField(max_length=20) css", "['article'] class VideoCollection(models.Model): title = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) site =", "description = models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True,", "__str__(self): return self.name class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class", "= models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True)", "= HTMLField(null=True, blank=True) parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE = ( ('academic',", "Meta: model = Tag exclude = ['id', 'gps', 'parent', 'hidden'] class Reference(models.Model): 
title", "models.TextField(null=True, blank=True) RELEVANCE = ( ('u', 'Unknown'), ('l', 'Low'), ('m', 'Medium'), ('h', 'High'),", "included only occasionally in the mode of analysis, and in a partial or", "parent = models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True ) authors = models.ManyToManyField(People, blank=True)", "Event exclude = ['article'] class VideoCollection(models.Model): title = models.CharField(max_length=255) description = HTMLField('description', null=True,", "= models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True, blank=True,", ") type = models.CharField(max_length=20, choices=TYPE) THESISTYPE = ( ('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd',", "models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self): return self.name", "HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) STATUS = ( ('nw', 'Not", "('2', 'Flows of substances'), ('1', 'Environmental impacts'), ) method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True,", "True # By default, any model that inherits from `TimestampedModel` should # be", "pros = models.TextField(null=True, blank=True, help_text=\"Indicators\") cons = models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose = models.TextField(null=True,", "active = models.BooleanField(default=True) SECTIONS = ( ('about', 'About'), ('community', 'Community'), ('research', 'Research'), ('resources',", "blank=True) outputs = models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling = models.CharField(\"recyling", "'active'}) material_groups = 
models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True)", "('planned', 'Planned'), ('ongoing', 'In progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'), ) status = models.CharField(max_length=20,", "class Timeline(models.Model): title = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) link = models.CharField(max_length=255, null=True,", "Meta: model = Article fields = ['title', 'image', 'date', 'head', 'includes_form', 'slug', 'active','content']", "in the comments\") event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year = models.PositiveSmallIntegerField() abstract", "} class Tag(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) parent_tag =", "from stdimage.models import StdImageField import re from django.urls import reverse class TimestampedModel(models.Model): #", "get_user_model User = get_user_model() from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from", "'Conference'), ('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'), ) start = models.DateField(null=True,", "type = models.CharField(max_length=20, choices=TYPE) THESISTYPE = ( ('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'),", "( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other', 'Other website'), ) website", "models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255, null=True, blank=True,", "blank=True, upload_to='journals') def __str__(self): return self.name class Meta: ordering = [\"name\"] class People(models.Model):", "(300, 300), 'large': (800, 
800)}, null=True, blank=True) image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300),", "300), 'large': (800, 800)}, null=True, blank=True) image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large':", "could also be considered as consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts for hidden flows\",", "('other', 'Other'), ) type = models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return self.name class Meta:", "exclude = ['article'] class VideoCollection(models.Model): title = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True)", "from django.forms import ModelForm from django.template.defaultfilters import slugify from tinymce import HTMLField from", "null=True, blank=True) recycling = models.CharField(\"recyling of material and energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self):", "upload_to='references', help_text='Only upload the file if you are the creator or you have", "be ordered in reverse-chronological order. 
We can override this on a # per-model", "'site', 'references', 'organizations'] labels = { 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher':", "null=True, blank=True) developed_by = models.CharField(max_length=255, null=True, blank=True) based_on = models.TextField(null=True, blank=True) gaps_addressed =", "model = Tag exclude = ['id', 'gps', 'parent', 'hidden'] class Reference(models.Model): title =", "class Meta: model = Event exclude = ['article'] class VideoCollection(models.Model): title = models.CharField(max_length=255)", "url = models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of the source website/article -- ONLY enter", "return self.title class VideoForm(ModelForm): class Meta: model = Video exclude = ['id', 'site']", "class MaterialGroup(models.Model): name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def __str__(self): return self.name", "models.DateField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return self.title class", "'Reference space (optional)' } class VideoUploadForm(ModelForm): class Meta: model = Video fields =", "name = models.CharField(max_length=255) url = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True)", "= models.ManyToManyField('staf.Material', blank=True) spaces = models.ManyToManyField(ReferenceSpace, blank=True) def __str__(self): return self.title class Meta:", "help_text=\"Purpose of the study\") def __str__(self): return self.title class Meta: verbose_name_plural = \"case", "= models.CharField(max_length=255) image = StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024, 1024)}) uploaded_by =", "# A timestamp representing when this object was created. 
created_at = models.DateTimeField(auto_now_add=True) #", "(300, 300), 'large': (1600,1600)}, null=True, blank=True) image3 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large':", "= models.CharField(max_length=20, choices=SECTIONS, default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site", "= models.CharField(max_length=255) def __str__(self): return self.name class UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log')", "item is included only occasionally in the mode of analysis, and in a", "description = HTMLField('description', null=True, blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden': False},", "= [\"position\", \"tag__name\"] class TagForm(ModelForm): class Meta: model = Tag exclude = ['id',", "specific = models.CharField(\"specific goods and services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) production = models.CharField(\"production", "= models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling = models.CharField(\"recyling of material", "= HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) STATUS = ( ('nw',", "= ( ('3', 'Relation in UM systems'), ('2', 'Flows of substances'), ('1', 'Environmental", "recycling = models.CharField(\"recyling of material and energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes =", "= models.TextField(null=True, blank=True) output_reports = models.TextField(null=True, blank=True) output_articles = models.TextField(null=True, blank=True) funding_program =", "null=True) end_date = models.DateField(blank=True, null=True) STATUS = ( ('planned', 'Planned'), ('ongoing', 'In progress'),", "feature is typically included in the techique'), ('1', '1 
- The item is", "def __str__(self): return self.title class Meta: verbose_name_plural = \"case studies\" class UserAction(models.Model): name", "at/in\" if self.journal: return self.journal.name elif self.event: return self.event.name else: return self.type.name def", "blank=True) date = models.DateField() def __str__(self): return self.title class DataViz(models.Model): title = models.CharField(max_length=255)", "choices=METHOD_SCORING, null=True, blank=True) outputs = models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling", "= models.DateField() def __str__(self): return self.title class DataViz(models.Model): title = models.CharField(max_length=255) image =", "return self.tag.name class Meta: ordering = [\"position\", \"tag__name\"] class TagForm(ModelForm): class Meta: model", "'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceOrganization(models.Model): organization =", "'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def __str__(self): return self.organization.name + \" -", "__str__(self): return self.name class UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace',", "('masters', 'Master'), ('phd', 'PhD'), ('other', 'Other'), ) thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True)", "class Meta: model = Reference exclude = ['id', 'organizations', 'processes', 'date_added', 'event', 'authors',", "= models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and basic compounds only\")", "multiplicity.models import ReferenceSpace, License from django.forms import ModelForm from django.template.defaultfilters import slugify from", "limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup, 
blank=True) material_temp_notes = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True,", "('other', 'Other website'), ) website = models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)", "Meta: model = Organization exclude = ['id', 'processes'] class Publisher(models.Model): name = models.CharField(max_length=255)", "= People exclude = ['id'] class PeopleNote(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) date =", "model = Video exclude = ['id', 'site'] labels = { 'primary_space': 'Reference space", "class MethodCategory(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths',", "models.CharField(max_length=255) website = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher,", "models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of the source website/article -- ONLY enter if this", "null=True, blank=True) google_scholar = models.CharField(max_length=255, null=True, blank=True) orcid = models.CharField(max_length=255, null=True, blank=True) researchgate", "linkedin = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) member_since = models.DateField(null=True, blank=True,", "choices=GROUP, null=True, blank=True) def __str__(self): return self.name class Meta: ordering = [\"name\"] class", "DONE'), ('ec', 'External copy'), ('sk', 'Skip - will not be published'), ) status", "models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True) print_aim = models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True, blank=True) RELEVANCE", "linked to a publication\") year = models.PositiveSmallIntegerField(null=True, blank=True, 
help_text=\"Year of the data being", "gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What gaps does in other methodologies does this particular", "models.CharField(max_length=255, null=True, blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True)", "(5, 'Scales'), (6, 'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'), (10, 'Other'), ) parent", "this particular methodology address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed next steps to further", "'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 'end_date', 'status', 'url'] labels", "= HTMLField('description', null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects", "'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other', 'Other website'), ) website = models.CharField(max_length=20, choices=VIDEOSITES) site", "__str__(self): return self.title class DataViz(models.Model): title = models.CharField(max_length=255) image = StdImageField(upload_to='dataviz', variations={'thumb': (300,", "on_site = CurrentSiteManager() date = models.DateField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True)", "blank=True, help_text=\"What gaps does in other methodologies does this particular methodology address?\") next_steps", "= Project fields = ['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date',", "def __str__(self): return self.name class Meta: ordering = [\"name\"] class OrganizationForm(ModelForm): class Meta:", "default=False, help_text=\"Mark if tag is superseded/not yet approved/deactivated\") include_in_glossary = 
models.BooleanField(db_index=True, default=False) is_accounting_method", "comments\") event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year = models.PositiveSmallIntegerField() abstract = models.TextField(null=True,", "good # default ordering for most models. ordering = ['-created_at', '-updated_at'] class ReferenceType(models.Model):", "= models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) materials =", "= models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional", "material_groups = models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) output_tools", "a # per-model basis as needed, but reverse-chronological is a good # default", "country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2}) profile = models.TextField(null=True, blank=True)", "null=True, blank=True) logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800, 800)}, null=True, blank=True)", "models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def __str__(self): return", "return self.name class CaseStudy(models.Model): title = models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318})", "weaknesses = HTMLField('weaknesses', null=True, blank=True) STATUS = ( ('nw', 'Not worked on'), ('ip',", "'Regional Government'), ('national_government', 'National 
Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private Sector'), ('publisher', 'Publishers'),", "blank=True, help_text=\"Do they continue to implement it?\") consideration = models.TextField(null=True, blank=True, help_text=\"Circular economy", "= models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) materials = models.ManyToManyField('staf.Material', blank=True) spaces = models.ManyToManyField(ReferenceSpace, blank=True)", ") status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope =", "open_access = models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import =", "= ( ('academic', 'Academic'), ('theses', 'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'), ) group =", "models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True)", "User = get_user_model() from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.conf", "return self.name class MethodData(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class MethodCategory(models.Model):", "copy'), ('sk', 'Skip - will not be published'), ) status = models.CharField(max_length=2, choices=STATUS,", "['id', 'gps', 'parent', 'hidden'] class Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES = ( ('EN',", "null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) STATUS = ( ('nw', 'Not worked", "'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 
'end_date', 'status', 'url'] labels = {", "'type', 'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 'end_date', 'status', 'url'] labels =", "models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() date = models.DateField(null=True, blank=True)", "models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return self.title class VideoForm(ModelForm): class Meta: model", "django.db import models from multiplicity.models import ReferenceSpace, License from django.forms import ModelForm from", "limit_choices_to={'slug__isnull': False}) materials = models.ManyToManyField('staf.Material', blank=True) spaces = models.ManyToManyField(ReferenceSpace, blank=True) def __str__(self): return", "METHOD_CLASS = ( ('3', 'Relation in UM systems'), ('2', 'Flows of substances'), ('1',", "blank=True ) authors = models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True) SECTIONS = ( ('about',", "- completed'), ) cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True) logo = StdImageField(upload_to='projects', variations={'thumb':", "substances'), ('1', 'Environmental impacts'), ) method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category =", "ordering = [\"date\"] def __str__(self): return self.title class NewsletterSubscriber(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE)", "models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = ( ('funder', 'Funder'), ('commissioner', 'Commissioner'),", "models.TextField(null=True, blank=True) member_since = models.DateField(null=True, blank=True, db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True)", "pixels\") PEOPLE_STATUS = ( ('active', 
'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'), ('pending',", "choices=METHOD_SCORING, null=True, blank=True) energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs = models.CharField(\"outputs to", "blank=True) specific = models.CharField(\"specific goods and services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) production =", "= models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) description = HTMLField('description', null=True,", "= models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) email_public = models.BooleanField() city", "blank=True, limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes = models.TextField(null=True, blank=True) internal_notes =", "blank=True) email = models.CharField(max_length=255, null=True, blank=True) email_public = models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL,", "CurrentSiteManager() def __str__(self): return '%s %s' % (self.firstname, self.lastname) class Meta: ordering =", "= [\"name\"] class People(models.Model): firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation = models.CharField(max_length=255,", "choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() primary_space", "goods and services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) production = models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING,", "blank=True, help_text=\"Which paper is a representative case study using this methodology?\") materials_catalog_used =", "materials_catalog_used = models.TextField(null=True, blank=True) 
also_known_as = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) complete", "THESISTYPE = ( ('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'), ('other', 'Other'), ) thesistype", "= HTMLField('description', null=True, blank=True) target_finish_date = models.CharField(max_length=255, null=True, blank=True) start_date = models.DateField(blank=True, null=True)", "blank=True, help_text=\"Note: could also be considered as consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts for", "models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction = models.TextField(null=True, blank=True) head = models.TextField(null=True, blank=True) includes_form", "the journal does not appear in the list, please leave empty and add", "authorlist = models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True,", "( ('pending', 'Pending'), ('active', 'Active'), ('deleted', 'Deleted'), ) status = models.CharField(max_length=8, choices=STATUS, db_index=True)", "EventForm(ModelForm): class Meta: model = Event exclude = ['article'] class VideoCollection(models.Model): title =", "models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull':", "350x350 pixels\") PEOPLE_STATUS = ( ('active', 'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'),", "= models.ForeignKey(Organization, on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = ( ('publisher', 'Publisher'), ('commissioner',", "class Meta: ordering = [\"name\"] class MethodClassification(models.Model): name = models.CharField(max_length=255) description = 
models.TextField()", "changes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) specific = models.CharField(\"specific goods and services\", max_length=1, choices=METHOD_SCORING,", "null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'})", "ProjectForm(ModelForm): class Meta: model = Project exclude = ['id', 'site', 'references', 'organizations'] labels", "'introduction', 'content', 'image', 'active'] class SimpleArticleForm(ModelForm): class Meta: model = Article fields =", "of where this reference was published at/in\" if self.journal: return self.journal.name elif self.event:", "= models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING,", "sources of flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could also be considered as", "class VideoForm(ModelForm): class Meta: model = Video exclude = ['id', 'site'] labels =", "('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'), ('other', 'Other'), ) thesistype = models.CharField(max_length=20, choices=THESISTYPE,", "environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling = models.CharField(\"recyling of material and energy\", max_length=1,", "null=True, blank=True, help_text=\"If the journal does not appear in the list, please leave", "models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of the", "if \"(\" in self.name: s = self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class", "('about', 'About'), ('community', 'Community'), ('research', 'Research'), ('resources', 'Resources'), 
('cities', 'Cities'), ('whatwedo', 'What We", "'active'] class SimpleArticleForm(ModelForm): class Meta: model = Article fields = ['title', 'image', 'date',", "or conditional way'), ('0', '0 - Not included at all'), ) substances =", "flows between sectors, industries or acticity fields, or other system components\") classification =", "class ProjectForm(ModelForm): class Meta: model = Project exclude = ['id', 'site', 'references', 'organizations']", "= models.CharField(max_length=255, null=True, blank=True) isbn = models.CharField(max_length=255, null=True, blank=True) comments = models.TextField(null=True, blank=True)", "'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'), (10, 'Other'), ) parent = models.CharField(max_length=2, choices=PARENTS,", "'Environmental impacts'), ) method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE,", "models. ordering = ['-created_at', '-updated_at'] class ReferenceType(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255,", "impacts'), ) method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True,", "'comments', 'file'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL',", "on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE,", "labels = { 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s)", "full_name = models.CharField(max_length=255, null=True, blank=True) institution = models.CharField(max_length=255, null=True, blank=True) organizations = models.ManyToManyField(Organization,", "parent = 
models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This was a previous classification - can", "class ArticleForm(ModelForm): class Meta: model = Article fields = ['title', 'introduction', 'content', 'image',", "thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def __str__(self):", "blank=True) image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image2 =", "'large': (1600,1600)}, null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager() on_site =", "models.DateField(null=True, blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60, null=True, blank=True) location =", "in other methodologies does this particular methodology address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed", "models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True,", "family\" class Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS = (", "300), 'large': (1600,1600)}, null=True, blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)},", "('sk', 'Skip - will not be published'), ) status = models.CharField(max_length=2, choices=STATUS, null=True,", "= models.TextField(null=True, blank=True, help_text=\"Target audience of results\") indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\") pros", "= models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) 
cradle_to_grave = models.CharField(\"cradle-to-grave sources of flows\", max_length=1, choices=METHOD_SCORING,", "group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def __str__(self): return self.name class Meta: ordering", "max_length=1, choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit = models.CharField(max_length=255, null=True, blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING,", "labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class", "'url': 'URL', } class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE)", "Meta: model = People exclude = ['id'] class PeopleNote(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE)", "a defining feature of the approach'), ('2', '2 - The feature is typically", "on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country',", "def __str__(self): return self.name class Meta: ordering = [\"name\"] class People(models.Model): firstname =", "= ( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other', 'Other website'), )", "= models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return self.title class VideoForm(ModelForm): class Meta:", "models.Manager() on_site = CurrentSiteManager() show_in_list = models.BooleanField(default=True) def __str__(self): return self.title class Meta:", "blank=True) description = models.TextField(null=True, blank=True) class Meta: ordering = [\"-date\"] class Color(models.Model): name", "loop consideration\") target_audience = models.TextField(null=True, blank=True, help_text=\"Target audience of results\") 
indicators = models.TextField(null=True,", "or you have permission to do so') open_access = models.NullBooleanField(null=True, blank=True) cityloops =", "left empty\") def __str__(self): return self.name @property def shortcode(self): \"Returns abbreviation -- text", "not be published'), ) status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position = models.PositiveSmallIntegerField(null=True,", "class Publisher(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class Meta: ordering =", "status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization,", "class Meta: ordering = [\"date\"] class Article(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(db_index=True,", "= models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces", "Meta: ordering = [\"position\"] class VideoCollectionForm(ModelForm): class Meta: model = VideoCollection exclude =", "/ closing loop consideration\") target_audience = models.TextField(null=True, blank=True, help_text=\"Target audience of results\") indicators", "authors = models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True) SECTIONS = ( ('about', 'About'), ('community',", "Meta: ordering = [\"-year\", \"title\"] def source(self): \"Return details of where this reference", "type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectOrganization(models.Model):", "HTMLField('weaknesses', null=True, blank=True) def __str__(self): return self.name class Meta: verbose_name_plural = \"Method families\"", "def __str__(self): return self.name class Meta: 
verbose_name_plural = \"Method families\" verbose_name = \"method", "models.TextField(null=True, blank=True, help_text=\"Circular economy / closing loop consideration\") target_audience = models.TextField(null=True, blank=True, help_text=\"Target", "output_articles = models.TextField(null=True, blank=True) funding_program = models.CharField(max_length=255, null=True, blank=True) methodologies = models.TextField(null=True, blank=True)", "= HTMLField(null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of the source website/article", "blank=True, upload_to='references', help_text='Only upload the file if you are the creator or you", "help_text=\"The content field is a required field - be sure to fill this", "= models.BooleanField() events = models.BooleanField() publications = models.BooleanField() dataviz = models.BooleanField() multimedia =", "self.name @property def shortcode(self): \"Returns abbreviation -- text between parenthesis -- if there", "blank=True) cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments about the importing process') url =", "models.CharField(\"quantitative weighting of impacts of material flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit =", "HTMLField('description', null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects =", "= models.CharField(max_length=255, null=True, blank=True) logo = models.ImageField(null=True, blank=True, upload_to='organizations') processes = models.ManyToManyField('staf.Process', blank=True,", "'thumbnail', 'license'] labels = { 'primary_space': 'Reference space', 'url': 'Video URL' } class", "['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url',", "= 
models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def __str__(self): return self.name class CaseStudy(models.Model): title", "django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.conf import settings # Used", "included in the techique'), ('1', '1 - The item is included only occasionally", "('national_government', 'National Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'),", "= models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the file if you are the creator", "people = models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True) created_by =", "models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) logo = models.ImageField(null=True, blank=True, upload_to='organizations')", "blank=True, auto_now_add=True) file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the file if you", "= models.CharField(\"accounts for hidden flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts = models.CharField(\"quantitative weighting", "self.title class Meta: ordering = [\"-year\", \"title\"] def source(self): \"Return details of where", "linkedin = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) logo = models.ImageField(null=True,", "class Event(models.Model): article = models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE = (", "= ['id', 'site'] labels = { 'primary_space': 'Reference space (optional)' } class VideoUploadForm(ModelForm):", "slugify from tinymce import HTMLField from django.contrib.auth.models 
import User from django.contrib.auth import get_user_model", "compounds only\") materials = models.CharField(\"materials / bulk materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy", "models.CharField(max_length=255, null=True, blank=True) description = HTMLField('description', null=True, blank=True) target_finish_date = models.CharField(max_length=255, null=True, blank=True)", "class MethodClassification(models.Model): name = models.CharField(max_length=255) description = models.TextField() def __str__(self): return self.name class", "audience of results\") indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\") pros = models.TextField(null=True, blank=True, help_text=\"Indicators\")", "models.BooleanField(default=False) content = HTMLField('Content', help_text=\"The content field is a required field - be", "does not appear in the list, please leave empty and add the name", "null=True, blank=True) impacts = models.CharField(\"quantitative weighting of impacts of material flows\", max_length=1, choices=METHOD_SCORING,", "('other', 'Other'), ) start = models.DateField(null=True, blank=True) end = models.DateField(null=True, blank=True) type =", "comments = models.TextField(null=True, blank=True) STATUS = ( ('pending', 'Pending'), ('active', 'Active'), ('deleted', 'Deleted'),", "('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'), ) start = models.DateField(null=True, blank=True)", "published'), ) status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope", "StdImageField import re from django.urls import reverse class TimestampedModel(models.Model): # A timestamp representing", "'PhD'), ('other', 'Other'), ) thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True) url = models.CharField(max_length=255,", "published at/in\" if self.journal: 
return self.journal.name elif self.event: return self.event.name else: return self.type.name", "-- ONLY enter if this is not linked to a publication\") class Meta:", "model = Event exclude = ['article'] class VideoCollection(models.Model): title = models.CharField(max_length=255) description =", "return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class Meta: ordering = [\"name\"] class MethodClassification(models.Model): name", "'Pending'), ('active', 'Active'), ('deleted', 'Deleted'), ) status = models.CharField(max_length=8, choices=STATUS, db_index=True) authors =", "Not included at all'), ) substances = models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING, null=True,", "blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image3 =", "class NewsletterSubscriber(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) datasets = models.BooleanField() news = models.BooleanField() events", "('pending', 'Yes - pending'), ('yes', 'Yes - completed'), ) cityloops = models.CharField(max_length=20, choices=CITYLOOPS,", "system components\") classification = models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True)", "STATUS = ( ('planned', 'Planned'), ('ongoing', 'In progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'), )", "class Meta: model = Article fields = ['title', 'image', 'date', 'head', 'includes_form', 'slug',", "blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave sources of flows\",", "CITYLOOPS = ( ('no', 'No'), ('pending', 'Yes - pending'), ('yes', 'Yes - completed'),", "processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) 
materials = models.ManyToManyField('staf.Material', blank=True) spaces = models.ManyToManyField(ReferenceSpace,", "bulk materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs", "null=True, blank=True, help_text=\"Specification of flows between sectors, industries or acticity fields, or other", "blank=True, help_text=\"This was a previous classification - can be left empty\") def __str__(self):", "models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering =", "organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher = models.CharField(max_length=255, null=True, blank=True) supervisor = models.CharField(max_length=255,", "'Cancelled'), ) status = models.CharField(max_length=20, choices=STATUS, default='ongoing') active = models.BooleanField(default=True) pending_review = models.BooleanField(default=True)", "limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget = models.DecimalField(max_digits=10, decimal_places=2,", "= models.TextField(null=True, blank=True, help_text=\"Circular economy / closing loop consideration\") target_audience = models.TextField(null=True, blank=True,", "null=True, blank=True, help_text=\"Elements and basic compounds only\") materials = models.CharField(\"materials / bulk materials\",", "on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) action", "license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return self.title class VideoForm(ModelForm): class", "models.TextField(null=True, 
blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type':", "blank=True, help_text=\"Name of the source website/article -- ONLY enter if this is not", "= models.CharField(max_length=255, null=True, blank=True) start_date = models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True) STATUS", "target_audience = models.TextField(null=True, blank=True, help_text=\"Target audience of results\") indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\")", "funding_program = models.CharField(max_length=255, null=True, blank=True) methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True)", "max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows = models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of", "industries or acticity fields, or other system components\") classification = models.ManyToManyField(MethodClassification, blank=True) scale", "'German'), ('NL', 'Dutch'), ('OT', 'Other'), ) language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255,", "null=True, blank=True ) authors = models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True) SECTIONS = (", "models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def __str__(self): return self.name class CaseStudy(models.Model): title =", "= models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos are", "on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE = ( ('conference', 'Conference'), ('hackathon', 
'Hackathon'), ('workshop', 'Workshop'),", "('resources', 'Resources'), ('cities', 'Cities'), ('whatwedo', 'What We Do'), ('newsevents', 'News and Events'), )", "blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering = [\"date\"] class Article(models.Model): title", "= models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True,", "} class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES =", "models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True)", "Commons'), ('other', 'Other website'), ) website = models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE,", "'email', 'description', 'target_finish_date', 'start_date', 'end_date', 'status', 'url'] labels = { 'name': 'Project title',", "model = People exclude = ['id'] class PeopleNote(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) date", "300), 'large': (1024, 1024)}) uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True,", "where this reference was published at/in\" if self.journal: return self.journal.name elif self.event: return", "any model that inherits from `TimestampedModel` should # be ordered in reverse-chronological order.", "name = models.CharField(max_length=255) description = models.TextField() def __str__(self): return self.name class MethodTemporalBoundary(models.Model): name", "models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, 
help_text=\"If the journal does not appear in the list,", "= VideoCollection exclude = ['id', 'site'] class Video(models.Model): title = models.CharField(max_length=255) url =", "else: return self.type.name def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference,", "Article, on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE = ( ('conference', 'Conference'), ('hackathon', 'Hackathon'), ('workshop',", "= models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of the source website/article -- ONLY enter if", "show_in_list = models.BooleanField(default=True) def __str__(self): return self.title class Meta: ordering = [\"position\"] class", "models.Manager() on_site = CurrentSiteManager() def __str__(self): return '%s %s' % (self.firstname, self.lastname) class", "output_tools = models.TextField(null=True, blank=True) output_reports = models.TextField(null=True, blank=True) output_articles = models.TextField(null=True, blank=True) funding_program", "if there is any\" if \"(\" in self.name: s = self.name return s[s.find(\"(\")+1:s.find(\")\")]", "METHOD_SCORING = ( ('3', '3 - The item is a defining feature of", "-- text between parenthesis -- if there is any\" if \"(\" in self.name:", "reverse class TimestampedModel(models.Model): # A timestamp representing when this object was created. 
created_at", "class MethodData(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class MethodCategory(models.Model): name =", "('FR', 'French'), ('GE', 'German'), ('NL', 'Dutch'), ('OT', 'Other'), ) language = models.CharField(max_length=2, choices=LANGUAGES)", "models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs = models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "('m', 'Medium'), ('h', 'High'), ) relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS =", "('pending', 'Pending Review'), ) status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE,", "blank=True) GROUP = ( ('academic', 'Academic'), ('theses', 'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'), )", "blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) description = HTMLField(null=True,", "when this object was created. 
created_at = models.DateTimeField(auto_now_add=True) # A timestamp reprensenting when", "= models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True,", "from django.db import models from multiplicity.models import ReferenceSpace, License from django.forms import ModelForm", "publication\") source = models.TextField(null=True, blank=True, help_text=\"Name of the source website/article -- ONLY enter", "on_delete=models.CASCADE, null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) description", "feature of the approach'), ('2', '2 - The feature is typically included in", "choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True) objects =", "TagForm(ModelForm): class Meta: model = Tag exclude = ['id', 'gps', 'parent', 'hidden'] class", "Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES = ( ('EN', 'English'), ('ES', 'Spanish'), ('CH', 'Chinese'),", "models.DateField(null=True, blank=True, db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True,", "('reports', 'Reports'), ('multimedia', 'Multimedia'), ) group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def __str__(self):", "cons = models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True, help_text=\"Purpose of the study\")", "models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True)", "ordering = [\"firstname\", 
\"lastname\"] class PeopleForm(ModelForm): class Meta: model = People exclude =", "needed, but reverse-chronological is a good # default ordering for most models. ordering", "google_scholar = models.CharField(max_length=255, null=True, blank=True) orcid = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255,", "models.CharField(max_length=255) def __str__(self): return self.name class MethodCategory(models.Model): name = models.CharField(max_length=255) description = HTMLField('description',", "= StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800, 800)}, null=True, blank=True) image1 = StdImageField(upload_to='projects',", "('pending', 'Pending'), ('active', 'Active'), ('deleted', 'Deleted'), ) status = models.CharField(max_length=8, choices=STATUS, db_index=True) authors", "null=True, blank=True) def __str__(self): return self.name class Meta: ordering = [\"name\"] class Organization(models.Model):", "Meta: verbose_name_plural = \"case studies\" class UserAction(models.Model): name = models.CharField(max_length=255) def __str__(self): return", "null=True, blank=True) description = models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image", "= models.TextField(null=True, blank=True) research_interests = models.TextField(null=True, blank=True) website = models.CharField(max_length=255, null=True, blank=True) twitter", "HTMLField(null=True, blank=True) parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE = ( ('academic', 'Research", "= models.DateField(null=True) people = models.ManyToManyField(People, blank=True) VIDEOSITES = ( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'),", "dataviz = models.BooleanField() multimedia = models.BooleanField() projects = models.BooleanField() theses = models.BooleanField() reference_spaces", "'researcher': 'Researcher(s)', 'supervisor': 
'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectOrganization(models.Model): organization", "models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False) def __str__(self): return self.tag.name class Meta: ordering =", "site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True) objects = models.Manager() on_site", "'Flows of substances'), ('1', 'Environmental impacts'), ) method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True)", "= models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What gaps does in other methodologies", "= models.TextField() author = models.CharField(max_length=255) date = models.DateField(null=True) people = models.ManyToManyField(People, blank=True) VIDEOSITES", "file if you are the creator or you have permission to do so')", "('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'), ) start = models.DateField(null=True, blank=True) end =", "318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget = models.DecimalField(max_digits=10, decimal_places=2, null=True,", "energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes = models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "+ \" - \" + self.type + \" - \" + self.reference.title class", "django.contrib.auth import get_user_model User = get_user_model() from django.contrib.sites.models import Site from django.contrib.sites.managers import", "StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image3 = StdImageField(upload_to='projects', variations={'thumb': (300,", "( ('3', '3 - The item is a defining feature of the approach'),", 
"models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site = CurrentSiteManager() show_in_list", "= models.TextField(null=True, blank=True) also_known_as = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) complete =", "Meta: abstract = True # By default, any model that inherits from `TimestampedModel`", "blank=True) year = models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True, blank=True) date_added", "progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'), ) status = models.CharField(max_length=20, choices=STATUS, default='ongoing') active =", "Organization exclude = ['id', 'processes'] class Publisher(models.Model): name = models.CharField(max_length=255) def __str__(self): return", "null=True, blank=True) GROUP = ( ('academic', 'Academic'), ('theses', 'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'),", "timestamp representing when this object was created. 
created_at = models.DateTimeField(auto_now_add=True) # A timestamp", "{ 'primary_space': 'Reference space (optional)' } class VideoUploadForm(ModelForm): class Meta: model = Video", "= models.CharField(max_length=255) url = models.CharField(max_length=255) description = models.TextField() author = models.CharField(max_length=255) date =", "= models.CharField(\"recyling of material and energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes = models.CharField(\"stock", "= models.CharField(max_length=255, null=True, blank=True) references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup,", "Meta: ordering = [\"title\"] class ArticleForm(ModelForm): class Meta: model = Article fields =", "blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60, null=True, blank=True) location = models.CharField(max_length=255,", "data being visualized -- ONLY enter if this is not linked to a", "URL' } class Tag(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) parent_tag", "fill this out\") image = models.ImageField(null=True, blank=True, upload_to='articles') parent = models.ForeignKey( 'core.Article', on_delete=models.CASCADE,", "return self.type.name def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE)", "models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True ) authors = models.ManyToManyField(People, blank=True) active =", "of the source website/article -- ONLY enter if this is not linked to", "blank=True) description = HTMLField(null=True, blank=True) parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, 
blank=True) ORG_TYPE =", "__str__(self): return '%s %s' % (self.firstname, self.lastname) class Meta: ordering = [\"firstname\", \"lastname\"]", "= models.CharField(max_length=255) LANGUAGES = ( ('EN', 'English'), ('ES', 'Spanish'), ('CH', 'Chinese'), ('FR', 'French'),", "('ongoing', 'In progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'), ) status = models.CharField(max_length=20, choices=STATUS, default='ongoing')", "'National Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'), ('other',", "only occasionally in the mode of analysis, and in a partial or conditional", "'Master'), ('phd', 'PhD'), ('other', 'Other'), ) thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True) url", "(9, 'Methodologies'), (10, 'Other'), ) parent = models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This was", "import slugify from tinymce import HTMLField from django.contrib.auth.models import User from django.contrib.auth import", "self.article.title class EventForm(ModelForm): class Meta: model = Event exclude = ['article'] class VideoCollection(models.Model):", "'News and Events'), ) section = models.CharField(max_length=20, choices=SECTIONS, default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE,", "defining feature of the approach'), ('2', '2 - The feature is typically included", "upload_to='organizations') processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description =", "'Publishers'), ('ngo', 'NGO'), ('other', 'Other'), ) type = models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return", "blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING = (", 
"null=True, blank=True, help_text=\"Note: could also be considered as consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts", "representative_paper = models.TextField(null=True, blank=True, help_text=\"Which paper is a representative case study using this", "(2, 'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'), (5, 'Scales'), (6, 'Flows'), (7, 'Time", "models.ManyToManyField('staf.Material', blank=True) spaces = models.ManyToManyField(ReferenceSpace, blank=True) def __str__(self): return self.title class Meta: ordering", "category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description = HTMLField('description', null=True, blank=True) strengths =", "(4, 'Cities'), (5, 'Scales'), (6, 'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'), (10, 'Other'),", "HTMLField from django.contrib.auth.models import User from django.contrib.auth import get_user_model User = get_user_model() from", "('applied', 'Applied research'), ) type = models.CharField(max_length=20, choices=TYPE) THESISTYPE = ( ('bachelor', 'Bachelor'),", "= models.TextField(null=True, blank=True) link = models.CharField(max_length=255, null=True, blank=True) date = models.DateField() def __str__(self):", "('city_government', 'City Government'), ('regional_government', 'Regional Government'), ('national_government', 'National Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector',", "email = models.CharField(max_length=255, null=True, blank=True) email_public = models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True,", "= models.Manager() on_site = CurrentSiteManager() show_in_list = models.BooleanField(default=True) def __str__(self): return self.title class", "__str__(self): return self.name class MethodCategory(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True)", "class TagForm(ModelForm): class Meta: model 
= Tag exclude = ['id', 'gps', 'parent', 'hidden']", "People(models.Model): firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True, blank=True) email", "methodology address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed next steps to further develop/improve this", "'In progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'), ) status = models.CharField(max_length=20, choices=STATUS, default='ongoing') active", "Article fields = ['title', 'image', 'date', 'head', 'includes_form', 'slug', 'active','content'] class Event(models.Model): article", "= models.Manager() on_site = CurrentSiteManager() def __str__(self): return '%s %s' % (self.firstname, self.lastname)", "null=True, blank=True) comments = models.TextField(null=True, blank=True) STATUS = ( ('pending', 'Pending'), ('active', 'Active'),", "models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden': False}, related_name='children' ) hidden = models.BooleanField(db_index=True, default=False, help_text=\"Mark", "= models.BooleanField() projects = models.BooleanField() theses = models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site", "'Cities'), ('whatwedo', 'What We Do'), ('newsevents', 'News and Events'), ) section = models.CharField(max_length=20,", "description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses',", "= models.BooleanField() news = models.BooleanField() events = models.BooleanField() publications = models.BooleanField() dataviz =", "[\"-date\"] class Color(models.Model): name = models.CharField(max_length=20) css = models.CharField(max_length=20) def __str__(self): return self.name", "material_temp_notes = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, 
blank=True) output_tools = models.TextField(null=True, blank=True) output_reports", "[\"position\", \"tag__name\"] class TagForm(ModelForm): class Meta: model = Tag exclude = ['id', 'gps',", "'url', 'comments', 'file'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url':", "null=True, blank=True) location = models.CharField(max_length=255, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) def", "'1 - The item is included only occasionally in the mode of analysis,", "300), 'large': (1600,1600)}, null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager() on_site", "blank=True) institution = models.CharField(max_length=255, null=True, blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher =", "related_name='sectionparent', null=True, blank=True ) authors = models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True) SECTIONS =", "= models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments about the importing process')", "- \" + self.reference.title class MaterialGroup(models.Model): name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True)", "further develop/improve this methodology\") representative_paper = models.TextField(null=True, blank=True, help_text=\"Which paper is a representative", "blank=True) researcher = models.CharField(max_length=255, null=True, blank=True) supervisor = models.CharField(max_length=255, null=True, blank=True) email =", "'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 'end_date', 'status', 'url'] labels = { 'name':", "organization = models.ForeignKey(Organization, on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = ( ('publisher', 
'Publisher'),", "a publication\") source = models.TextField(null=True, blank=True, help_text=\"Name of the source website/article -- ONLY", "MethodCategory(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True,", "blank=True, limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) materials = models.ManyToManyField('staf.Material', blank=True)", "'site'] class Video(models.Model): title = models.CharField(max_length=255) url = models.CharField(max_length=255) description = models.TextField() author", "def __str__(self): return self.name @property def shortcode(self): \"Returns abbreviation -- text between parenthesis", "('0', '0 - Not included at all'), ) substances = models.CharField(\"selected specific substances\",", "choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and basic compounds only\") materials = models.CharField(\"materials / bulk", "+ \" - \" + self.reference.title class MaterialGroup(models.Model): name = models.CharField(max_length=255) description =", "class Project(models.Model): name = models.CharField(max_length=255, null=True, blank=True) full_name = models.CharField(max_length=255, null=True, blank=True) institution", "blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True)", "[\"title\"] class ArticleForm(ModelForm): class Meta: model = Article fields = ['title', 'introduction', 'content',", "= models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True) created_by = models.ForeignKey(User,", "models.NullBooleanField(default=False) def __str__(self): return self.tag.name class Meta: ordering = 
[\"position\", \"tag__name\"] class TagForm(ModelForm):", "website = models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site", "blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) STATUS =", ") thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) references", "TYPES = ( ('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20,", "enter if this is not linked to a publication\") source = models.TextField(null=True, blank=True,", "'isbn', 'url', 'comments', 'file'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN',", "class ProjectUserForm(ModelForm): class Meta: model = Project fields = ['name', 'researcher', 'type', 'thesistype',", "conditional way'), ('0', '0 - Not included at all'), ) substances = models.CharField(\"selected", "return self.article.title class EventForm(ModelForm): class Meta: model = Event exclude = ['article'] class", "HTMLField('description', null=True, blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden': False}, related_name='children' )", "site = models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return", "link = models.CharField(max_length=255, null=True, blank=True) date = models.DateField() def __str__(self): return self.title class", "= models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60, null=True, blank=True) location = models.CharField(max_length=255, null=True, blank=True)", "ongoing = models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they continue 
to implement it?\") consideration =", "blank=True) active = models.BooleanField(default=True) SECTIONS = ( ('about', 'About'), ('community', 'Community'), ('research', 'Research'),", "text between parenthesis -- if there is any\" if \"(\" in self.name: s", "was published at/in\" if self.journal: return self.journal.name elif self.event: return self.event.name else: return", "__str__(self): return self.name class Meta: ordering = [\"name\"] class Journal(models.Model): name = models.CharField(max_length=255)", "models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return self.title", "# per-model basis as needed, but reverse-chronological is a good # default ordering", "class Video(models.Model): title = models.CharField(max_length=255) url = models.CharField(max_length=255) description = models.TextField() author =", "mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1,", "models.CharField(max_length=255, null=True, blank=True) email_public = models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city',", "if this is not linked to a publication\") year = models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year", "default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() date", "news = models.BooleanField() events = models.BooleanField() publications = models.BooleanField() dataviz = models.BooleanField() multimedia", "null=True, blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave sources of flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, 
help_text=\"Note:", "model = Project exclude = ['id', 'site', 'references', 'organizations'] labels = { 'name':", "on_delete=models.CASCADE) datasets = models.BooleanField() news = models.BooleanField() events = models.BooleanField() publications = models.BooleanField()", "= models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups = models.ManyToManyField(MaterialGroup, blank=True)", "= models.CharField(\"cradle-to-grave sources of flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could also be", "import ReferenceSpace, License from django.forms import ModelForm from django.template.defaultfilters import slugify from tinymce", "of material flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit = models.CharField(max_length=255, null=True, blank=True) mass_balancing", "LANGUAGES = ( ('EN', 'English'), ('ES', 'Spanish'), ('CH', 'Chinese'), ('FR', 'French'), ('GE', 'German'),", "blank=True) abstract_original_language = models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True) file = models.FileField(null=True,", "name = models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True, blank=True) GROUP = ( ('academic', 'Academic'),", "'image', 'active'] class SimpleArticleForm(ModelForm): class Meta: model = Article fields = ['title', 'image',", "analysis, and in a partial or conditional way'), ('0', '0 - Not included", "blank=True) output_articles = models.TextField(null=True, blank=True) funding_program = models.CharField(max_length=255, null=True, blank=True) methodologies = models.TextField(null=True,", "help_text=\"Year of the data being visualized -- ONLY enter if this is not", "photos are best - please resize to 350x350 pixels\") PEOPLE_STATUS = ( ('active',", "name = models.CharField(max_length=255) def 
__str__(self): return self.name class MethodData(models.Model): name = models.CharField(max_length=255) def", "on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = ( ('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization',", "ordering = [\"name\"] class Journal(models.Model): name = models.CharField(max_length=255) website = models.CharField(max_length=255, null=True, blank=True)", "blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by =", "basic compounds only\") materials = models.CharField(\"materials / bulk materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "a good # default ordering for most models. ordering = ['-created_at', '-updated_at'] class", "GROUP = ( ('academic', 'Academic'), ('theses', 'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'), ) group", "= models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs = models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING, null=True,", "choices=STATUS, null=True, blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING", "introduction = models.TextField(null=True, blank=True) head = models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False) content =", "= ( ('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES)", "= models.CharField(max_length=255) description = models.TextField(null=True, blank=True) link = models.CharField(max_length=255, null=True, blank=True) date =", "of impacts of material flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) 
main_measurement_unit = models.CharField(max_length=255, null=True,", "models.BooleanField() theses = models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)", "} class VideoUploadForm(ModelForm): class Meta: model = Video fields = ['title', 'website', 'url',", "('3', '3 - The item is a defining feature of the approach'), ('2',", "year = models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True, blank=True) date_added =", "__str__(self): return self.name class Project(models.Model): name = models.CharField(max_length=255, null=True, blank=True) full_name = models.CharField(max_length=255,", "('community', 'Community'), ('research', 'Research'), ('resources', 'Resources'), ('cities', 'Cities'), ('whatwedo', 'What We Do'), ('newsevents',", "CurrentSiteManager() date = models.DateField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self):", "class Meta: abstract = True # By default, any model that inherits from", "= models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING = ( ('3', '3", "self.title class Meta: ordering = [\"position\"] class VideoCollectionForm(ModelForm): class Meta: model = VideoCollection", "pending'), ('yes', 'Yes - completed'), ) cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True) logo", "models.CharField(max_length=255) def __str__(self): return self.name class UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space", "blank=True) includes_form = models.BooleanField(default=False) content = HTMLField('Content', help_text=\"The content field is a required", "= models.CharField(max_length=20, 
choices=TYPES) def __str__(self): return self.organization.name + \" - \" + self.type", "models.DateTimeField(auto_now_add=True) # A timestamp reprensenting when this object was last updated. updated_at =", "blank=True) material_temp_notes = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) output_tools = models.TextField(null=True, blank=True)", "on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2}) profile = models.TextField(null=True, blank=True) research_interests = models.TextField(null=True,", "def __str__(self): return self.name class Project(models.Model): name = models.CharField(max_length=255, null=True, blank=True) full_name =", ") EVENT_TYPE = ( ('conference', 'Conference'), ('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other',", "weighting of impacts of material flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit = models.CharField(max_length=255,", "= { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceFormAdmin(ModelForm):", "models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces", "User from django.contrib.auth import get_user_model User = get_user_model() from django.contrib.sites.models import Site from", "methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace,", "blank=True) end = models.DateField(null=True, blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60, null=True,", "'Yes - pending'), ('yes', 'Yes - completed'), 
) cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True,", "models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This was a previous classification - can be left", "models.BooleanField(db_index=True, default=False, help_text=\"Mark if tag is superseded/not yet approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True, default=False)", "name = models.CharField(max_length=255) def __str__(self): return self.name class Meta: ordering = [\"name\"] class", "blank=True, upload_to='people', help_text=\"Square photos are best - please resize to 350x350 pixels\") PEOPLE_STATUS", "limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups", "website/article -- ONLY enter if this is not linked to a publication\") year", "'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', }", "1024)}) uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference =", "= models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS = ( ('3', 'Relation in UM", "material_groups = models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they continue to", "details of where this reference was published at/in\" if self.journal: return self.journal.name elif", "('dr', 'Draft ready for review'), ('rv', 'Reviewed - DONE'), ('ec', 'External copy'), ('sk',", "help_text=\"Which paper is a representative case study using this methodology?\") materials_catalog_used = models.TextField(null=True,", "'title_original_language', 'authorlist', 'type', 
'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 'comments', 'file']", "class VideoCollectionForm(ModelForm): class Meta: model = VideoCollection exclude = ['id', 'site'] class Video(models.Model):", "events = models.BooleanField() publications = models.BooleanField() dataviz = models.BooleanField() multimedia = models.BooleanField() projects", "do so') open_access = models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True)", "= models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def __str__(self): return self.people.firstname +", "class Meta: ordering = [\"position\", \"tag__name\"] class TagForm(ModelForm): class Meta: model = Tag", "models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True, blank=True) model_id = models.PositiveIntegerField(null=True,", "('universities', 'Universities'), ('city_government', 'City Government'), ('regional_government', 'Regional Government'), ('national_government', 'National Government'), ('statistical_agency', 'Statistical", "for most models. 
ordering = ['-created_at', '-updated_at'] class ReferenceType(models.Model): name = models.CharField(max_length=255) icon", "s = self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class Meta: ordering = [\"name\"]", "models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS = ( ('3', 'Relation in UM systems'),", "models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows = models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True,", "points = models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True, blank=True) model_id = models.PositiveIntegerField(null=True, blank=True) description", "self.name class MethodCategory(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) strengths =", "('rv', 'Reviewed - DONE'), ('ec', 'External copy'), ('sk', 'Skip - will not be", "reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def __str__(self): return self.people.firstname", "updated_at = models.DateTimeField(auto_now=True) def __str__(self): return self.title class Meta: ordering = [\"title\"] class", "= models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return self.title class Meta: ordering =", "source website/article -- ONLY enter if this is not linked to a publication\")", "STATUS = ( ('pending', 'Pending'), ('active', 'Active'), ('deleted', 'Deleted'), ) status = models.CharField(max_length=8,", "model = Article fields = ['title', 'image', 'date', 'head', 'includes_form', 'slug', 'active','content'] class", "models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) member_since = 
models.DateField(null=True, blank=True, db_index=True) user", "'Low'), ('m', 'Medium'), ('h', 'High'), ) relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS", "'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def __str__(self): return", "/ Project leader(s)', 'url': 'URL', } class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project", "= ( ('about', 'About'), ('community', 'Community'), ('research', 'Research'), ('resources', 'Resources'), ('cities', 'Cities'), ('whatwedo',", "models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True)", "'large': (1600,1600)}, null=True, blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True,", "re from django.urls import reverse class TimestampedModel(models.Model): # A timestamp representing when this", "on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self): return self.name class", "null=True, blank=True) image3 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) site", "max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts = models.CharField(\"quantitative weighting of impacts of material flows\",", "being visualized -- ONLY enter if this is not linked to a publication\")", "institution = models.CharField(max_length=255, null=True, blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher = models.CharField(max_length=255,", "journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, 
help_text=\"If the journal does not appear in", "[\"name\"] class OrganizationForm(ModelForm): class Meta: model = Organization exclude = ['id', 'processes'] class", "= models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, help_text=\"If the journal does", "components\") classification = models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity", "in a partial or conditional way'), ('0', '0 - Not included at all'),", "help_text=\"This was a previous classification - can be left empty\") def __str__(self): return", "'Dutch'), ('OT', 'Other'), ) language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True, null=True)", "models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='journals')", "'Countries'), (4, 'Cities'), (5, 'Scales'), (6, 'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'), (10,", "db_table = 'core_reference_authors' class ReferenceForm(ModelForm): class Meta: model = Reference fields = ['language',", "site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def __str__(self): return self.people.firstname + \" \" +", "null=True, blank=True) collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license =", "models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager()", "related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, 
blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True)", "of analysis, and in a partial or conditional way'), ('0', '0 - Not", "= models.CharField(\"specific goods and services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) production = models.CharField(\"production processes\",", "= models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE, null=True,", "is not linked to a publication\") source = models.TextField(null=True, blank=True, help_text=\"Name of the", "material flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit = models.CharField(max_length=255, null=True, blank=True) mass_balancing =", "'Reviewed - DONE'), ('ec', 'External copy'), ('sk', 'Skip - will not be published'),", "self.name class Meta: verbose_name_plural = \"Method families\" verbose_name = \"method family\" class Method(models.Model):", "['title', 'image', 'date', 'head', 'includes_form', 'slug', 'active','content'] class Event(models.Model): article = models.OneToOneField( Article,", "steps to further develop/improve this methodology\") representative_paper = models.TextField(null=True, blank=True, help_text=\"Which paper is", "'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceFormAdmin(ModelForm): class Meta:", "models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, help_text=\"If the", "\"Return details of where this reference was published at/in\" if self.journal: return self.journal.name", "title = models.CharField(max_length=255) image = StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024, 1024)}) uploaded_by", "Meta: ordering = [\"name\"] class 
People(models.Model): firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation", "__str__(self): return self.name class MethodData(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class", "Used for image resizing from stdimage.models import StdImageField import re from django.urls import", "= models.CharField(max_length=255, null=True, blank=True) institution = models.CharField(max_length=255, null=True, blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization',", "main_measurement_unit = models.CharField(max_length=255, null=True, blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting =", "null=True, blank=True) print_aim = models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True, blank=True) RELEVANCE = (", "projects = models.BooleanField() theses = models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site,", "['id', 'site', 'references', 'organizations'] labels = { 'name': 'Project title', 'thesistype': 'Thesis type',", "__str__(self): return self.name def get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id]) class Meta: ordering =", "models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True, blank=True) class Meta: ordering = [\"-date\"] class Color(models.Model):", "image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image2 = StdImageField(upload_to='projects',", "PARENTS = ( (1, 'Publication Types'), (2, 'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'),", "null=True, blank=True, help_text=\"Key socio-institutional entity (driving force boundary for induced flows)\") temporal_study_boundary =", "on_site = CurrentSiteManager() def 
__str__(self): return self.name def get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id])", "+ \" - \" + self.project.name class Timeline(models.Model): title = models.CharField(max_length=255) description =", "('publisher', 'Publishers'), ('ngo', 'NGO'), ('other', 'Other'), ) type = models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self):", "null=True, blank=True) year = models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True, blank=True)", "= Video exclude = ['id', 'site'] labels = { 'primary_space': 'Reference space (optional)'", "consideration\") target_audience = models.TextField(null=True, blank=True, help_text=\"Target audience of results\") indicators = models.TextField(null=True, blank=True,", "ordering = [\"name\"] class MethodClassification(models.Model): name = models.CharField(max_length=255) description = models.TextField() def __str__(self):", "order. 
We can override this on a # per-model basis as needed, but", "variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE) objects =", "('ec', 'External copy'), ('sk', 'Skip - will not be published'), ) status =", "= models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255, null=True,", "blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self):", "= Video fields = ['title', 'website', 'url', 'primary_space', 'description', 'author', 'date', 'thumbnail', 'license']", "Meta: db_table = 'core_reference_authors' class ReferenceForm(ModelForm): class Meta: model = Reference fields =", "hidden flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts = models.CharField(\"quantitative weighting of impacts of", "Meta: ordering = [\"firstname\", \"lastname\"] class PeopleForm(ModelForm): class Meta: model = People exclude", "return reverse(\"core:project\", args=[self.type, self.id]) class Meta: ordering = ['name'] class ProjectForm(ModelForm): class Meta:", "list, please leave empty and add the name in the comments\") event =", "choices=METHOD_SCORING, null=True, blank=True) between_flows = models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of flows", "[\"date\"] def __str__(self): return self.title class NewsletterSubscriber(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) datasets =", "('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def __str__(self): return self.organization.name", "self.title class NewsletterSubscriber(models.Model): people = models.ForeignKey(People, 
on_delete=models.CASCADE) datasets = models.BooleanField() news = models.BooleanField()", "= ( ('pending', 'Pending'), ('active', 'Active'), ('deleted', 'Deleted'), ) status = models.CharField(max_length=8, choices=STATUS,", "null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects = models.Manager()", "is typically included in the techique'), ('1', '1 - The item is included", "models.TextField(null=True, blank=True) complete = models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False) def __str__(self): return self.tag.name", "on_delete=models.CASCADE) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return self.name def get_absolute_url(self):", "verbose_name_plural = \"case studies\" class UserAction(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name", "flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts = models.CharField(\"quantitative weighting of impacts of material", "max_length=1, choices=METHOD_SCORING, null=True, blank=True) specific = models.CharField(\"specific goods and services\", max_length=1, choices=METHOD_SCORING, null=True,", "image = models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self): return self.name class Meta: ordering =", "django.template.defaultfilters import slugify from tinymce import HTMLField from django.contrib.auth.models import User from django.contrib.auth", "= models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) email_public", "'head', 'includes_form', 'slug', 'active','content'] class Event(models.Model): article = models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True,", "('no', 'No'), ('pending', 
'Yes - pending'), ('yes', 'Yes - completed'), ) cityloops =", "'hidden'] class Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES = ( ('EN', 'English'), ('ES', 'Spanish'),", "def __str__(self): return self.article.title class EventForm(ModelForm): class Meta: model = Event exclude =", "class Meta: model = Reference fields = ['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal',", "models.CharField(max_length=255) url = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) linkedin =", "= ['id', 'gps', 'parent', 'hidden'] class Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES = (", "'Other'), ) start = models.DateField(null=True, blank=True) end = models.DateField(null=True, blank=True) type = models.CharField(max_length=20,", "class VideoUploadForm(ModelForm): class Meta: model = Video fields = ['title', 'website', 'url', 'primary_space',", "fields, or other system components\") classification = models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id':", "import get_user_model User = get_user_model() from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager", "ordering = [\"name\"] class Organization(models.Model): name = models.CharField(max_length=255) url = models.CharField(max_length=255, null=True, blank=True)", "choices=ORG_TYPE) def __str__(self): return self.name class Meta: ordering = [\"name\"] class OrganizationForm(ModelForm): class", "choices=METHOD_CLASS, null=True, blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description = HTMLField('description', null=True,", "= models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def __str__(self): return self.name class Meta: ordering =", "people = 
models.ManyToManyField(People, blank=True) VIDEOSITES = ( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia", "= models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table = 'core_reference_authors' class", "position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING = ( ('3',", "- The item is a defining feature of the approach'), ('2', '2 -", "isbn = models.CharField(max_length=255, null=True, blank=True) comments = models.TextField(null=True, blank=True) STATUS = ( ('pending',", "= models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site = CurrentSiteManager() show_in_list = models.BooleanField(default=True) def __str__(self):", "it?\") consideration = models.TextField(null=True, blank=True, help_text=\"Circular economy / closing loop consideration\") target_audience =", "so') open_access = models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import", "self.organization.name + \" - \" + self.type + \" - \" + self.reference.title", "models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return self.name def", "= models.BooleanField(db_index=True, default=False, help_text=\"Mark if tag is superseded/not yet approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True,", ") relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS = ( ('no', 'No'), ('pending',", "null=True, blank=True, help_text=\"This was a previous classification - can be left empty\") def", "null=True, blank=True) logo = models.ImageField(null=True, blank=True, upload_to='organizations') processes = models.ManyToManyField('staf.Process', 
blank=True, limit_choices_to={'slug__isnull': False})", "class Meta: model = Project fields = ['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor',", "image = models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos are best - please resize to", "= models.CharField(max_length=20) css = models.CharField(max_length=20) def __str__(self): return self.name class Project(models.Model): name =", "parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden': False}, related_name='children' ) hidden = models.BooleanField(db_index=True,", "null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True,", "be considered as consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts for hidden flows\", max_length=1, choices=METHOD_SCORING,", "318}, related_name=\"methods\") METHOD_CLASS = ( ('3', 'Relation in UM systems'), ('2', 'Flows of", "models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True)", "researchgate = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True,", "'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'), ) start = models.DateField(null=True, blank=True) end", "'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url':", "Project fields = ['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date',", "on_delete=models.CASCADE, null=True, blank=True, 
limit_choices_to={'hidden': False}, related_name='children' ) hidden = models.BooleanField(db_index=True, default=False, help_text=\"Mark if", "null=True, blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description = HTMLField('description', null=True, blank=True)", "= models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths',", "labels = { 'primary_space': 'Reference space', 'url': 'Video URL' } class Tag(models.Model): name", "model_id = models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True, blank=True) class Meta: ordering = [\"-date\"]", "help_text=\"Note: could also be considered as consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts for hidden", "'NGO'), ('other', 'Other'), ) type = models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return self.name class", "of results\") indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\") pros = models.TextField(null=True, blank=True, help_text=\"Indicators\") cons", "'Theses'), ('projects', 'Projects'), ('applied', 'Applied research'), ) type = models.CharField(max_length=20, choices=TYPE) THESISTYPE =", "'core_reference_authors' class ReferenceForm(ModelForm): class Meta: model = Reference fields = ['language', 'title', 'title_original_language',", "models.Manager() on_site = CurrentSiteManager() def __str__(self): return self.name def get_absolute_url(self): return reverse(\"core:project\", args=[self.type,", "= HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) def __str__(self): return self.name", "best - please resize to 350x350 pixels\") PEOPLE_STATUS = ( ('active', 'Active'), ('retired',", "the file if you are the creator or you have permission to do", "help_text=\"Indicators\") 
cons = models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True, help_text=\"Purpose of the", "= models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site = CurrentSiteManager()", "relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS = ( ('no', 'No'), ('pending', 'Yes", "null=True, blank=True) doi = models.CharField(max_length=255, null=True, blank=True) isbn = models.CharField(max_length=255, null=True, blank=True) comments", "was last updated. updated_at = models.DateTimeField(auto_now=True) class Meta: abstract = True # By", "variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image3 = StdImageField(upload_to='projects', variations={'thumb': (300, 300),", "choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit = models.CharField(max_length=255, null=True, blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True,", "between parenthesis -- if there is any\" if \"(\" in self.name: s =", "self.name class Meta: ordering = [\"name\"] class MethodClassification(models.Model): name = models.CharField(max_length=255) description =", "null=True, blank=True, help_text=\"Do they continue to implement it?\") consideration = models.TextField(null=True, blank=True, help_text=\"Circular", "'Seminar'), ('other', 'Other'), ) start = models.DateField(null=True, blank=True) end = models.DateField(null=True, blank=True) type", "icon = models.CharField(max_length=255, null=True, blank=True) GROUP = ( ('academic', 'Academic'), ('theses', 'Theses'), ('reports',", "('other', 'Other'), ) thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True) url = models.CharField(max_length=255, null=True,", "models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING, 
null=True, blank=True) specific = models.CharField(\"specific goods and services\", max_length=1,", "twitter = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255,", "'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'), ) group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def", "model that inherits from `TimestampedModel` should # be ordered in reverse-chronological order. We", "blank=True, help_text=\"Purpose of the study\") def __str__(self): return self.title class Meta: verbose_name_plural =", "reprensenting when this object was last updated. updated_at = models.DateTimeField(auto_now=True) class Meta: abstract", "UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference", "= HTMLField('description', null=True, blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden': False}, related_name='children'", "= models.TextField(null=True, blank=True, help_text=\"Indicators\") pros = models.TextField(null=True, blank=True, help_text=\"Indicators\") cons = models.TextField(null=True, blank=True,", "blank=True, help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True, help_text=\"Purpose of the study\") def __str__(self): return", "'About'), ('community', 'Community'), ('research', 'Research'), ('resources', 'Resources'), ('cities', 'Cities'), ('whatwedo', 'What We Do'),", "import StdImageField import re from django.urls import reverse class TimestampedModel(models.Model): # A timestamp", "[\"-year\", \"title\"] def source(self): \"Return details of where this reference was published at/in\"", "on_delete=models.CASCADE, null=True, blank=True) year = 
models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True,", "/ bulk materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return self.title class VideoForm(ModelForm):", "Meta: model = Event exclude = ['article'] class VideoCollection(models.Model): title = models.CharField(max_length=255) description", "labels = { 'primary_space': 'Reference space (optional)' } class VideoUploadForm(ModelForm): class Meta: model", "variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300),", "null=True, blank=True) def __str__(self): return self.article.title class EventForm(ModelForm): class Meta: model = Event", "max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and basic compounds only\") materials = models.CharField(\"materials /", "blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference =", "reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE)", "this methodology?\") materials_catalog_used = models.TextField(null=True, blank=True) also_known_as = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True,", "('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other', 'Other website'), ) website = models.CharField(max_length=20, choices=VIDEOSITES)", "and add the name in the comments\") event 
= models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True)", "models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) references = models.ManyToManyField(Reference, blank=True,", "= models.BooleanField(default=True) TYPE = ( ('theses', 'Theses'), ('projects', 'Projects'), ('applied', 'Applied research'), )", "null=True, blank=True) METHOD_SCORING = ( ('3', '3 - The item is a defining", "class Meta: model = VideoCollection exclude = ['id', 'site'] class Video(models.Model): title =", "this is not linked to a publication\") year = models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of", "to do so') open_access = models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False) cityloops_comments = models.TextField(null=True,", "blank=True) url = models.CharField(max_length=255, null=True, blank=True) references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups", "django.conf import settings # Used for image resizing from stdimage.models import StdImageField import", ") status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations =", "= StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE)", "blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description = HTMLField('description', null=True, blank=True) strengths", "reverse(\"core:project\", args=[self.type, self.id]) class Meta: ordering = ['name'] class ProjectForm(ModelForm): class Meta: model", "models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() 
primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE,", "materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs =", "'-updated_at'] class ReferenceType(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True, blank=True) GROUP =", "blank=True) supervisor = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) description =", "models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, help_text=\"If the journal does not", "models from multiplicity.models import ReferenceSpace, License from django.forms import ModelForm from django.template.defaultfilters import", "description = HTMLField(null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True, help_text=\"URL of the source", "'parent', 'hidden'] class Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES = ( ('EN', 'English'), ('ES',", "on_delete=models.CASCADE, null=True, blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave sources", "ordering = ['-created_at', '-updated_at'] class ReferenceType(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True,", "Government'), ('national_government', 'National Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private Sector'), ('publisher', 'Publishers'), ('ngo',", "energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs = models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING,", "this methodology\") representative_paper = 
models.TextField(null=True, blank=True, help_text=\"Which paper is a representative case study", "def get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id]) class Meta: ordering = ['name'] class ProjectForm(ModelForm):", "'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def __str__(self): return", "site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() primary_space =", "__str__(self): return self.title class NewsletterSubscriber(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) datasets = models.BooleanField() news", "blank=True, help_text=\"Circular economy / closing loop consideration\") target_audience = models.TextField(null=True, blank=True, help_text=\"Target audience", "models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = ( ('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type", "def __str__(self): return self.name class MethodData(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name", "blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site", "cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments about the importing", "Organization(models.Model): name = models.CharField(max_length=255) url = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True,", "(3, 'Countries'), (4, 'Cities'), (5, 'Scales'), (6, 'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'),", "blank=True, help_text=\"Key socio-institutional entity (driving force boundary for induced flows)\") 
temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary,", "null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) def __str__(self): return self.name class Meta:", ") cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True) logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300),", "= models.CharField(max_length=255) slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction = models.TextField(null=True, blank=True) head", "'Thesis type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', } class", "models.DateTimeField(auto_now=True) class Meta: abstract = True # By default, any model that inherits", "twitter = models.CharField(max_length=255, null=True, blank=True) google_scholar = models.CharField(max_length=255, null=True, blank=True) orcid = models.CharField(max_length=255,", "= models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS = ( ('no', 'No'), ('pending', 'Yes -", "'ISBN', 'url': 'URL', } class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) reference = models.ForeignKey(Reference,", "Meta: model = Project fields = ['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email',", "for hidden flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts = models.CharField(\"quantitative weighting of impacts", "= Project exclude = ['id', 'site', 'references', 'organizations'] labels = { 'name': 'Project", "class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table", "= models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, 
null=True,", "MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class MethodData(models.Model): name = models.CharField(max_length=255)", "blank=True) orcid = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) linkedin =", "Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS = ( ('3', 'Relation", "limit_choices_to={'type__id': 3}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they", "= models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True) print_aim = models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True, blank=True)", "models.CharField(\"cradle-to-grave sources of flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could also be considered", "= models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) output_tools = models.TextField(null=True, blank=True) output_reports =", "'What We Do'), ('newsevents', 'News and Events'), ) section = models.CharField(max_length=20, choices=SECTIONS, default='about')", "'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'), ('pending', 'Pending Review'), ) status =", "field is a required field - be sure to fill this out\") image", "blank=True) isbn = models.CharField(max_length=255, null=True, blank=True) comments = models.TextField(null=True, blank=True) STATUS = (", "TYPE = ( ('theses', 'Theses'), ('projects', 'Projects'), ('applied', 'Applied research'), ) type =", "models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, 
blank=True) email_public =", "help_text='Only upload the file if you are the creator or you have permission", "next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed next steps to further develop/improve this methodology\") representative_paper", "models.CharField(max_length=255, null=True, blank=True) methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags =", "= ['id', 'site'] class Video(models.Model): title = models.CharField(max_length=255) url = models.CharField(max_length=255) description =", "= models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) references = models.ManyToManyField(Reference,", "ordering = [\"position\", \"tag__name\"] class TagForm(ModelForm): class Meta: model = Tag exclude =", "\" + self.type + \" - \" + self.project.name class Timeline(models.Model): title =", "( ('no', 'No'), ('pending', 'Yes - pending'), ('yes', 'Yes - completed'), ) cityloops", "language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True, null=True) authorlist = models.TextField() type", "on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True,", "'type', 'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 'comments', 'file'] labels =", "continue to implement it?\") consideration = models.TextField(null=True, blank=True, help_text=\"Circular economy / closing loop", "StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800, 800)}, null=True, blank=True) image1 = StdImageField(upload_to='projects', variations={'thumb':", "'content', 'image', 'active'] class SimpleArticleForm(ModelForm): class 
Meta: model = Article fields = ['title',", "= models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True) objects = models.Manager() on_site =", "- The feature is typically included in the techique'), ('1', '1 - The", "flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit = models.CharField(max_length=255, null=True, blank=True) mass_balancing = models.CharField(max_length=1,", "fields = ['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 'end_date',", "boundary for induced flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources = models.ForeignKey(MethodData,", "return self.name class MethodCategory(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) strengths", "models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description = HTMLField('description',", "= models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = ( ('funder', 'Funder'), ('commissioner',", "type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, help_text=\"If the journal", "models.CharField(max_length=255) date = models.DateField(null=True) people = models.ManyToManyField(People, blank=True) VIDEOSITES = ( ('youtube', 'YouTube'),", "is not linked to a publication\") year = models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of the", "= models.CharField(\"materials / bulk materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy = 
models.CharField(max_length=1, choices=METHOD_SCORING,", "on_delete=models.CASCADE) class Meta: db_table = 'core_reference_authors' class ReferenceForm(ModelForm): class Meta: model = Reference", "avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by = models.CharField(max_length=255,", "from tinymce import HTMLField from django.contrib.auth.models import User from django.contrib.auth import get_user_model User", "user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference =", "= ( ('u', 'Unknown'), ('l', 'Low'), ('m', 'Medium'), ('h', 'High'), ) relevance =", "blank=True) description = models.TextField(null=True, blank=True) member_since = models.DateField(null=True, blank=True, db_index=True) user = models.OneToOneField(User,", "models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the file if you are the creator or", "= models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True,", "models.CharField(max_length=255, null=True, blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher = models.CharField(max_length=255, null=True, blank=True)", "# default ordering for most models. 
ordering = ['-created_at', '-updated_at'] class ReferenceType(models.Model): name", "force boundary for induced flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources =", "visualized -- ONLY enter if this is not linked to a publication\") class", "exclude = ['id', 'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces', 'tags', 'materials'] labels =", "null=True, blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden': False}, related_name='children' ) hidden", "status = models.CharField(max_length=8, choices=STATUS, db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization')", "blank=True) entity = models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional entity (driving force boundary for", "'Resources'), ('cities', 'Cities'), ('whatwedo', 'What We Do'), ('newsevents', 'News and Events'), ) section", "= ['id', 'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces', 'tags', 'materials'] labels = {", "linked to a publication\") class Meta: ordering = [\"date\"] def __str__(self): return self.title", "blank=True) image = models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos are best - please resize", "models.TextField(null=True, blank=True, help_text=\"Target audience of results\") indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\") pros =", "date = models.DateField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return", "out\") image = models.ImageField(null=True, blank=True, upload_to='articles') parent = models.ForeignKey( 'core.Article', on_delete=models.CASCADE, 
related_name='sectionparent', null=True,", "= models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = ( ('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), )", "on_delete=models.CASCADE) class Meta: ordering = [\"date\"] class Article(models.Model): title = models.CharField(max_length=255) slug =", "ordering = [\"-date\"] class Color(models.Model): name = models.CharField(max_length=20) css = models.CharField(max_length=20) def __str__(self):", "choices=RELEVANCE, null=True, blank=True) CITYLOOPS = ( ('no', 'No'), ('pending', 'Yes - pending'), ('yes',", "models.DateField(null=True, blank=True) end = models.DateField(null=True, blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60,", "on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, help_text=\"If the journal does not appear", "'Other'), ) language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True, null=True) authorlist =", "models.CharField(max_length=255) url = models.CharField(max_length=255) description = models.TextField() author = models.CharField(max_length=255) date = models.DateField(null=True)", "in the list, please leave empty and add the name in the comments\")", "return self.name class Meta: ordering = [\"name\"] class MethodClassification(models.Model): name = models.CharField(max_length=255) description", "default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() date = models.DateField(null=True, blank=True) created_at =", "publication\") class Meta: ordering = [\"date\"] def __str__(self): return self.title class NewsletterSubscriber(models.Model): people", "= CurrentSiteManager() def __str__(self): return self.name def get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id]) class", "systems'), ('2', 
'Flows of substances'), ('1', 'Environmental impacts'), ) method_class = models.CharField(max_length=1, choices=METHOD_CLASS,", "= ( (1, 'Publication Types'), (2, 'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'), (5,", "ReferenceSpace, License from django.forms import ModelForm from django.template.defaultfilters import slugify from tinymce import", "models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True, blank=True,", "def __str__(self): return '%s %s' % (self.firstname, self.lastname) class Meta: ordering = [\"firstname\",", "blank=True) email = models.CharField(max_length=255, null=True, blank=True) description = HTMLField('description', null=True, blank=True) target_finish_date =", "blank=True) METHOD_SCORING = ( ('3', '3 - The item is a defining feature", "} class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES =", "def __str__(self): return self.title class NewsletterSubscriber(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) datasets = models.BooleanField()", "Journal(models.Model): name = models.CharField(max_length=255) website = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True)", "Meta: verbose_name_plural = \"Method families\" verbose_name = \"method family\" class Method(models.Model): tag =", "s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class Meta: ordering = [\"name\"] class MethodClassification(models.Model): name =", "318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups =", "start_date = models.DateField(blank=True, null=True) end_date = 
models.DateField(blank=True, null=True) STATUS = ( ('planned', 'Planned'),", "affiliation = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) email_public = models.BooleanField()", "self.name class MethodData(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class MethodCategory(models.Model): name", "HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) def __str__(self): return self.name class", "scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255, null=True, blank=True, help_text=\"Key", "models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) complete = models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False)", "blank=True) logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800, 800)}, null=True, blank=True) image1", "('theses', 'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'), ) group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True)", "False}, related_name='children' ) hidden = models.BooleanField(db_index=True, default=False, help_text=\"Mark if tag is superseded/not yet", "self.event.name else: return self.type.name def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference =", "('EN', 'English'), ('ES', 'Spanish'), ('CH', 'Chinese'), ('FR', 'French'), ('GE', 'German'), ('NL', 'Dutch'), ('OT',", "self.tag.name class Meta: ordering = [\"position\", \"tag__name\"] class TagForm(ModelForm): class Meta: model =", ") language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True, 
null=True) authorlist = models.TextField()", "= get_user_model() from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.conf import", "in self.name: s = self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class Meta: ordering", "max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes = models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) specific", "not linked to a publication\") class Meta: ordering = [\"date\"] def __str__(self): return", "null=True, blank=True) description = HTMLField('description', null=True, blank=True) target_finish_date = models.CharField(max_length=255, null=True, blank=True) start_date", "null=True, blank=True) date = models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model", "models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE = ( ('conference', 'Conference'), ('hackathon', 'Hackathon'),", "also be considered as consumption-based accounting?)\") hidden_flows = models.CharField(\"accounts for hidden flows\", max_length=1,", "'authors', 'spaces', 'tags', 'materials'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN',", "self.organization.name + \" - \" + self.type + \" - \" + self.project.name", "- DONE'), ('ec', 'External copy'), ('sk', 'Skip - will not be published'), )", "a representative case study using this methodology?\") materials_catalog_used = models.TextField(null=True, blank=True) also_known_as =", "null=True, blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher = models.CharField(max_length=255, null=True, blank=True) supervisor", "comments about the importing process') url = models.CharField(max_length=500, null=True, blank=True) doi = 
models.CharField(max_length=255,", "% (self.firstname, self.lastname) class Meta: ordering = [\"firstname\", \"lastname\"] class PeopleForm(ModelForm): class Meta:", "null=True, blank=True) date = models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True) url = models.CharField(max_length=255, null=True,", "null=True, blank=True) email_public = models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type':", "choices=TYPE) THESISTYPE = ( ('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'), ('other', 'Other'), )", "model = Article fields = ['title', 'introduction', 'content', 'image', 'active'] class SimpleArticleForm(ModelForm): class", "[\"name\"] class MethodClassification(models.Model): name = models.CharField(max_length=255) description = models.TextField() def __str__(self): return self.name", "Institution'), ('universities', 'Universities'), ('city_government', 'City Government'), ('regional_government', 'Regional Government'), ('national_government', 'National Government'), ('statistical_agency',", "= ( ('conference', 'Conference'), ('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'), )", "( ('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def", ") status = models.CharField(max_length=20, choices=STATUS, default='ongoing') active = models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE", "'url', 'primary_space', 'description', 'author', 'date', 'thumbnail', 'license'] labels = { 'primary_space': 'Reference space',", "on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site = CurrentSiteManager() show_in_list =", "site = models.ForeignKey(Site, 
on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() date =", "self.event: return self.event.name else: return self.type.name def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model):", "self.journal: return self.journal.name elif self.event: return self.event.name else: return self.type.name def accountingMethods(self): return", "= models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True) SECTIONS = ( ('about', 'About'), ('community', 'Community'),", "in UM systems'), ('2', 'Flows of substances'), ('1', 'Environmental impacts'), ) method_class =", "blank=True) doi = models.CharField(max_length=255, null=True, blank=True) isbn = models.CharField(max_length=255, null=True, blank=True) comments =", "'isbn': 'ISBN', 'url': 'URL', } class ReferenceFormAdmin(ModelForm): class Meta: model = Reference exclude", "= models.TextField(null=True, blank=True, help_text=\"What gaps does in other methodologies does this particular methodology", "'Reference space', 'url': 'Video URL' } class Tag(models.Model): name = models.CharField(max_length=255) description =", "note = models.TextField(null=True, blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering = [\"date\"]", "self.id]) class Meta: ordering = ['name'] class ProjectForm(ModelForm): class Meta: model = Project", "return self.title class Meta: ordering = [\"-year\", \"title\"] def source(self): \"Return details of", "( ('academic', 'Academic'), ('theses', 'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'), ) group = models.CharField(max_length=20,", "permission to do so') open_access = models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False) cityloops_comments =", "null=True, blank=True) stock_changes = models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING, 
null=True, blank=True) specific = models.CharField(\"specific", "blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave sources of flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could", "created. created_at = models.DateTimeField(auto_now_add=True) # A timestamp reprensenting when this object was last", "self.name class Meta: ordering = [\"name\"] class OrganizationForm(ModelForm): class Meta: model = Organization", "def __str__(self): return self.tag.name class Meta: ordering = [\"position\", \"tag__name\"] class TagForm(ModelForm): class", "for image resizing from stdimage.models import StdImageField import re from django.urls import reverse", "= CurrentSiteManager() date = models.DateField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def", "= models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE,", "[\"name\"] class Organization(models.Model): name = models.CharField(max_length=255) url = models.CharField(max_length=255, null=True, blank=True) twitter =", "[\"firstname\", \"lastname\"] class PeopleForm(ModelForm): class Meta: model = People exclude = ['id'] class", "class CaseStudy(models.Model): title = models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference =", "ordering = [\"date\"] class Article(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(db_index=True, max_length=255, null=True,", "= models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site =", "logo = models.ImageField(null=True, blank=True, upload_to='organizations') processes 
= models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces =", "upload_to='journals') def __str__(self): return self.name class Meta: ordering = [\"name\"] class People(models.Model): firstname", "head = models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False) content = HTMLField('Content', help_text=\"The content field", "creator or you have permission to do so') open_access = models.NullBooleanField(null=True, blank=True) cityloops", "inherits from `TimestampedModel` should # be ordered in reverse-chronological order. We can override", "theses = models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def", "models.ImageField(null=True, blank=True, upload_to='articles') parent = models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True ) authors", "models.TextField(null=True, blank=True, help_text=\"Indicators\") cons = models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True, help_text=\"Purpose", "choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could also be considered as consumption-based accounting?)\") hidden_flows =", "verbose_name = \"method family\" class Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\")", "-- ONLY enter if this is not linked to a publication\") source =", "Meta: model = VideoCollection exclude = ['id', 'site'] class Video(models.Model): title = models.CharField(max_length=255)", "models.CharField(max_length=255, null=True, blank=True) google_scholar = models.CharField(max_length=255, null=True, blank=True) orcid = models.CharField(max_length=255, null=True, blank=True)", "= 'core_reference_authors' class 
ReferenceForm(ModelForm): class Meta: model = Reference fields = ['language', 'title',", "( ('3', 'Relation in UM systems'), ('2', 'Flows of substances'), ('1', 'Environmental impacts'),", "models.CharField(max_length=255, blank=True, null=True) authorlist = models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal,", "null=True, blank=True) model_id = models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True, blank=True) class Meta: ordering", "space (optional)' } class VideoUploadForm(ModelForm): class Meta: model = Video fields = ['title',", "Meta: ordering = [\"-date\"] class Color(models.Model): name = models.CharField(max_length=20) css = models.CharField(max_length=20) def", "'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE)", "models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they continue to implement it?\") consideration = models.TextField(null=True, blank=True,", "name = models.CharField(max_length=255, null=True, blank=True) full_name = models.CharField(max_length=255, null=True, blank=True) institution = models.CharField(max_length=255,", "models.CharField(\"recyling of material and energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes = models.CharField(\"stock changes\",", "blank=True) description = models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image =", "CurrentSiteManager() show_in_list = models.BooleanField(default=True) def __str__(self): return self.title class Meta: ordering = [\"position\"]", "= models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) specific = models.CharField(\"specific goods and services\",", "'Chinese'), ('FR', 'French'), ('GE', 'German'), 
('NL', 'Dutch'), ('OT', 'Other'), ) language = models.CharField(max_length=2,", "implement it?\") consideration = models.TextField(null=True, blank=True, help_text=\"Circular economy / closing loop consideration\") target_audience", "null=True, blank=True) orcid = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) linkedin", "= models.DateTimeField(auto_now=True) def __str__(self): return self.title class Meta: ordering = [\"title\"] class ArticleForm(ModelForm):", "this on a # per-model basis as needed, but reverse-chronological is a good", "def __str__(self): return self.title class Meta: ordering = [\"position\"] class VideoCollectionForm(ModelForm): class Meta:", "'primary_space': 'Reference space', 'url': 'Video URL' } class Tag(models.Model): name = models.CharField(max_length=255) description", "representative case study using this methodology?\") materials_catalog_used = models.TextField(null=True, blank=True) also_known_as = models.TextField(null=True,", "location = models.CharField(max_length=255, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) def __str__(self): return", "= \"Method families\" verbose_name = \"method family\" class Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE,", "django.forms import ModelForm from django.template.defaultfilters import slugify from tinymce import HTMLField from django.contrib.auth.models", "('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'), ('other', 'Other'), )", "website = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE,", "'materials'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', }", "['id'] class PeopleNote(models.Model): people = 
models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note = models.TextField(null=True,", "models.CharField(max_length=255) def __str__(self): return self.name class Meta: ordering = [\"name\"] class Journal(models.Model): name", "Review'), ) status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations", "models.CharField(max_length=60, null=True, blank=True) location = models.CharField(max_length=255, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True)", "name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True,", "= models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255,", "self.name class Meta: ordering = [\"name\"] class People(models.Model): firstname = models.CharField(max_length=255) lastname =", "section = models.CharField(max_length=20, choices=SECTIONS, default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager()", "models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True, blank=True) RELEVANCE = ( ('u', 'Unknown'), ('l', 'Low'),", "'start_date', 'end_date', 'status', 'url'] labels = { 'name': 'Project title', 'thesistype': 'Thesis type',", "class Meta: model = Video exclude = ['id', 'site'] labels = { 'primary_space':", "} class ProjectUserForm(ModelForm): class Meta: model = Project fields = ['name', 'researcher', 'type',", "TYPES = ( ('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20,", 
"on_delete=models.CASCADE) TYPES = ( ('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type =", "particular methodology address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed next steps to further develop/improve", "the mode of analysis, and in a partial or conditional way'), ('0', '0", "False}) processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) materials = models.ManyToManyField('staf.Material', blank=True) spaces =", "the source website/article -- ONLY enter if this is not linked to a", "blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='journals') def", "status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255,", "complete = models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False) def __str__(self): return self.tag.name class Meta:", "= [\"date\"] def __str__(self): return self.title class NewsletterSubscriber(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) datasets", "\" - \" + self.type + \" - \" + self.reference.title class MaterialGroup(models.Model):", "= models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering", "models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True,", "of flows between sectors, industries or acticity fields, or other system components\") classification", "image = models.ImageField(null=True, blank=True, upload_to='articles') parent = 
models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True", "blank=True) based_on = models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What gaps does in", "description = models.TextField(null=True, blank=True) def __str__(self): return self.name class CaseStudy(models.Model): title = models.CharField(max_length=255)", "leader(s)', 'url': 'URL', } class ProjectUserForm(ModelForm): class Meta: model = Project fields =", "abbreviation -- text between parenthesis -- if there is any\" if \"(\" in", "models.CharField(max_length=20, choices=TYPES) def __str__(self): return self.organization.name + \" - \" + self.type +", "methodologies does this particular methodology address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed next steps", "models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) member_since", "blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) description =", "import CurrentSiteManager from django.conf import settings # Used for image resizing from stdimage.models", "print_aim = models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True, blank=True) RELEVANCE = ( ('u', 'Unknown'),", "('regional_government', 'Regional Government'), ('national_government', 'National Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private Sector'), ('publisher',", "typically included in the techique'), ('1', '1 - The item is included only", "specific substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and basic compounds only\") materials =", "fields = ['title', 'website', 'url', 'primary_space', 'description', 
'author', 'date', 'thumbnail', 'license'] labels =", "on_delete=models.CASCADE) TYPES = ( ('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type =", "(800, 800)}, null=True, blank=True) image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True,", "models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional entity (driving force boundary for induced flows)\") temporal_study_boundary", "output_reports = models.TextField(null=True, blank=True) output_articles = models.TextField(null=True, blank=True) funding_program = models.CharField(max_length=255, null=True, blank=True)", "models.TextField(null=True, blank=True) STATUS = ( ('pending', 'Pending'), ('active', 'Active'), ('deleted', 'Deleted'), ) status", "models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE)", "models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True) objects", "self.project.name class Timeline(models.Model): title = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) link = models.CharField(max_length=255,", "blank=True, help_text=\"Elements and basic compounds only\") materials = models.CharField(\"materials / bulk materials\", max_length=1,", "blank=True) recycling = models.CharField(\"recyling of material and energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes", "'isbn': 'ISBN', 'url': 'URL', } class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) reference =", "models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date 
= models.CharField(max_length=60, null=True, blank=True) location = models.CharField(max_length=255, null=True, blank=True) url", "= models.CharField(max_length=255) def __str__(self): return self.name class MethodData(models.Model): name = models.CharField(max_length=255) def __str__(self):", "'Wikimedia Commons'), ('other', 'Other website'), ) website = models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site,", "indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\") pros = models.TextField(null=True, blank=True, help_text=\"Indicators\") cons = models.TextField(null=True,", "publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self):", "help_text=\"URL of the source website/article -- ONLY enter if this is not linked", "classification = models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity =", "'Yes - completed'), ) cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True) logo = StdImageField(upload_to='projects',", "'City Government'), ('regional_government', 'Regional Government'), ('national_government', 'National Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private", "SECTIONS = ( ('about', 'About'), ('community', 'Community'), ('research', 'Research'), ('resources', 'Resources'), ('cities', 'Cities'),", "'No'), ('pending', 'Yes - pending'), ('yes', 'Yes - completed'), ) cityloops = models.CharField(max_length=20,", "source = models.TextField(null=True, blank=True, help_text=\"Name of the source website/article -- ONLY enter if", "blank=True, help_text=\"If the journal does not appear in the list, please leave empty", "space', 'url': 'Video URL' } class Tag(models.Model): name = 
models.CharField(max_length=255) description = HTMLField('description',", "= models.TextField(null=True, blank=True) RELEVANCE = ( ('u', 'Unknown'), ('l', 'Low'), ('m', 'Medium'), ('h',", "null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) def", "name = models.CharField(max_length=255) website = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) publisher", "def __str__(self): return self.name class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name", "models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses =", "content = HTMLField('Content', help_text=\"The content field is a required field - be sure", "('2', '2 - The feature is typically included in the techique'), ('1', '1", "null=True, blank=True) start_date = models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True) STATUS = (", "= StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image3 = StdImageField(upload_to='projects', variations={'thumb':", "blank=True) link = models.CharField(max_length=255, null=True, blank=True) date = models.DateField() def __str__(self): return self.title", "models.TextField(null=True, blank=True) def __str__(self): return self.name class CaseStudy(models.Model): title = models.CharField(max_length=255) method =", "import re from django.urls import reverse class TimestampedModel(models.Model): # A timestamp representing when", "image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image3 = StdImageField(upload_to='projects',", "blank=True, help_text='Additional comments about the importing process') url 
= models.CharField(max_length=500, null=True, blank=True) doi", "( (1, 'Publication Types'), (2, 'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'), (5, 'Scales'),", "['name'] class ProjectForm(ModelForm): class Meta: model = Project exclude = ['id', 'site', 'references',", "be left empty\") def __str__(self): return self.name @property def shortcode(self): \"Returns abbreviation --", "class Meta: model = Video fields = ['title', 'website', 'url', 'primary_space', 'description', 'author',", "= ['id'] class PeopleNote(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note =", "'image', 'date', 'head', 'includes_form', 'slug', 'active','content'] class Event(models.Model): article = models.OneToOneField( Article, on_delete=models.CASCADE,", "A timestamp reprensenting when this object was last updated. updated_at = models.DateTimeField(auto_now=True) class", "blank=True) def __str__(self): return self.name class Meta: ordering = [\"name\"] class Organization(models.Model): name", "__str__(self): return self.title class Meta: ordering = [\"position\"] class VideoCollectionForm(ModelForm): class Meta: model", "'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 'comments',", "'Projects'), ('applied', 'Applied research'), ) type = models.CharField(max_length=20, choices=TYPE) THESISTYPE = ( ('bachelor',", "self.name: s = self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class Meta: ordering =", "'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 'end_date', 'status', 'url'] labels = { 'name': 'Project", "= ( ('active', 'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'), ('pending', 'Pending Review'),", "people = models.ForeignKey(People, on_delete=models.CASCADE) datasets = models.BooleanField() news = 
models.BooleanField() events = models.BooleanField()", ") authors = models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True) SECTIONS = ( ('about', 'About'),", "= { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceOrganization(models.Model):", "= models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by = models.CharField(max_length=255, null=True, blank=True) based_on = models.TextField(null=True,", "models.CharField(max_length=20, choices=STATUS, default='ongoing') active = models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE = ( ('theses',", "= models.TextField(null=True, blank=True) website = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True)", "models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS = ( (1, 'Publication Types'), (2,", "HTMLField('weaknesses', null=True, blank=True) STATUS = ( ('nw', 'Not worked on'), ('ip', 'In progress'),", "self.name class CaseStudy(models.Model): title = models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference", "('academic', 'Research Institution'), ('universities', 'Universities'), ('city_government', 'City Government'), ('regional_government', 'Regional Government'), ('national_government', 'National", "\"Method families\" verbose_name = \"method family\" class Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id':", "on_delete=models.CASCADE, null=True, blank=True) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True)", "Meta: ordering = [\"date\"] class Article(models.Model): title = models.CharField(max_length=255) slug = 
models.SlugField(db_index=True, max_length=255,", "of material and energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes = models.CharField(\"stock changes\", max_length=1,", "= models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget =", "= models.CharField(\"quantitative weighting of impacts of material flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit", "@property def shortcode(self): \"Returns abbreviation -- text between parenthesis -- if there is", "models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces =", "models.TextField(null=True, blank=True, help_text=\"Indicators\") pros = models.TextField(null=True, blank=True, help_text=\"Indicators\") cons = models.TextField(null=True, blank=True, help_text=\"Indicators\")", "('1', '1 - The item is included only occasionally in the mode of", "mode of analysis, and in a partial or conditional way'), ('0', '0 -", "= models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points", "models.CharField(max_length=255, null=True, blank=True) based_on = models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What gaps", "self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class Meta: ordering = [\"name\"] class MethodClassification(models.Model):", "'Community'), ('research', 'Research'), ('resources', 'Resources'), ('cities', 'Cities'), ('whatwedo', 'What We Do'), ('newsevents', 'News", "'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class 
ReferenceFormAdmin(ModelForm): class Meta: model =", "null=True, blank=True) institution = models.CharField(max_length=255, null=True, blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher", "exclude = ['id', 'site', 'references', 'organizations'] labels = { 'name': 'Project title', 'thesistype':", "= models.TextField(null=True, blank=True) def __str__(self): return self.name class CaseStudy(models.Model): title = models.CharField(max_length=255) method", "= models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE = ( ('conference', 'Conference'), ('hackathon',", "models.DateField(blank=True, null=True) STATUS = ( ('planned', 'Planned'), ('ongoing', 'In progress'), ('finished', 'Finished'), ('cancelled',", "blank=True) print_relevance = models.TextField(null=True, blank=True) RELEVANCE = ( ('u', 'Unknown'), ('l', 'Low'), ('m',", "from django.conf import settings # Used for image resizing from stdimage.models import StdImageField", "'Other'), ) type = models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return self.name class Meta: ordering", "on'), ('ip', 'In progress'), ('dr', 'Draft ready for review'), ('rv', 'Reviewed - DONE'),", "('phd', 'PhD'), ('other', 'Other'), ) thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True) url =", "('cities', 'Cities'), ('whatwedo', 'What We Do'), ('newsevents', 'News and Events'), ) section =", "= ['title', 'introduction', 'content', 'image', 'active'] class SimpleArticleForm(ModelForm): class Meta: model = Article", "'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'), ) start = models.DateField(null=True, blank=True) end = models.DateField(null=True,", "Meta: ordering = [\"name\"] class MethodClassification(models.Model): name = models.CharField(max_length=255) description = models.TextField() def", "process_group = 
models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True)", "return self.name class Meta: verbose_name_plural = \"Method families\" verbose_name = \"method family\" class", "models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True,", "reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True, blank=True) parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True,", "= models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True) STATUS = ( ('planned', 'Planned'), ('ongoing',", "is a good # default ordering for most models. ordering = ['-created_at', '-updated_at']", "= \"method family\" class Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS", "ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table =", "on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3})", "date = models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) class Meta:", "'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization,", "null=True, blank=True) between_flows = models.CharField(\"between-flows\", 
max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of flows between", "models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING = ( ('3', '3 -", "blank=True) def __str__(self): return self.title class Meta: ordering = [\"-year\", \"title\"] def source(self):", "limit_choices_to={'type': 2}) profile = models.TextField(null=True, blank=True) research_interests = models.TextField(null=True, blank=True) website = models.CharField(max_length=255,", "Reference exclude = ['id', 'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces', 'tags', 'materials'] labels", "'Universities'), ('city_government', 'City Government'), ('regional_government', 'Regional Government'), ('national_government', 'National Government'), ('statistical_agency', 'Statistical Agency'),", "through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False})", "linked to a publication\") source = models.TextField(null=True, blank=True, help_text=\"Name of the source website/article", "slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction = models.TextField(null=True, blank=True) head = models.TextField(null=True,", "max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could also be considered as consumption-based accounting?)\") hidden_flows", "('active', 'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'), ('pending', 'Pending Review'), ) status", "= Article fields = ['title', 'image', 'date', 'head', 'includes_form', 'slug', 'active','content'] class Event(models.Model):", "Project(models.Model): name = models.CharField(max_length=255, null=True, blank=True) full_name = models.CharField(max_length=255, null=True, 
blank=True) institution =", "reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups = models.ManyToManyField(MaterialGroup,", "tinymce import HTMLField from django.contrib.auth.models import User from django.contrib.auth import get_user_model User =", "= models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True) print_aim =", "models.TextField(null=True, blank=True) website = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) google_scholar", "= models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True, blank=True) parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True)", "'date_added', 'event', 'authors', 'spaces', 'tags', 'materials'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI',", "= models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True,", "'url': 'URL', } class ProjectUserForm(ModelForm): class Meta: model = Project fields = ['name',", "models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True, blank=True) parent", "models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING = ( ('3', '3 - The item is a", "the comments\") event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year = models.PositiveSmallIntegerField() abstract =", "reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget = models.DecimalField(max_digits=10, decimal_places=2, 
null=True, blank=True) print_aim", "only\") materials = models.CharField(\"materials / bulk materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy =", "lastname = models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True)", "def __str__(self): return self.name class Meta: ordering = [\"name\"] class Journal(models.Model): name =", "`TimestampedModel` should # be ordered in reverse-chronological order. We can override this on", "limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) materials = models.ManyToManyField('staf.Material', blank=True) spaces", "= models.DateField(null=True, blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60, null=True, blank=True) location", "= self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name class Meta: ordering = [\"name\"] class", "hidden = models.BooleanField(db_index=True, default=False, help_text=\"Mark if tag is superseded/not yet approved/deactivated\") include_in_glossary =", "'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def __str__(self): return self.organization.name +", "empty and add the name in the comments\") event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True,", "class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = (", "variations={'thumb': (300, 300), 'large': (1024, 1024)}) uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace',", "models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher = 
models.CharField(max_length=255, null=True, blank=True) supervisor = models.CharField(max_length=255, null=True, blank=True)", "on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos are best -", "email = models.CharField(max_length=255, null=True, blank=True) description = HTMLField('description', null=True, blank=True) target_finish_date = models.CharField(max_length=255,", "= models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses", "ProjectUserForm(ModelForm): class Meta: model = Project fields = ['name', 'researcher', 'type', 'thesistype', 'institution',", "null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) description =", "= models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def __str__(self): return self.people.firstname + \" \" + self.people.lastname", "is a required field - be sure to fill this out\") image =", "MaterialGroup(models.Model): name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def __str__(self): return self.name class", "-- ONLY enter if this is not linked to a publication\") year =", "Project leader(s)', 'url': 'URL', } class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project =", "leader(s)', 'url': 'URL', } class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project,", "import HTMLField from django.contrib.auth.models import User from django.contrib.auth import get_user_model User = get_user_model()", "class Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE, 
limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS = ( ('3',", "models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) description = HTMLField('description', null=True, blank=True)", "blank=True) website = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) google_scholar =", "return self.name class Meta: ordering = [\"name\"] class OrganizationForm(ModelForm): class Meta: model =", "VideoUploadForm(ModelForm): class Meta: model = Video fields = ['title', 'website', 'url', 'primary_space', 'description',", "blank=True) also_known_as = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) complete = models.NullBooleanField(null=True, blank=True)", "authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden':", "- will not be published'), ) status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position", "{ 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceFormAdmin(ModelForm): class", "city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL,", "content field is a required field - be sure to fill this out\")", "import models from multiplicity.models import ReferenceSpace, License from django.forms import ModelForm from django.template.defaultfilters", "blank=True) material_scope = models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING = ( ('3', '3 - The", "class Meta: verbose_name_plural = \"case studies\" class UserAction(models.Model): name = 
models.CharField(max_length=255) def __str__(self):", "not linked to a publication\") source = models.TextField(null=True, blank=True, help_text=\"Name of the source", "'Skip - will not be published'), ) status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True)", "models.CharField(max_length=255, null=True, blank=True) supervisor = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True)", ") start = models.DateField(null=True, blank=True) end = models.DateField(null=True, blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE)", "description = models.TextField(null=True, blank=True) link = models.CharField(max_length=255, null=True, blank=True) date = models.DateField() def", "= models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) member_since = models.DateField(null=True, blank=True, db_index=True)", "models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True, null=True) authorlist = models.TextField() type = models.ForeignKey(ReferenceType,", "a publication\") year = models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of the data being visualized --", "blank=True) complete = models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False) def __str__(self): return self.tag.name class", "= models.ManyToManyField(People, blank=True) VIDEOSITES = ( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'),", "blank=True) def __str__(self): return self.article.title class EventForm(ModelForm): class Meta: model = Event exclude", "models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by = models.CharField(max_length=255, null=True, blank=True)", 
"__str__(self): return self.article.title class EventForm(ModelForm): class Meta: model = Event exclude = ['article']", "models.CharField(max_length=255, null=True, blank=True) GROUP = ( ('academic', 'Academic'), ('theses', 'Theses'), ('reports', 'Reports'), ('multimedia',", "image3 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) site = models.ForeignKey(Site,", "models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) output_tools = models.TextField(null=True, blank=True) output_reports = models.TextField(null=True,", "- Not included at all'), ) substances = models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING,", "name in the comments\") event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year = models.PositiveSmallIntegerField()", "= models.TextField(null=True, blank=True, help_text='Additional comments about the importing process') url = models.CharField(max_length=500, null=True,", "'Finished'), ('cancelled', 'Cancelled'), ) status = models.CharField(max_length=20, choices=STATUS, default='ongoing') active = models.BooleanField(default=True) pending_review", "the data being visualized -- ONLY enter if this is not linked to", "= models.DateField(null=True, blank=True, db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True,", "null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager() on_site = CurrentSiteManager() def", "= models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() date = models.DateField(null=True,", "models.TextField(null=True, blank=True) link = models.CharField(max_length=255, null=True, blank=True) date = models.DateField() def __str__(self): return", "consideration = 
models.TextField(null=True, blank=True, help_text=\"Circular economy / closing loop consideration\") target_audience = models.TextField(null=True,", "blank=True) description = HTMLField('description', null=True, blank=True) target_finish_date = models.CharField(max_length=255, null=True, blank=True) start_date =", "ordering = [\"name\"] class OrganizationForm(ModelForm): class Meta: model = Organization exclude = ['id',", "'description', 'author', 'date', 'thumbnail', 'license'] labels = { 'primary_space': 'Reference space', 'url': 'Video", "VideoCollection(models.Model): title = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE,", "License from django.forms import ModelForm from django.template.defaultfilters import slugify from tinymce import HTMLField", "default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True) objects = models.Manager()", "return self.name class Meta: ordering = [\"name\"] class People(models.Model): firstname = models.CharField(max_length=255) lastname", "PeopleForm(ModelForm): class Meta: model = People exclude = ['id'] class PeopleNote(models.Model): people =", "date = models.DateField(null=True) people = models.ManyToManyField(People, blank=True) VIDEOSITES = ( ('youtube', 'YouTube'), ('vimeo',", "= models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) google_scholar = models.CharField(max_length=255, null=True,", "'date', 'head', 'includes_form', 'slug', 'active','content'] class Event(models.Model): article = models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event',", "The item is a defining feature of the approach'), ('2', '2 - The", "models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, 
related_name='people_city', limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace,", "'Other'), ) thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True)", "Meta: ordering = [\"date\"] def __str__(self): return self.title class NewsletterSubscriber(models.Model): people = models.ForeignKey(People,", "choices=PARENTS, null=True, blank=True, help_text=\"This was a previous classification - can be left empty\")", "'Inactive'), ('pending', 'Pending Review'), ) status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site,", "def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people =", "weaknesses = HTMLField('weaknesses', null=True, blank=True) def __str__(self): return self.name class Meta: verbose_name_plural =", "'url': 'URL', } class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE)", "= models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) output_tools =", "cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True) logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large':", "blank=True, help_text=\"URL of the source website/article -- ONLY enter if this is not", "on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() date = models.DateField(null=True, blank=True) created_at", "= [\"title\"] class ArticleForm(ModelForm): class Meta: model = Article fields = ['title', 'introduction',", "null=True, blank=True) def 
__str__(self): return self.title class VideoForm(ModelForm): class Meta: model = Video", "a publication\") class Meta: ordering = [\"date\"] def __str__(self): return self.title class NewsletterSubscriber(models.Model):", "{ 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project", "= models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, help_text=\"If", "def __str__(self): return self.organization.name + \" - \" + self.type + \" -", "def __str__(self): return self.title class DataViz(models.Model): title = models.CharField(max_length=255) image = StdImageField(upload_to='dataviz', variations={'thumb':", "return self.name class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class MethodData(models.Model):", "blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return self.title class Meta:", "type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectUserForm(ModelForm):", "('wikimedia', 'Wikimedia Commons'), ('other', 'Other website'), ) website = models.CharField(max_length=20, choices=VIDEOSITES) site =", "any\" if \"(\" in self.name: s = self.name return s[s.find(\"(\")+1:s.find(\")\")] else: return self.name", "Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'), ('other', 'Other'), ) type = models.CharField(max_length=20, choices=ORG_TYPE) def", "action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True, blank=True) model_id", "objects = models.Manager() on_site = CurrentSiteManager() date = models.DateField(null=True, blank=True) created_at = 
models.DateTimeField(auto_now_add=True)", "= { 'primary_space': 'Reference space', 'url': 'Video URL' } class Tag(models.Model): name =", "on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField()", "hidden=False) class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE) class Meta:", "occasionally in the mode of analysis, and in a partial or conditional way'),", "is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS = ( (1, 'Publication Types'), (2, 'Metabolism Studies'),", "3}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they continue", "help_text='Additional comments about the importing process') url = models.CharField(max_length=500, null=True, blank=True) doi =", "models.CharField(max_length=20) def __str__(self): return self.name class Project(models.Model): name = models.CharField(max_length=255, null=True, blank=True) full_name", "A timestamp representing when this object was created. 
created_at = models.DateTimeField(auto_now_add=True) # A", "blank=True) STATUS = ( ('pending', 'Pending'), ('active', 'Active'), ('deleted', 'Deleted'), ) status =", "limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional entity (driving", "= models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group =", "models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) output_tools = models.TextField(null=True,", "models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos are best", "on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return self.title class VideoForm(ModelForm): class Meta: model =", "blank=True) google_scholar = models.CharField(max_length=255, null=True, blank=True) orcid = models.CharField(max_length=255, null=True, blank=True) researchgate =", "blank=True) target_finish_date = models.CharField(max_length=255, null=True, blank=True) start_date = models.DateField(blank=True, null=True) end_date = models.DateField(blank=True,", "- be sure to fill this out\") image = models.ImageField(null=True, blank=True, upload_to='articles') parent", "null=True, blank=True) image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image2", "[\"name\"] class People(models.Model): firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True,", "models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = ( 
('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type", "to fill this out\") image = models.ImageField(null=True, blank=True, upload_to='articles') parent = models.ForeignKey( 'core.Article',", "include_in_glossary = models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS = ( (1, 'Publication", "= HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True,", "'Spanish'), ('CH', 'Chinese'), ('FR', 'French'), ('GE', 'German'), ('NL', 'Dutch'), ('OT', 'Other'), ) language", "\" + self.type + \" - \" + self.reference.title class MaterialGroup(models.Model): name =", "= ( ('planned', 'Planned'), ('ongoing', 'In progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'), ) status", "of the study\") def __str__(self): return self.title class Meta: verbose_name_plural = \"case studies\"", "models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True) logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800, 800)},", "[\"position\"] class VideoCollectionForm(ModelForm): class Meta: model = VideoCollection exclude = ['id', 'site'] class", "= models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True, blank=True) model_id =", "= models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date =", "= models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def __str__(self):", "= models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of the data being visualized 
-- ONLY enter if", "models.ForeignKey(People, on_delete=models.CASCADE) datasets = models.BooleanField() news = models.BooleanField() events = models.BooleanField() publications =", "strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) def __str__(self): return", "= models.ForeignKey(People, on_delete=models.CASCADE) datasets = models.BooleanField() news = models.BooleanField() events = models.BooleanField() publications", "= models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher = models.CharField(max_length=255, null=True, blank=True) supervisor = models.CharField(max_length=255, null=True,", "null=True, blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave sources of", "models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes = models.TextField(null=True, blank=True) internal_notes", "= models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace,", "models.BooleanField(default=True) SECTIONS = ( ('about', 'About'), ('community', 'Community'), ('research', 'Research'), ('resources', 'Resources'), ('cities',", "(1600,1600)}, null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager() on_site = CurrentSiteManager()", "models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails')", "return self.journal.name elif self.event: return self.event.name else: return self.type.name def accountingMethods(self): return 
self.tags.filter(is_accounting_method=True,", "models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True)", "MethodClassification(models.Model): name = models.CharField(max_length=255) description = models.TextField() def __str__(self): return self.name class MethodTemporalBoundary(models.Model):", "self.name class Meta: ordering = [\"name\"] class Journal(models.Model): name = models.CharField(max_length=255) website =", "flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True)", "proposed next steps to further develop/improve this methodology\") representative_paper = models.TextField(null=True, blank=True, help_text=\"Which", "models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling = models.CharField(\"recyling of material and", "import reverse class TimestampedModel(models.Model): # A timestamp representing when this object was created.", "from django.template.defaultfilters import slugify from tinymce import HTMLField from django.contrib.auth.models import User from", "models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments about the importing process') url", "was created. 
created_at = models.DateTimeField(auto_now_add=True) # A timestamp reprensenting when this object was", "UserAction(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class UserLog(models.Model): user = models.ForeignKey(User,", "max_length=255, null=True, blank=True) introduction = models.TextField(null=True, blank=True) head = models.TextField(null=True, blank=True) includes_form =", "models.TextField(null=True, blank=True) funding_program = models.CharField(max_length=255, null=True, blank=True) methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes =", "'primary_space', 'description', 'author', 'date', 'thumbnail', 'license'] labels = { 'primary_space': 'Reference space', 'url':", "('conference', 'Conference'), ('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'), ) start =", "will not be published'), ) status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position =", "('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def __str__(self): return self.organization.name + \"", "models.TextField(null=True, blank=True, help_text=\"Purpose of the study\") def __str__(self): return self.title class Meta: verbose_name_plural", "Meta: model = Project exclude = ['id', 'site', 'references', 'organizations'] labels = {", "class Meta: model = Organization exclude = ['id', 'processes'] class Publisher(models.Model): name =", "django.contrib.sites.managers import CurrentSiteManager from django.conf import settings # Used for image resizing from", "'URL', } class ProjectUserForm(ModelForm): class Meta: model = Project fields = ['name', 'researcher',", "class Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES = ( ('EN', 'English'), ('ES', 'Spanish'), ('CH',", "( ('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = 
models.CharField(max_length=20, choices=TYPES) def", "event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year = models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True)", "blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) researchgate =", "self.type + \" - \" + self.reference.title class MaterialGroup(models.Model): name = models.CharField(max_length=255) description", "models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of flows between sectors, industries or acticity", "enter if this is not linked to a publication\") year = models.PositiveSmallIntegerField(null=True, blank=True,", "blank=True) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses =", "= models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True,", "upload the file if you are the creator or you have permission to", "= models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date =", "models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year = models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True) abstract_original_language =", "= models.CharField(max_length=255, null=True, blank=True) email_public = models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True,", "= ['article'] class VideoCollection(models.Model): title = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) site", "help_text=\"What gaps 
does in other methodologies does this particular methodology address?\") next_steps =", "'target_finish_date', 'start_date', 'end_date', 'status', 'url'] labels = { 'name': 'Project title', 'thesistype': 'Thesis", "models.TextField(null=True, blank=True, help_text=\"Name of the source website/article -- ONLY enter if this is", "objects = models.Manager() on_site = CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections", "null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2}) profile = models.TextField(null=True, blank=True) research_interests = models.TextField(null=True, blank=True)", "models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) materials = models.ManyToManyField('staf.Material',", "be sure to fill this out\") image = models.ImageField(null=True, blank=True, upload_to='articles') parent =", "null=True, blank=True) target_finish_date = models.CharField(max_length=255, null=True, blank=True) start_date = models.DateField(blank=True, null=True) end_date =", "'url': 'URL', } class ReferenceFormAdmin(ModelForm): class Meta: model = Reference exclude = ['id',", "leave empty and add the name in the comments\") event = models.ForeignKey(Event, on_delete=models.CASCADE,", "methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget", "( ('conference', 'Conference'), ('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'), ) start", "= models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = ( ('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), )", "journal does not appear in the list, please leave empty 
and add the", "'Deleted'), ) status = models.CharField(max_length=8, choices=STATUS, db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations =", "does in other methodologies does this particular methodology address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The", "Reference fields = ['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year', 'abstract', 'abstract_original_language', 'open_access',", "item is a defining feature of the approach'), ('2', '2 - The feature", "[\"name\"] class Journal(models.Model): name = models.CharField(max_length=255) website = models.CharField(max_length=255, null=True, blank=True) description =", "= CurrentSiteManager() def __str__(self): return '%s %s' % (self.firstname, self.lastname) class Meta: ordering", "= Article fields = ['title', 'introduction', 'content', 'image', 'active'] class SimpleArticleForm(ModelForm): class Meta:", "'Time Horizon'), (9, 'Methodologies'), (10, 'Other'), ) parent = models.CharField(max_length=2, choices=PARENTS, null=True, blank=True,", "UM systems'), ('2', 'Flows of substances'), ('1', 'Environmental impacts'), ) method_class = models.CharField(max_length=1,", "models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE,", "= ( ('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'), ('other', 'Other'), ) thesistype =", "abstract = True # By default, any model that inherits from `TimestampedModel` should", "classification - can be left empty\") def __str__(self): return self.name @property def shortcode(self):", "def __str__(self): return self.name class UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space =", "= models.CharField(max_length=255, null=True, 
blank=True) url = models.CharField(max_length=255, null=True, blank=True) def __str__(self): return self.article.title", "on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) class", "= models.DateTimeField(null=True, blank=True, auto_now_add=True) file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the file", "blank=True) VIDEOSITES = ( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other', 'Other", "blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return self.title class", "models.DateTimeField(null=True, blank=True, auto_now_add=True) file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the file if", "= models.BooleanField() dataviz = models.BooleanField() multimedia = models.BooleanField() projects = models.BooleanField() theses =", "'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 'comments', 'file'] labels = { 'authorlist': 'Author(s)', 'doi':", "null=True, blank=True, limit_choices_to={'hidden': False}, related_name='children' ) hidden = models.BooleanField(db_index=True, default=False, help_text=\"Mark if tag", "fields = ['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi',", "'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectUserForm(ModelForm): class Meta: model", "internal_notes = models.TextField(null=True, blank=True) complete = models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False) def __str__(self):", "blank=True) output_reports = models.TextField(null=True, blank=True) output_articles = models.TextField(null=True, 
blank=True) funding_program = models.CharField(max_length=255, null=True,", "exclude = ['id', 'processes'] class Publisher(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name", "blank=True) location = models.CharField(max_length=255, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) def __str__(self):", "= models.CharField(max_length=60, null=True, blank=True) location = models.CharField(max_length=255, null=True, blank=True) url = models.CharField(max_length=255, null=True,", "= models.Manager() on_site = CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections =", "'file'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', }", "models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they continue to implement it?\")", "'website', 'url', 'primary_space', 'description', 'author', 'date', 'thumbnail', 'license'] labels = { 'primary_space': 'Reference", "False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True, blank=True) parent = models.ForeignKey('self', on_delete=models.SET_NULL,", "per-model basis as needed, but reverse-chronological is a good # default ordering for", "in the techique'), ('1', '1 - The item is included only occasionally in", "('u', 'Unknown'), ('l', 'Low'), ('m', 'Medium'), ('h', 'High'), ) relevance = models.CharField(max_length=1, choices=RELEVANCE,", "description = models.TextField() author = models.CharField(max_length=255) date = models.DateField(null=True) people = models.ManyToManyField(People, blank=True)", "= models.CharField(max_length=500, null=True, blank=True) doi = models.CharField(max_length=255, null=True, blank=True) isbn = models.CharField(max_length=255, null=True,", "blank=True,help_text=\"The proposed next steps to 
further develop/improve this methodology\") representative_paper = models.TextField(null=True, blank=True,", "'active','content'] class Event(models.Model): article = models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE =", "= models.Manager() on_site = CurrentSiteManager() def __str__(self): return self.name def get_absolute_url(self): return reverse(\"core:project\",", "email_public = models.BooleanField() city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3}) country", "models.CharField(max_length=255, null=True, blank=True) comments = models.TextField(null=True, blank=True) STATUS = ( ('pending', 'Pending'), ('active',", "includes_form = models.BooleanField(default=False) content = HTMLField('Content', help_text=\"The content field is a required field", "= models.TextField(null=True, blank=True) member_since = models.DateField(null=True, blank=True, db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE, null=True,", "methodology\") representative_paper = models.TextField(null=True, blank=True, help_text=\"Which paper is a representative case study using", "blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3})", "('research', 'Research'), ('resources', 'Resources'), ('cities', 'Cities'), ('whatwedo', 'What We Do'), ('newsevents', 'News and", "null=True, blank=True) methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags = models.ManyToManyField(Tag,", "'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectUserForm(ModelForm): class Meta:", "description = HTMLField(null=True, blank=True) parent = 
models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE = (", "'processes'] class Publisher(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class Meta: ordering", ") status = models.CharField(max_length=8, choices=STATUS, db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization,", "'In progress'), ('dr', 'Draft ready for review'), ('rv', 'Reviewed - DONE'), ('ec', 'External", "= models.CharField(max_length=255, null=True, blank=True) def __str__(self): return self.article.title class EventForm(ModelForm): class Meta: model", "'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceFormAdmin(ModelForm): class Meta: model = Reference", "blank=True) date = models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model =", "partial or conditional way'), ('0', '0 - Not included at all'), ) substances", "= models.CharField(max_length=255, null=True, blank=True) google_scholar = models.CharField(max_length=255, null=True, blank=True) orcid = models.CharField(max_length=255, null=True,", "parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE = ( ('academic', 'Research Institution'), ('universities',", "models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True) file =", "= ['id', 'site', 'references', 'organizations'] labels = { 'name': 'Project title', 'thesistype': 'Thesis", "= [\"firstname\", \"lastname\"] class PeopleForm(ModelForm): class Meta: model = People exclude = ['id']", "( ('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'), ('other', 'Other'), ) thesistype = models.CharField(max_length=20,", "CurrentSiteManager() def __str__(self): return self.name 
def get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id]) class Meta:", "default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site = CurrentSiteManager() show_in_list = models.BooleanField(default=True)", "techique'), ('1', '1 - The item is included only occasionally in the mode", "accounting?)\") hidden_flows = models.CharField(\"accounts for hidden flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts =", "= models.CharField(max_length=255, null=True, blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher = models.CharField(max_length=255, null=True,", "= models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True,", "= models.CharField(max_length=255) description = models.TextField() def __str__(self): return self.name class MethodTemporalBoundary(models.Model): name =", "class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class MethodData(models.Model): name =", "process') url = models.CharField(max_length=500, null=True, blank=True) doi = models.CharField(max_length=255, null=True, blank=True) isbn =", "= [\"-year\", \"title\"] def source(self): \"Return details of where this reference was published", "models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave", "models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self): return self.name class Meta: ordering = [\"name\"] class", "False}) materials = models.ManyToManyField('staf.Material', blank=True) spaces = 
models.ManyToManyField(ReferenceSpace, blank=True) def __str__(self): return self.title", "__str__(self): return self.name class CaseStudy(models.Model): title = models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id':", "('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def __str__(self):", "class Tag(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) parent_tag = models.ForeignKey('self',", "space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date", "ordered in reverse-chronological order. We can override this on a # per-model basis", "stdimage.models import StdImageField import re from django.urls import reverse class TimestampedModel(models.Model): # A", "models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2}) profile = models.TextField(null=True, blank=True) research_interests =", "= ['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 'end_date', 'status',", "processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True,", "enter if this is not linked to a publication\") class Meta: ordering =", "= models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False})", "= HTMLField('weaknesses', null=True, 
blank=True) STATUS = ( ('nw', 'Not worked on'), ('ip', 'In", "('academic', 'Academic'), ('theses', 'Theses'), ('reports', 'Reports'), ('multimedia', 'Multimedia'), ) group = models.CharField(max_length=20, choices=GROUP,", "2}) profile = models.TextField(null=True, blank=True) research_interests = models.TextField(null=True, blank=True) website = models.CharField(max_length=255, null=True,", "class Meta: ordering = ['name'] class ProjectForm(ModelForm): class Meta: model = Project exclude", "= models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2}) profile = models.TextField(null=True, blank=True) research_interests", "help_text=\"Specification of flows between sectors, industries or acticity fields, or other system components\")", "{ 'primary_space': 'Reference space', 'url': 'Video URL' } class Tag(models.Model): name = models.CharField(max_length=255)", "entity (driving force boundary for induced flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True)", "= models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False) content = HTMLField('Content', help_text=\"The content field is", "return self.name class UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE,", "NewsletterSubscriber(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) datasets = models.BooleanField() news = models.BooleanField() events =", "= models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of flows between sectors, industries or", "class Meta: ordering = [\"-date\"] class Color(models.Model): name = models.CharField(max_length=20) css = models.CharField(max_length=20)", 
"models.DateTimeField(auto_now=True) def __str__(self): return self.title class Meta: ordering = [\"title\"] class ArticleForm(ModelForm): class", "= models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes = models.TextField(null=True, blank=True)", "= models.TextField(null=True, blank=True,help_text=\"The proposed next steps to further develop/improve this methodology\") representative_paper =", "('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization'), ) type = models.CharField(max_length=20, choices=TYPES) def __str__(self):", "= models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description =", "Timeline(models.Model): title = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) link = models.CharField(max_length=255, null=True, blank=True)", "on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True)", "Meta: model = Video fields = ['title', 'website', 'url', 'primary_space', 'description', 'author', 'date',", "production = models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows = models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING,", "models.ManyToManyField(ReferenceSpace, blank=True) def __str__(self): return self.title class Meta: ordering = [\"-year\", \"title\"] def", "blank=True, help_text=\"Indicators\") cons = models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True, help_text=\"Purpose of", "completed'), ) cityloops = models.CharField(max_length=20, 
choices=CITYLOOPS, null=True, blank=True) logo = StdImageField(upload_to='projects', variations={'thumb': (300,", "Tag(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE,", "if you are the creator or you have permission to do so') open_access", "blank=True) introduction = models.TextField(null=True, blank=True) head = models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False) content", "ordering = [\"title\"] class ArticleForm(ModelForm): class Meta: model = Article fields = ['title',", "\"method family\" class Method(models.Model): tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS =", "class ReferenceForm(ModelForm): class Meta: model = Reference fields = ['language', 'title', 'title_original_language', 'authorlist',", "- please resize to 350x350 pixels\") PEOPLE_STATUS = ( ('active', 'Active'), ('retired', 'Retired'),", "db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square", "end = models.DateField(null=True, blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date = models.CharField(max_length=60, null=True, blank=True)", "blank=True) between_flows = models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of flows between sectors,", "= models.CharField(max_length=255) date = models.DateField(null=True) people = models.ManyToManyField(People, blank=True) VIDEOSITES = ( ('youtube',", "blank=True) class Meta: ordering = [\"-date\"] class Color(models.Model): name = models.CharField(max_length=20) css =", "develop/improve this methodology\") representative_paper = models.TextField(null=True, 
blank=True, help_text=\"Which paper is a representative case", "Site from django.contrib.sites.managers import CurrentSiteManager from django.conf import settings # Used for image", "(300, 300), 'large': (1024, 1024)}) uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE,", "ReferenceForm(ModelForm): class Meta: model = Reference fields = ['language', 'title', 'title_original_language', 'authorlist', 'type',", "3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2}) profile = models.TextField(null=True,", "default, any model that inherits from `TimestampedModel` should # be ordered in reverse-chronological", ") group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def __str__(self): return self.name class Meta:", "target_finish_date = models.CharField(max_length=255, null=True, blank=True) start_date = models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True)", "import User from django.contrib.auth import get_user_model User = get_user_model() from django.contrib.sites.models import Site", "= models.BooleanField() multimedia = models.BooleanField() projects = models.BooleanField() theses = models.BooleanField() reference_spaces =", "'Reports'), ('multimedia', 'Multimedia'), ) group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def __str__(self): return", "substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and basic compounds only\") materials = models.CharField(\"materials", "null=True) STATUS = ( ('planned', 'Planned'), ('ongoing', 'In progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'),", "models.CharField(max_length=255) image = StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024, 1024)}) 
uploaded_by = models.ForeignKey(People,", "models.CharField(max_length=500, null=True, blank=True) doi = models.CharField(max_length=255, null=True, blank=True) isbn = models.CharField(max_length=255, null=True, blank=True)", "= models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self): return self.name class Meta: ordering = [\"name\"]", "importing process') url = models.CharField(max_length=500, null=True, blank=True) doi = models.CharField(max_length=255, null=True, blank=True) isbn", "on_site = CurrentSiteManager() show_in_list = models.BooleanField(default=True) def __str__(self): return self.title class Meta: ordering", "on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES = ( ('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization',", "'name': 'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)',", "The feature is typically included in the techique'), ('1', '1 - The item", "sectors, industries or acticity fields, or other system components\") classification = models.ManyToManyField(MethodClassification, blank=True)", "choices=METHOD_SCORING, null=True, blank=True) developed_by = models.CharField(max_length=255, null=True, blank=True) based_on = models.TextField(null=True, blank=True) gaps_addressed", "null=True, blank=True) references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) material_temp_notes", "description = models.TextField(null=True, blank=True) member_since = models.DateField(null=True, blank=True, db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE,", "= models.TextField(null=True, blank=True) output_tools = models.TextField(null=True, blank=True) output_reports = models.TextField(null=True, blank=True) output_articles =", "( ('nw', 'Not worked on'), ('ip', 'In 
progress'), ('dr', 'Draft ready for review'),", "multimedia = models.BooleanField() projects = models.BooleanField() theses = models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True)", "decimal_places=2, null=True, blank=True) print_aim = models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True, blank=True) RELEVANCE =", "'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceFormAdmin(ModelForm): class Meta: model", "__str__(self): return self.title class Meta: ordering = [\"-year\", \"title\"] def source(self): \"Return details", "researcher = models.CharField(max_length=255, null=True, blank=True) supervisor = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255,", "models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) email_public = models.BooleanField() city =", "Video exclude = ['id', 'site'] labels = { 'primary_space': 'Reference space (optional)' }", "on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE = ( ('academic', 'Research Institution'), ('universities', 'Universities'), ('city_government', 'City", "publication\") year = models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of the data being visualized -- ONLY", "for induced flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE,", "= models.TextField(null=True, blank=True) abstract_original_language = models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True) file", "to further develop/improve this methodology\") representative_paper = models.TextField(null=True, blank=True, help_text=\"Which paper is a", "'Publication Types'), (2, 'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'), (5, 
'Scales'), (6, 'Flows'),", "null=True, blank=True) ORG_TYPE = ( ('academic', 'Research Institution'), ('universities', 'Universities'), ('city_government', 'City Government'),", "blank=True) spaces = models.ManyToManyField(ReferenceSpace, blank=True) def __str__(self): return self.title class Meta: ordering =", "models.ForeignKey(Organization, on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = ( ('publisher', 'Publisher'), ('commissioner', 'Commissioner'),", "method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True) description", "worked on'), ('ip', 'In progress'), ('dr', 'Draft ready for review'), ('rv', 'Reviewed -", "null=True, blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING =", "models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments about the", "blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True) researcher = models.CharField(max_length=255, null=True, blank=True) supervisor =", "= models.CharField(max_length=255, null=True, blank=True) orcid = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True,", "blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)", "null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='journals') def __str__(self): return self.name class Meta:", "'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 
'comments', 'file'] labels = { 'authorlist': 'Author(s)',", "models.TextField(null=True, blank=True) output_articles = models.TextField(null=True, blank=True) funding_program = models.CharField(max_length=255, null=True, blank=True) methodologies =", "'High'), ) relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS = ( ('no', 'No'),", "models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3}) budget = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True) print_aim = models.TextField(null=True,", "class Color(models.Model): name = models.CharField(max_length=20) css = models.CharField(max_length=20) def __str__(self): return self.name class", "blank=True) def __str__(self): return self.name class CaseStudy(models.Model): title = models.CharField(max_length=255) method = models.OneToOneField(Tag,", "most models. ordering = ['-created_at', '-updated_at'] class ReferenceType(models.Model): name = models.CharField(max_length=255) icon =", "class Meta: model = Tag exclude = ['id', 'gps', 'parent', 'hidden'] class Reference(models.Model):", "induced flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True,", "= models.CharField(max_length=255) def __str__(self): return self.name class Meta: ordering = [\"name\"] class Journal(models.Model):", "self.name class Project(models.Model): name = models.CharField(max_length=255, null=True, blank=True) full_name = models.CharField(max_length=255, null=True, blank=True)", "and basic compounds only\") materials = models.CharField(\"materials / bulk materials\", max_length=1, choices=METHOD_SCORING, null=True,", "Meta: ordering = [\"position\", \"tag__name\"] class TagForm(ModelForm): class Meta: model = Tag exclude", "models.DateTimeField(auto_now_add=True) updated_at = 
models.DateTimeField(auto_now=True) def __str__(self): return self.title class Meta: ordering = [\"title\"]", "Tag exclude = ['id', 'gps', 'parent', 'hidden'] class Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES", "url = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255,", "= models.CharField(max_length=255, null=True, blank=True) based_on = models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What", ") substances = models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and basic", "VIDEOSITES = ( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other', 'Other website'),", "ORG_TYPE = ( ('academic', 'Research Institution'), ('universities', 'Universities'), ('city_government', 'City Government'), ('regional_government', 'Regional", "impacts of material flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) main_measurement_unit = models.CharField(max_length=255, null=True, blank=True)", "uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference,", "models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False) content = HTMLField('Content', help_text=\"The content field is a", "url = models.CharField(max_length=500, null=True, blank=True) doi = models.CharField(max_length=255, null=True, blank=True) isbn = models.CharField(max_length=255,", "default='ongoing') active = models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE = ( ('theses', 'Theses'), ('projects',", "blank=True) stock_changes = models.CharField(\"stock changes\", 
max_length=1, choices=METHOD_SCORING, null=True, blank=True) specific = models.CharField(\"specific goods", "blank=True) image3 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) site =", "self.name class Meta: ordering = [\"name\"] class Organization(models.Model): name = models.CharField(max_length=255) url =", "blank=True) STATUS = ( ('nw', 'Not worked on'), ('ip', 'In progress'), ('dr', 'Draft", "this is not linked to a publication\") source = models.TextField(null=True, blank=True, help_text=\"Name of", "['title', 'introduction', 'content', 'image', 'active'] class SimpleArticleForm(ModelForm): class Meta: model = Article fields", "= [\"position\"] class VideoCollectionForm(ModelForm): class Meta: model = VideoCollection exclude = ['id', 'site']", "class Meta: ordering = [\"title\"] class ArticleForm(ModelForm): class Meta: model = Article fields", "primary_key=True, ) EVENT_TYPE = ( ('conference', 'Conference'), ('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'),", "MethodData(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class MethodCategory(models.Model): name = models.CharField(max_length=255)", "return self.title class Meta: ordering = [\"title\"] class ArticleForm(ModelForm): class Meta: model =", "null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) description = HTMLField('description', null=True, blank=True) target_finish_date", "class UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True)", "= models.DateTimeField(auto_now=True) class Meta: abstract = True # By default, any model that", "= [\"name\"] class Organization(models.Model): name = models.CharField(max_length=255) url = models.CharField(max_length=255, null=True, blank=True) 
twitter", "exclude = ['id', 'site'] labels = { 'primary_space': 'Reference space (optional)' } class", "def __str__(self): return self.title class VideoForm(ModelForm): class Meta: model = Video exclude =", "= models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference,", "not appear in the list, please leave empty and add the name in", "purpose = models.TextField(null=True, blank=True, help_text=\"Purpose of the study\") def __str__(self): return self.title class", "help_text=\"Elements and basic compounds only\") materials = models.CharField(\"materials / bulk materials\", max_length=1, choices=METHOD_SCORING,", "= models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, help_text=\"If the journal does not appear in the", "name = models.CharField(max_length=255) def __str__(self): return self.name class MethodCategory(models.Model): name = models.CharField(max_length=255) description", "models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) google_scholar = models.CharField(max_length=255, null=True, blank=True)", "null=True, blank=True) introduction = models.TextField(null=True, blank=True) head = models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False)", "blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) google_scholar = models.CharField(max_length=255, null=True, blank=True) orcid =", "= models.CharField(max_length=255, blank=True, null=True) authorlist = models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal =", "= models.DateField(null=True, blank=True) end = models.DateField(null=True, blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date =", "} class ReferenceFormAdmin(ModelForm): class 
Meta: model = Reference exclude = ['id', 'organizations', 'processes',", "models.TextField(null=True, blank=True,help_text=\"The proposed next steps to further develop/improve this methodology\") representative_paper = models.TextField(null=True,", "models.CharField(max_length=255, null=True, blank=True) full_name = models.CharField(max_length=255, null=True, blank=True) institution = models.CharField(max_length=255, null=True, blank=True)", "Meta: ordering = [\"name\"] class Organization(models.Model): name = models.CharField(max_length=255) url = models.CharField(max_length=255, null=True,", "reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table = 'core_reference_authors'", "= models.TextField(null=True, blank=True) funding_program = models.CharField(max_length=255, null=True, blank=True) methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes", "orcid = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255,", "return self.title class NewsletterSubscriber(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) datasets = models.BooleanField() news =", "= CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail", "logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800, 800)}, null=True, blank=True) image1 =", "self.name def get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id]) class Meta: ordering = ['name'] class", "title = models.CharField(max_length=255) LANGUAGES = ( ('EN', 'English'), ('ES', 'Spanish'), ('CH', 'Chinese'), ('FR',", "('ip', 'In progress'), ('dr', 'Draft ready for review'), ('rv', 
'Reviewed - DONE'), ('ec',", "choices=METHOD_SCORING, null=True, blank=True) recycling = models.CharField(\"recyling of material and energy\", max_length=1, choices=METHOD_SCORING, null=True,", "( ('planned', 'Planned'), ('ongoing', 'In progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'), ) status =", "on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True ) authors = models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True) SECTIONS", "max_length=1, choices=METHOD_SCORING, null=True, blank=True) production = models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows", "models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table = 'core_reference_authors' class ReferenceForm(ModelForm): class Meta: model =", "self.name class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class MethodData(models.Model): name", "= HTMLField('weaknesses', null=True, blank=True) def __str__(self): return self.name class Meta: verbose_name_plural = \"Method", "on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS = ( ('3', 'Relation in UM systems'), ('2',", "tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) materials", "'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectUserForm(ModelForm): class", "'authorlist', 'type', 'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 'comments', 'file'] labels", "= StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb':", "of the data 
being visualized -- ONLY enter if this is not linked", "title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL',", "null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos are best - please", "'Other'), ) parent = models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This was a previous classification", "Project leader(s)', 'url': 'URL', } class ProjectUserForm(ModelForm): class Meta: model = Project fields", "= models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True,", "exclude = ['id', 'gps', 'parent', 'hidden'] class Reference(models.Model): title = models.CharField(max_length=255) LANGUAGES =", "approach'), ('2', '2 - The feature is typically included in the techique'), ('1',", "= models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos are best - please resize to 350x350", "approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS = ( (1,", "'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE)", "- The item is included only occasionally in the mode of analysis, and", "= models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True, blank=True) RELEVANCE = ( ('u', 'Unknown'), ('l',", "abstract_original_language = models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True) file = models.FileField(null=True, blank=True,", "title = models.CharField(max_length=255) method = models.OneToOneField(Tag, on_delete=models.CASCADE, 
limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE)", "economy / closing loop consideration\") target_audience = models.TextField(null=True, blank=True, help_text=\"Target audience of results\")", "reference was published at/in\" if self.journal: return self.journal.name elif self.event: return self.event.name else:", "null=True, blank=True) CITYLOOPS = ( ('no', 'No'), ('pending', 'Yes - pending'), ('yes', 'Yes", "%s' % (self.firstname, self.lastname) class Meta: ordering = [\"firstname\", \"lastname\"] class PeopleForm(ModelForm): class", "('3', 'Relation in UM systems'), ('2', 'Flows of substances'), ('1', 'Environmental impacts'), )", "- pending'), ('yes', 'Yes - completed'), ) cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True)", "blank=True, help_text=\"Target audience of results\") indicators = models.TextField(null=True, blank=True, help_text=\"Indicators\") pros = models.TextField(null=True,", "if tag is superseded/not yet approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True,", "models.DateField() def __str__(self): return self.title class DataViz(models.Model): title = models.CharField(max_length=255) image = StdImageField(upload_to='dataviz',", "'author', 'date', 'thumbnail', 'license'] labels = { 'primary_space': 'Reference space', 'url': 'Video URL'", "the approach'), ('2', '2 - The feature is typically included in the techique'),", "models.ManyToManyField(People, blank=True) VIDEOSITES = ( ('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other',", "'URL', } class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) TYPES", "Meta: ordering = ['name'] class ProjectForm(ModelForm): class Meta: model = Project exclude 
=", "null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) google_scholar = models.CharField(max_length=255, null=True, blank=True) orcid", "through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes =", "= models.CharField(max_length=255, null=True, blank=True) methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags", "through='ProjectOrganization', blank=True) researcher = models.CharField(max_length=255, null=True, blank=True) supervisor = models.CharField(max_length=255, null=True, blank=True) email", "null=True, blank=True) outputs = models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling =", "= models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True) logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800,", "blank=True) RELEVANCE = ( ('u', 'Unknown'), ('l', 'Low'), ('m', 'Medium'), ('h', 'High'), )", "= ['id', 'processes'] class Publisher(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class", "Meta: ordering = [\"name\"] class Journal(models.Model): name = models.CharField(max_length=255) website = models.CharField(max_length=255, null=True,", "organizations = models.ManyToManyField(Organization, blank=True) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return", "but reverse-chronological is a good # default ordering for most models. 
ordering =", "= models.ImageField(null=True, blank=True, upload_to='articles') parent = models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True )", "date = models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True, help_text=\"URL", "class Organization(models.Model): name = models.CharField(max_length=255) url = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255,", "and energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes = models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING, null=True,", "in reverse-chronological order. We can override this on a # per-model basis as", "limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2}) profile =", "= [\"-date\"] class Color(models.Model): name = models.CharField(max_length=20) css = models.CharField(max_length=20) def __str__(self): return", "auto_now_add=True) file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the file if you are", "('youtube', 'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other', 'Other website'), ) website =", "blank=True) production = models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows = models.CharField(\"between-flows\", max_length=1,", "other system components\") classification = models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\",", "image = StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024, 1024)}) uploaded_by = models.ForeignKey(People, 
on_delete=models.CASCADE)", "if this is not linked to a publication\") source = models.TextField(null=True, blank=True, help_text=\"Name", "reverse-chronological is a good # default ordering for most models. ordering = ['-created_at',", "be published'), ) status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True)", "space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group", "related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional entity (driving force boundary", "'French'), ('GE', 'German'), ('NL', 'Dutch'), ('OT', 'Other'), ) language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language", "models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE = ( ('theses', 'Theses'), ('projects', 'Projects'), ('applied', 'Applied", "RELEVANCE = ( ('u', 'Unknown'), ('l', 'Low'), ('m', 'Medium'), ('h', 'High'), ) relevance", "= models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING = ( ('3', '3 - The item is", "= models.CharField(max_length=255, null=True, blank=True) full_name = models.CharField(max_length=255, null=True, blank=True) institution = models.CharField(max_length=255, null=True,", "research'), ) type = models.CharField(max_length=20, choices=TYPE) THESISTYPE = ( ('bachelor', 'Bachelor'), ('masters', 'Master'),", "blank=True, limit_choices_to={'slug__isnull': False}) materials = models.ManyToManyField('staf.Material', blank=True) spaces = models.ManyToManyField(ReferenceSpace, blank=True) def __str__(self):", "class PeopleForm(ModelForm): class Meta: model = People exclude = ['id'] class PeopleNote(models.Model): people", "blank=True) member_since = 
models.DateField(null=True, blank=True, db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image", "on_site = CurrentSiteManager() def __str__(self): return '%s %s' % (self.firstname, self.lastname) class Meta:", "'description', 'target_finish_date', 'start_date', 'end_date', 'status', 'url'] labels = { 'name': 'Project title', 'thesistype':", "include_in_list = models.NullBooleanField(default=False) def __str__(self): return self.tag.name class Meta: ordering = [\"position\", \"tag__name\"]", "class Meta: ordering = [\"position\"] class VideoCollectionForm(ModelForm): class Meta: model = VideoCollection exclude", "study using this methodology?\") materials_catalog_used = models.TextField(null=True, blank=True) also_known_as = models.TextField(null=True, blank=True) internal_notes", "to a publication\") source = models.TextField(null=True, blank=True, help_text=\"Name of the source website/article --", "materials = models.CharField(\"materials / bulk materials\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) energy = models.CharField(max_length=1,", "yet approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS = (", "else: return self.name class Meta: ordering = [\"name\"] class MethodClassification(models.Model): name = models.CharField(max_length=255)", "= models.CharField(max_length=255, null=True, blank=True) comments = models.TextField(null=True, blank=True) STATUS = ( ('pending', 'Pending'),", "('ES', 'Spanish'), ('CH', 'Chinese'), ('FR', 'French'), ('GE', 'German'), ('NL', 'Dutch'), ('OT', 'Other'), )", "models.CharField(max_length=20, choices=TYPE) THESISTYPE = ( ('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'), ('other', 'Other'),", "objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return self.name def get_absolute_url(self): return", 
"= \"case studies\" class UserAction(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class", "= models.BooleanField(db_index=True, default=False) PARENTS = ( (1, 'Publication Types'), (2, 'Metabolism Studies'), (3,", "= models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This was a previous classification - can be", "reverse-chronological order. We can override this on a # per-model basis as needed,", "('finished', 'Finished'), ('cancelled', 'Cancelled'), ) status = models.CharField(max_length=20, choices=STATUS, default='ongoing') active = models.BooleanField(default=True)", "Video(models.Model): title = models.CharField(max_length=255) url = models.CharField(max_length=255) description = models.TextField() author = models.CharField(max_length=255)", "Article fields = ['title', 'introduction', 'content', 'image', 'active'] class SimpleArticleForm(ModelForm): class Meta: model", "= models.CharField(max_length=20) def __str__(self): return self.name class Project(models.Model): name = models.CharField(max_length=255, null=True, blank=True)", "Do'), ('newsevents', 'News and Events'), ) section = models.CharField(max_length=20, choices=SECTIONS, default='about') site =", "models.TextField(null=True, blank=True) head = models.TextField(null=True, blank=True) includes_form = models.BooleanField(default=False) content = HTMLField('Content', help_text=\"The", "models.CharField(max_length=255) slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction = models.TextField(null=True, blank=True) head =", "class Meta: model = Article fields = ['title', 'introduction', 'content', 'image', 'active'] class", "models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of the data being visualized -- ONLY enter if this", "on_delete=models.CASCADE, default=settings.SITE_ID) organizations = models.ManyToManyField(Organization, blank=True) objects = models.Manager() 
on_site = CurrentSiteManager() def", "using this methodology?\") materials_catalog_used = models.TextField(null=True, blank=True) also_known_as = models.TextField(null=True, blank=True) internal_notes =", "blank=True) ongoing = models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they continue to implement it?\") consideration", "def __str__(self): return self.name def get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id]) class Meta: ordering", "Studies'), (3, 'Countries'), (4, 'Cities'), (5, 'Scales'), (6, 'Flows'), (7, 'Time Horizon'), (9,", ") method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True)", "blank=True, help_text=\"Specification of flows between sectors, industries or acticity fields, or other system", "'processes', 'date_added', 'event', 'authors', 'spaces', 'tags', 'materials'] labels = { 'authorlist': 'Author(s)', 'doi':", "the list, please leave empty and add the name in the comments\") event", "['id', 'processes'] class Publisher(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class Meta:", "= ( ('nw', 'Not worked on'), ('ip', 'In progress'), ('dr', 'Draft ready for", "Government'), ('regional_government', 'Regional Government'), ('national_government', 'National Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private Sector'),", "ReferenceFormAdmin(ModelForm): class Meta: model = Reference exclude = ['id', 'organizations', 'processes', 'date_added', 'event',", "about the importing process') url = models.CharField(max_length=500, null=True, blank=True) doi = models.CharField(max_length=255, null=True,", "models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups = 
models.ManyToManyField(MaterialGroup, blank=True) ongoing", "at all'), ) substances = models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements", "models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos are best - please resize to 350x350 pixels\")", "blank=True, upload_to='articles') parent = models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True ) authors =", "model = VideoCollection exclude = ['id', 'site'] class Video(models.Model): title = models.CharField(max_length=255) url", "models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True) STATUS = ( ('planned', 'Planned'), ('ongoing', 'In", "+ self.project.name class Timeline(models.Model): title = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) link =", "name = models.CharField(max_length=20) css = models.CharField(max_length=20) def __str__(self): return self.name class Project(models.Model): name", "(10, 'Other'), ) parent = models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This was a previous", "blank=True) internal_notes = models.TextField(null=True, blank=True) output_tools = models.TextField(null=True, blank=True) output_reports = models.TextField(null=True, blank=True)", "= models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction = models.TextField(null=True, blank=True) head = models.TextField(null=True, blank=True)", "'Research'), ('resources', 'Resources'), ('cities', 'Cities'), ('whatwedo', 'What We Do'), ('newsevents', 'News and Events'),", "(optional)' } class VideoUploadForm(ModelForm): class Meta: model = Video fields = ['title', 'website',", "('h', 'High'), ) relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True) CITYLOOPS = ( ('no',", "300), 'large': (1600,1600)}, null=True, 
blank=True) image3 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)},", "author = models.CharField(max_length=255) date = models.DateField(null=True) people = models.ManyToManyField(People, blank=True) VIDEOSITES = (", "'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'), (5, 'Scales'), (6, 'Flows'), (7, 'Time Horizon'),", "('nw', 'Not worked on'), ('ip', 'In progress'), ('dr', 'Draft ready for review'), ('rv',", "'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 'comments', 'file'] labels = {", "limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True, blank=True) parent = models.ForeignKey('self',", "\" + self.reference.title class MaterialGroup(models.Model): name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def", "help_text=\"Key socio-institutional entity (driving force boundary for induced flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE,", "models.BooleanField() publications = models.BooleanField() dataviz = models.BooleanField() multimedia = models.BooleanField() projects = models.BooleanField()", "help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True, help_text=\"Purpose of the study\") def __str__(self): return self.title", "return self.title class Meta: ordering = [\"position\"] class VideoCollectionForm(ModelForm): class Meta: model =", "models.CharField(max_length=255, null=True, blank=True) date = models.DateField() def __str__(self): return self.title class DataViz(models.Model): title", "'Unknown'), ('l', 'Low'), ('m', 'Medium'), ('h', 'High'), ) relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True,", "blank=True, limit_choices_to={'hidden': False}, related_name='children' ) hidden = models.BooleanField(db_index=True, default=False, 
help_text=\"Mark if tag is", "models.BooleanField() events = models.BooleanField() publications = models.BooleanField() dataviz = models.BooleanField() multimedia = models.BooleanField()", "between sectors, industries or acticity fields, or other system components\") classification = models.ManyToManyField(MethodClassification,", "= [\"name\"] class Journal(models.Model): name = models.CharField(max_length=255) website = models.CharField(max_length=255, null=True, blank=True) description", "Meta: model = Reference exclude = ['id', 'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces',", "null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by", "cradle_to_grave = models.CharField(\"cradle-to-grave sources of flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could also", "= True # By default, any model that inherits from `TimestampedModel` should #", "= models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True,", "flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Note: could also be considered as consumption-based accounting?)\")", "method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace,", "= models.CharField(max_length=2, choices=STATUS, null=True, blank=True) position = models.PositiveSmallIntegerField(null=True, blank=True) material_scope = models.CharField(max_length=255, null=True,", "class Meta: ordering = [\"name\"] class Organization(models.Model): name = models.CharField(max_length=255) 
url = models.CharField(max_length=255,", "= models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True, help_text=\"Purpose of the study\") def", "in the mode of analysis, and in a partial or conditional way'), ('0',", "blank=True) gaps_addressed = models.TextField(null=True, blank=True, help_text=\"What gaps does in other methodologies does this", "['id', 'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces', 'tags', 'materials'] labels = { 'authorlist':", "blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) def __str__(self): return self.name class Meta: verbose_name_plural", "on_delete=models.CASCADE, null=True, blank=True, help_text=\"If the journal does not appear in the list, please", "= models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True, blank=True) class Meta: ordering = [\"-date\"] class", "= models.CharField(max_length=20, choices=TYPE) THESISTYPE = ( ('bachelor', 'Bachelor'), ('masters', 'Master'), ('phd', 'PhD'), ('other',", "= models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True,", "= models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional entity (driving force boundary for induced flows)\")", "We can override this on a # per-model basis as needed, but reverse-chronological", "(1600,1600)}, null=True, blank=True) image3 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True)", "blank=True, help_text=\"Indicators\") pros = models.TextField(null=True, blank=True, help_text=\"Indicators\") cons = models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose", "= models.CharField(max_length=255, null=True, blank=True) date = models.DateField() def __str__(self): return self.title class 
DataViz(models.Model):", "('CH', 'Chinese'), ('FR', 'French'), ('GE', 'German'), ('NL', 'Dutch'), ('OT', 'Other'), ) language =", "= Reference exclude = ['id', 'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces', 'tags', 'materials']", "to implement it?\") consideration = models.TextField(null=True, blank=True, help_text=\"Circular economy / closing loop consideration\")", "blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) def __str__(self): return self.people.firstname + \" \"", "this out\") image = models.ImageField(null=True, blank=True, upload_to='articles') parent = models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent',", "people = models.ForeignKey(People, on_delete=models.CASCADE) class Meta: db_table = 'core_reference_authors' class ReferenceForm(ModelForm): class Meta:", "= models.CharField(max_length=255) website = models.CharField(max_length=255, null=True, blank=True) description = models.TextField(null=True, blank=True) publisher =", "title = models.CharField(max_length=255) slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction = models.TextField(null=True, blank=True)", "title_original_language = models.CharField(max_length=255, blank=True, null=True) authorlist = models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal", "= ( ('academic', 'Research Institution'), ('universities', 'Universities'), ('city_government', 'City Government'), ('regional_government', 'Regional Government'),", "= ( ('no', 'No'), ('pending', 'Yes - pending'), ('yes', 'Yes - completed'), )", "position = models.PositiveSmallIntegerField(default=1) objects = models.Manager() on_site = CurrentSiteManager() show_in_list = models.BooleanField(default=True) def", "profile = models.TextField(null=True, blank=True) research_interests = models.TextField(null=True, blank=True) website = 
models.CharField(max_length=255, null=True, blank=True)", "class PeopleNote(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True)", "null=True, blank=True) description = models.TextField(null=True, blank=True) member_since = models.DateField(null=True, blank=True, db_index=True) user =", "estimated_date = models.CharField(max_length=60, null=True, blank=True) location = models.CharField(max_length=255, null=True, blank=True) url = models.CharField(max_length=255,", "from django.contrib.auth.models import User from django.contrib.auth import get_user_model User = get_user_model() from django.contrib.sites.models", "'open_access', 'doi', 'isbn', 'url', 'comments', 'file'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI',", "processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows = models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification", "__str__(self): return self.name class Meta: ordering = [\"name\"] class OrganizationForm(ModelForm): class Meta: model", "'doi', 'isbn', 'url', 'comments', 'file'] labels = { 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn':", "blank=True) date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True) file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload", "is a defining feature of the approach'), ('2', '2 - The feature is", "import ModelForm from django.template.defaultfilters import slugify from tinymce import HTMLField from django.contrib.auth.models import", "= models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True) ORG_TYPE = ( ('academic', 'Research Institution'), ('universities', 'Universities'),", "between_flows = models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of 
flows between sectors, industries", "self.title class Meta: ordering = [\"title\"] class ArticleForm(ModelForm): class Meta: model = Article", "= CurrentSiteManager() show_in_list = models.BooleanField(default=True) def __str__(self): return self.title class Meta: ordering =", "\"tag__name\"] class TagForm(ModelForm): class Meta: model = Tag exclude = ['id', 'gps', 'parent',", "'Planned'), ('ongoing', 'In progress'), ('finished', 'Finished'), ('cancelled', 'Cancelled'), ) status = models.CharField(max_length=20, choices=STATUS,", "null=True, blank=True) description = HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses", "'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'), ('pending', 'Pending Review'), ) status = models.CharField(max_length=8, choices=PEOPLE_STATUS,", "CurrentSiteManager() primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail =", "socio-institutional entity (driving force boundary for induced flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True,", "null=True, blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True, blank=True) sustainability_criteria_reference", "('1', 'Environmental impacts'), ) method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category = models.ForeignKey(MethodCategory,", "choices=CITYLOOPS, null=True, blank=True) logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800, 800)}, null=True,", "models.CharField(max_length=255) description = models.TextField() author = models.CharField(max_length=255) date = models.DateField(null=True) people = models.ManyToManyField(People,", "last updated. 
updated_at = models.DateTimeField(auto_now=True) class Meta: abstract = True # By default,", "researchgate = models.CharField(max_length=255, null=True, blank=True) logo = models.ImageField(null=True, blank=True, upload_to='organizations') processes = models.ManyToManyField('staf.Process',", "= models.BooleanField(default=False) content = HTMLField('Content', help_text=\"The content field is a required field -", "included at all'), ) substances = models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True,", "'status', 'url'] labels = { 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)',", "return self.name @property def shortcode(self): \"Returns abbreviation -- text between parenthesis -- if", "model = Project fields = ['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email', 'description',", "tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS = ( ('3', 'Relation in", "cityloops = models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments", "Meta: ordering = [\"name\"] class OrganizationForm(ModelForm): class Meta: model = Organization exclude =", "= { 'primary_space': 'Reference space (optional)' } class VideoUploadForm(ModelForm): class Meta: model =", "= models.CharField(max_length=255, null=True, blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) avoidance_double_counting = models.NullBooleanField(null=True,", "= models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows = models.CharField(\"between-flows\", max_length=1, choices=METHOD_SCORING, null=True,", "\"case studies\" class UserAction(models.Model): name = 
models.CharField(max_length=255) def __str__(self): return self.name class UserLog(models.Model):", "- \" + self.project.name class Timeline(models.Model): title = models.CharField(max_length=255) description = models.TextField(null=True, blank=True)", "models.TextField() def __str__(self): return self.name class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def __str__(self): return", "= StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024, 1024)}) uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space", "VideoCollection exclude = ['id', 'site'] class Video(models.Model): title = models.CharField(max_length=255) url = models.CharField(max_length=255)", "models.ImageField(null=True, blank=True, upload_to='organizations') processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True)", "methodology?\") materials_catalog_used = models.TextField(null=True, blank=True) also_known_as = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True)", "have permission to do so') open_access = models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False) cityloops_comments", "object was last updated. 
updated_at = models.DateTimeField(auto_now=True) class Meta: abstract = True #", "models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255, null=True,", "['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 'end_date', 'status', 'url']", "self.reference.title class MaterialGroup(models.Model): name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def __str__(self): return", "We Do'), ('newsevents', 'News and Events'), ) section = models.CharField(max_length=20, choices=SECTIONS, default='about') site", "# Used for image resizing from stdimage.models import StdImageField import re from django.urls", "models.CharField(max_length=255) description = models.TextField() def __str__(self): return self.name class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255)", "previous classification - can be left empty\") def __str__(self): return self.name @property def", "= models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering = [\"date\"] class Article(models.Model): title = models.CharField(max_length=255)", "Meta: model = Reference fields = ['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year',", "= models.BooleanField(default=True) SECTIONS = ( ('about', 'About'), ('community', 'Community'), ('research', 'Research'), ('resources', 'Resources'),", "on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True) url = models.CharField(max_length=255,", "null=True) authorlist = models.TextField() type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE) journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True,", "= 
['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn',", "blank=True) funding_program = models.CharField(max_length=255, null=True, blank=True) methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True,", "blank=True) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return '%s %s' %", "class Meta: model = People exclude = ['id'] class PeopleNote(models.Model): people = models.ForeignKey(People,", "models.TextField() author = models.CharField(max_length=255) date = models.DateField(null=True) people = models.ManyToManyField(People, blank=True) VIDEOSITES =", "null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type':", "= models.BooleanField() theses = models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE,", "data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave sources of flows\", max_length=1,", "user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='people', help_text=\"Square photos", "primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True) collections = models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True,", "limit_choices_to={'parent_tag__id': 318}, related_name=\"methods\") METHOD_CLASS = ( ('3', 'Relation in UM systems'), ('2', 'Flows", "'organizations'] labels = { 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)', 
'supervisor':", "( ('theses', 'Theses'), ('projects', 'Projects'), ('applied', 'Applied research'), ) type = models.CharField(max_length=20, choices=TYPE)", "upload_to='people', help_text=\"Square photos are best - please resize to 350x350 pixels\") PEOPLE_STATUS =", "return self.name def get_absolute_url(self): return reverse(\"core:project\", args=[self.type, self.id]) class Meta: ordering = ['name']", "null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) description", "'%s %s' % (self.firstname, self.lastname) class Meta: ordering = [\"firstname\", \"lastname\"] class PeopleForm(ModelForm):", "description = HTMLField('description', null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position = models.PositiveSmallIntegerField(default=1)", "= models.CharField(max_length=20, choices=STATUS, default='ongoing') active = models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE = (", "null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) def __str__(self): return self.article.title class EventForm(ModelForm):", "= models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) description = HTMLField(null=True, blank=True) url", "Government'), ('statistical_agency', 'Statistical Agency'), ('private_sector', 'Private Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'), ('other', 'Other'),", "('multimedia', 'Multimedia'), ) group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def __str__(self): return self.name", "(1, 'Publication Types'), (2, 'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'), (5, 'Scales'), (6,", "materials = models.ManyToManyField('staf.Material', blank=True) spaces = models.ManyToManyField(ReferenceSpace, blank=True) 
def __str__(self): return self.title class", "material and energy\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) stock_changes = models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING,", "models.ManyToManyField(VideoCollection, blank=True) thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True)", "('cancelled', 'Cancelled'), ) status = models.CharField(max_length=20, choices=STATUS, default='ongoing') active = models.BooleanField(default=True) pending_review =", "created_by = models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering = [\"date\"] class Article(models.Model): title =", "models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and basic compounds only\") materials", "('theses', 'Theses'), ('projects', 'Projects'), ('applied', 'Applied research'), ) type = models.CharField(max_length=20, choices=TYPE) THESISTYPE", "= models.TextField(null=True, blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True,", "= models.ForeignKey( 'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True ) authors = models.ManyToManyField(People, blank=True) active", "('seminar', 'Seminar'), ('other', 'Other'), ) start = models.DateField(null=True, blank=True) end = models.DateField(null=True, blank=True)", "doi = models.CharField(max_length=255, null=True, blank=True) isbn = models.CharField(max_length=255, null=True, blank=True) comments = models.TextField(null=True,", "from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.conf import settings #", "spaces = models.ManyToManyField(ReferenceSpace, blank=True) def 
__str__(self): return self.title class Meta: ordering = [\"-year\",", "class Meta: model = Project exclude = ['id', 'site', 'references', 'organizations'] labels =", "for review'), ('rv', 'Reviewed - DONE'), ('ec', 'External copy'), ('sk', 'Skip - will", "services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) production = models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "description = models.TextField() def __str__(self): return self.name class MethodTemporalBoundary(models.Model): name = models.CharField(max_length=255) def", "study\") def __str__(self): return self.title class Meta: verbose_name_plural = \"case studies\" class UserAction(models.Model):", "= Reference fields = ['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year', 'abstract', 'abstract_original_language',", "(300, 300), 'large': (1600,1600)}, null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager()", "class Meta: ordering = [\"name\"] class OrganizationForm(ModelForm): class Meta: model = Organization exclude", "self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE) class", "required field - be sure to fill this out\") image = models.ImageField(null=True, blank=True,", "= ( ('theses', 'Theses'), ('projects', 'Projects'), ('applied', 'Applied research'), ) type = models.CharField(max_length=20,", "not linked to a publication\") year = models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of the data", "impacts = models.CharField(\"quantitative weighting of impacts of material flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) developed_by = 
models.CharField(max_length=255, null=True, blank=True) based_on = models.TextField(null=True, blank=True)", "css = models.CharField(max_length=20) def __str__(self): return self.name class Project(models.Model): name = models.CharField(max_length=255, null=True,", "this is not linked to a publication\") class Meta: ordering = [\"date\"] def", "Agency'), ('private_sector', 'Private Sector'), ('publisher', 'Publishers'), ('ngo', 'NGO'), ('other', 'Other'), ) type =", "name = models.CharField(max_length=255) def __str__(self): return self.name class UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE,", "blank=True) include_in_list = models.NullBooleanField(default=False) def __str__(self): return self.tag.name class Meta: ordering = [\"position\",", "choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of flows between sectors, industries or acticity fields, or", "type = models.CharField(max_length=20, choices=TYPES) def __str__(self): return self.organization.name + \" - \" +", "self.name class UserLog(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log') space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True,", "StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024, 1024)}) uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE) space =", "null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) logo", "models.ForeignKey(User, on_delete=models.CASCADE) class Meta: ordering = [\"date\"] class Article(models.Model): title = models.CharField(max_length=255) slug", "studies\" class UserAction(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class UserLog(models.Model): user", "'large': (1600,1600)}, null=True, blank=True) image3 = StdImageField(upload_to='projects', 
variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True,", "Meta: model = Article fields = ['title', 'introduction', 'content', 'image', 'active'] class SimpleArticleForm(ModelForm):", "as needed, but reverse-chronological is a good # default ordering for most models.", "The item is included only occasionally in the mode of analysis, and in", "objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return '%s %s' % (self.firstname,", "choices=METHOD_SCORING, null=True, blank=True) production = models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows =", "'Research Institution'), ('universities', 'Universities'), ('city_government', 'City Government'), ('regional_government', 'Regional Government'), ('national_government', 'National Government'),", "'large': (800, 800)}, null=True, blank=True) image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)},", "blank=True) start_date = models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True) STATUS = ( ('planned',", "\" - \" + self.type + \" - \" + self.project.name class Timeline(models.Model):", "= models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return", "= models.CharField(max_length=255) description = models.TextField() author = models.CharField(max_length=255) date = models.DateField(null=True) people =", "'Scales'), (6, 'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'), (10, 'Other'), ) parent =", "resize to 350x350 pixels\") PEOPLE_STATUS = ( ('active', 'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'),", "is not linked to a publication\") class Meta: ordering = [\"date\"] def __str__(self):", "+ self.reference.title class MaterialGroup(models.Model): name = models.CharField(max_length=255) description = 
models.TextField(null=True, blank=True) def __str__(self):", "blank=True) impacts = models.CharField(\"quantitative weighting of impacts of material flows\", max_length=1, choices=METHOD_SCORING, null=True,", "sure to fill this out\") image = models.ImageField(null=True, blank=True, upload_to='articles') parent = models.ForeignKey(", "if self.journal: return self.journal.name elif self.event: return self.event.name else: return self.type.name def accountingMethods(self):", "ordering = [\"name\"] class People(models.Model): firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation =", "date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True) file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the", "models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup',", "= models.Manager() on_site = CurrentSiteManager() date = models.DateField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at", "created_at = models.DateTimeField(auto_now_add=True) # A timestamp reprensenting when this object was last updated.", "models.TextField(null=True, blank=True) also_known_as = models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) complete = models.NullBooleanField(null=True,", "self.title class DataViz(models.Model): title = models.CharField(max_length=255) image = StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large':", "you have permission to do so') open_access = models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False)", "= models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS = ( 
(1, 'Publication Types'),", "spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255,", "Article(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction = models.TextField(null=True,", "get_user_model() from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.conf import settings", "and services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) production = models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True,", "('OT', 'Other'), ) language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language = models.CharField(max_length=255, blank=True, null=True) authorlist", "(driving force boundary for induced flows)\") temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources", "member_since = models.DateField(null=True, blank=True, db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image =", "blank=True) url = models.CharField(max_length=255, null=True, blank=True) def __str__(self): return self.article.title class EventForm(ModelForm): class", "'Researcher(s)', 'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectOrganization(models.Model): organization =", "Publisher(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class Meta: ordering = [\"name\"]", "return '%s %s' % (self.firstname, self.lastname) class Meta: ordering = [\"firstname\", \"lastname\"] class", "to a publication\") year = models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of the data being visualized", "['id', 'site'] class 
Video(models.Model): title = models.CharField(max_length=255) url = models.CharField(max_length=255) description = models.TextField()", "are the creator or you have permission to do so') open_access = models.NullBooleanField(null=True,", "PEOPLE_STATUS = ( ('active', 'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'), ('pending', 'Pending", "= models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave =", "tag is superseded/not yet approved/deactivated\") include_in_glossary = models.BooleanField(db_index=True, default=False) is_accounting_method = models.BooleanField(db_index=True, default=False)", "null=True, blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) action =", "models.BooleanField() news = models.BooleanField() events = models.BooleanField() publications = models.BooleanField() dataviz = models.BooleanField()", "models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden':", "return self.organization.name + \" - \" + self.type + \" - \" +", "= Tag exclude = ['id', 'gps', 'parent', 'hidden'] class Reference(models.Model): title = models.CharField(max_length=255)", "blank=True, db_index=True) user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True) image = models.ImageField(null=True, blank=True, upload_to='people',", "the creator or you have permission to do so') open_access = models.NullBooleanField(null=True, blank=True)", "__str__(self): return self.tag.name class Meta: ordering = [\"position\", \"tag__name\"] class TagForm(ModelForm): class Meta:", "models.TextField(null=True, blank=True, 
help_text=\"Indicators\") purpose = models.TextField(null=True, blank=True, help_text=\"Purpose of the study\") def __str__(self):", "organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes = models.ManyToManyField('staf.Process',", "models.BooleanField() dataviz = models.BooleanField() multimedia = models.BooleanField() projects = models.BooleanField() theses = models.BooleanField()", "models.BooleanField(default=True) def __str__(self): return self.title class Meta: ordering = [\"position\"] class VideoCollectionForm(ModelForm): class", "'ISBN', 'url': 'URL', } class ReferenceFormAdmin(ModelForm): class Meta: model = Reference exclude =", "null=True, blank=True) main_measurement_unit = models.CharField(max_length=255, null=True, blank=True) mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "active = models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE = ( ('theses', 'Theses'), ('projects', 'Projects'),", "class TimestampedModel(models.Model): # A timestamp representing when this object was created. created_at =", "related_name=\"methods\") METHOD_CLASS = ( ('3', 'Relation in UM systems'), ('2', 'Flows of substances'),", "ONLY enter if this is not linked to a publication\") year = models.PositiveSmallIntegerField(null=True,", "year = models.PositiveSmallIntegerField(null=True, blank=True, help_text=\"Year of the data being visualized -- ONLY enter", "representing when this object was created. 
created_at = models.DateTimeField(auto_now_add=True) # A timestamp reprensenting", "models.Manager() on_site = CurrentSiteManager() date = models.DateField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at =", "closing loop consideration\") target_audience = models.TextField(null=True, blank=True, help_text=\"Target audience of results\") indicators =", "models.BooleanField(default=True) TYPE = ( ('theses', 'Theses'), ('projects', 'Projects'), ('applied', 'Applied research'), ) type", "resizing from stdimage.models import StdImageField import re from django.urls import reverse class TimestampedModel(models.Model):", "is included only occasionally in the mode of analysis, and in a partial", "return self.title class DataViz(models.Model): title = models.CharField(max_length=255) image = StdImageField(upload_to='dataviz', variations={'thumb': (300, 300),", "models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name=\"method_scales\", blank=True) entity = models.CharField(max_length=255, null=True, blank=True, help_text=\"Key socio-institutional entity", "parenthesis -- if there is any\" if \"(\" in self.name: s = self.name", "website'), ) website = models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects =", "= Event exclude = ['article'] class VideoCollection(models.Model): title = models.CharField(max_length=255) description = HTMLField('description',", "on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave = models.CharField(\"cradle-to-grave sources of flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True,", "exclude = ['id', 'site'] class Video(models.Model): title = models.CharField(max_length=255) url = models.CharField(max_length=255) description", "please resize to 350x350 pixels\") PEOPLE_STATUS = ( ('active', 'Active'), ('retired', 'Retired'), ('deceased',", "empty\") def __str__(self): 
return self.name @property def shortcode(self): \"Returns abbreviation -- text between", "= models.CharField(max_length=255, null=True, blank=True) GROUP = ( ('academic', 'Academic'), ('theses', 'Theses'), ('reports', 'Reports'),", "\"title\"] def source(self): \"Return details of where this reference was published at/in\" if", "'references', 'organizations'] labels = { 'name': 'Project title', 'thesistype': 'Thesis type', 'researcher': 'Researcher(s)',", ") type = models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return self.name class Meta: ordering =", "models.CharField(max_length=255) def __str__(self): return self.name class MethodData(models.Model): name = models.CharField(max_length=255) def __str__(self): return", "blank=True) reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) date = models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction,", "{ 'authorlist': 'Author(s)', 'doi': 'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceOrganization(models.Model): organization", "description = HTMLField('description', null=True, blank=True) target_finish_date = models.CharField(max_length=255, null=True, blank=True) start_date = models.DateField(blank=True,", "('projects', 'Projects'), ('applied', 'Applied research'), ) type = models.CharField(max_length=20, choices=TYPE) THESISTYPE = (", "on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3}) material_groups = models.ManyToManyField(MaterialGroup, blank=True) ongoing =", "add the name in the comments\") event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year", "args=[self.type, self.id]) class Meta: ordering = ['name'] class ProjectForm(ModelForm): class Meta: model =", "= models.CharField(max_length=255, null=True, blank=True) description = HTMLField('description', null=True, blank=True) target_finish_date = 
models.CharField(max_length=255, null=True,", "EVENT_TYPE = ( ('conference', 'Conference'), ('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar', 'Seminar'), ('other', 'Other'),", "self.title class Meta: verbose_name_plural = \"case studies\" class UserAction(models.Model): name = models.CharField(max_length=255) def", "self.title class VideoForm(ModelForm): class Meta: model = Video exclude = ['id', 'site'] labels", "( ('active', 'Active'), ('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'), ('pending', 'Pending Review'), )", "created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return self.title class Meta: ordering", "return self.name class Meta: ordering = [\"name\"] class Organization(models.Model): name = models.CharField(max_length=255) url", "blank=True) energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True) outputs = models.CharField(\"outputs to environment\", max_length=1,", "related_name='event', primary_key=True, ) EVENT_TYPE = ( ('conference', 'Conference'), ('hackathon', 'Hackathon'), ('workshop', 'Workshop'), ('seminar',", "there is any\" if \"(\" in self.name: s = self.name return s[s.find(\"(\")+1:s.find(\")\")] else:", "= models.TextField(null=True, blank=True) date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True) file = models.FileField(null=True, blank=True, upload_to='references',", "return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people = models.ForeignKey(People, on_delete=models.CASCADE)", "'Relation in UM systems'), ('2', 'Flows of substances'), ('1', 'Environmental impacts'), ) method_class", "was a previous classification - can be left empty\") def __str__(self): return self.name", "variations={'thumb': (300, 300), 'large': (800, 800)}, null=True, blank=True) 
image1 = StdImageField(upload_to='projects', variations={'thumb': (300,", "('GE', 'German'), ('NL', 'Dutch'), ('OT', 'Other'), ) language = models.CharField(max_length=2, choices=LANGUAGES) title_original_language =", "blank=True) parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden': False}, related_name='children' ) hidden =", "null=True, blank=True) supervisor = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True, blank=True) description", "'3 - The item is a defining feature of the approach'), ('2', '2", "next steps to further develop/improve this methodology\") representative_paper = models.TextField(null=True, blank=True, help_text=\"Which paper", "max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Specification of flows between sectors, industries or acticity fields,", "VideoCollectionForm(ModelForm): class Meta: model = VideoCollection exclude = ['id', 'site'] class Video(models.Model): title", "choices=METHOD_SCORING, null=True, blank=True) stock_changes = models.CharField(\"stock changes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) specific =", "'0 - Not included at all'), ) substances = models.CharField(\"selected specific substances\", max_length=1,", "null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) researchgate", "should # be ordered in reverse-chronological order. 
We can override this on a", "= models.TextField(null=True, blank=True) internal_notes = models.TextField(null=True, blank=True) complete = models.NullBooleanField(null=True, blank=True) include_in_list =", "models.CharField(max_length=255, null=True, blank=True) institution = models.CharField(max_length=255, null=True, blank=True) organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True)", "on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True, blank=True) model_id = models.PositiveIntegerField(null=True, blank=True)", "choices=SECTIONS, default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site = CurrentSiteManager()", "PeopleNote(models.Model): people = models.ForeignKey(People, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) note = models.TextField(null=True, blank=True) created_by", "ReferenceType(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True, blank=True) GROUP = ( ('academic',", "= models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255,", "updated. updated_at = models.DateTimeField(auto_now=True) class Meta: abstract = True # By default, any", "ArticleForm(ModelForm): class Meta: model = Article fields = ['title', 'introduction', 'content', 'image', 'active']", "class Meta: ordering = [\"name\"] class Journal(models.Model): name = models.CharField(max_length=255) website = models.CharField(max_length=255,", "'YouTube'), ('vimeo', 'Vimeo'), ('wikimedia', 'Wikimedia Commons'), ('other', 'Other website'), ) website = models.CharField(max_length=20,", "the techique'), ('1', '1 - The item is included only occasionally in the", "# be ordered in reverse-chronological order. 
We can override this on a #", "= models.CharField(max_length=255) url = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True, blank=True) linkedin", "'Other website'), ) website = models.CharField(max_length=20, choices=VIDEOSITES) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects", "models.ManyToManyField(People, blank=True) active = models.BooleanField(default=True) SECTIONS = ( ('about', 'About'), ('community', 'Community'), ('research',", "HTMLField('description', null=True, blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True)", "appear in the list, please leave empty and add the name in the", "of substances'), ('1', 'Environmental impacts'), ) method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True) category", "-- if there is any\" if \"(\" in self.name: s = self.name return", "verbose_name_plural = \"Method families\" verbose_name = \"method family\" class Method(models.Model): tag = models.OneToOneField(Tag,", "models.CharField(max_length=20, choices=GROUP, null=True, blank=True) def __str__(self): return self.name class Meta: ordering = [\"name\"]", "start = models.DateField(null=True, blank=True) end = models.DateField(null=True, blank=True) type = models.CharField(max_length=20, choices=EVENT_TYPE) estimated_date", "settings # Used for image resizing from stdimage.models import StdImageField import re from", "= models.TextField(null=True, blank=True, help_text=\"Which paper is a representative case study using this methodology?\")", "= models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) position", "blank=True) strengths = HTMLField('strengths', null=True, blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) 
def __str__(self):", "datasets = models.BooleanField() news = models.BooleanField() events = models.BooleanField() publications = models.BooleanField() dataviz", "default=False) is_accounting_method = models.BooleanField(db_index=True, default=False) PARENTS = ( (1, 'Publication Types'), (2, 'Metabolism", "= models.ManyToManyField(ReferenceSpace, blank=True) def __str__(self): return self.title class Meta: ordering = [\"-year\", \"title\"]", "class VideoCollection(models.Model): title = models.CharField(max_length=255) description = HTMLField('description', null=True, blank=True) site = models.ForeignKey(Site,", "reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True) process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True) date", "reference = models.ForeignKey(Reference, on_delete=models.CASCADE) TYPES = ( ('publisher', 'Publisher'), ('commissioner', 'Commissioner'), ('organization', 'Organization'),", "help_text=\"Name of the source website/article -- ONLY enter if this is not linked", "models.CharField(max_length=255, null=True, blank=True) start_date = models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True) STATUS =", "models.CharField(\"accounts for hidden flows\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) impacts = models.CharField(\"quantitative weighting of", ") parent = models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text=\"This was a previous classification -", "('yes', 'Yes - completed'), ) cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True) logo =", "return self.name class Meta: ordering = [\"name\"] class Journal(models.Model): name = models.CharField(max_length=255) website", "('retired', 'Retired'), ('deceased', 'Deceased'), ('inactive', 'Inactive'), ('pending', 'Pending Review'), ) status = models.CharField(max_length=8,", "class 
EventForm(ModelForm): class Meta: model = Event exclude = ['article'] class VideoCollection(models.Model): title", "firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) affiliation = models.CharField(max_length=255, null=True, blank=True) email =", "models.TextField(null=True, blank=True) output_reports = models.TextField(null=True, blank=True) output_articles = models.TextField(null=True, blank=True) funding_program = models.CharField(max_length=255,", "blank=True) weaknesses = HTMLField('weaknesses', null=True, blank=True) STATUS = ( ('nw', 'Not worked on'),", "= models.TextField(null=True, blank=True) class Meta: ordering = [\"-date\"] class Color(models.Model): name = models.CharField(max_length=20)", "please leave empty and add the name in the comments\") event = models.ForeignKey(Event,", "blank=True) thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails') license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True) def", "By default, any model that inherits from `TimestampedModel` should # be ordered in", "is a representative case study using this methodology?\") materials_catalog_used = models.TextField(null=True, blank=True) also_known_as", "('active', 'Active'), ('deleted', 'Deleted'), ) status = models.CharField(max_length=8, choices=STATUS, db_index=True) authors = models.ManyToManyField(People,", "does this particular methodology address?\") next_steps = models.TextField(null=True, blank=True,help_text=\"The proposed next steps to", "= models.BooleanField() publications = models.BooleanField() dataviz = models.BooleanField() multimedia = models.BooleanField() projects =", "progress'), ('dr', 'Draft ready for review'), ('rv', 'Reviewed - DONE'), ('ec', 'External copy'),", "'URL', } class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) 
TYPES", "models.CharField(max_length=255, null=True, blank=True) isbn = models.CharField(max_length=255, null=True, blank=True) comments = models.TextField(null=True, blank=True) STATUS", "related_name='people_city', limit_choices_to={'type': 3}) country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2}) profile", "methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318},", "null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) logo = models.ImageField(null=True, blank=True, upload_to='organizations') processes", "'DOI', 'isbn': 'ISBN', 'url': 'URL', } class ReferenceOrganization(models.Model): organization = models.ForeignKey(Organization, on_delete=models.CASCADE) reference", "blank=True) linkedin = models.CharField(max_length=255, null=True, blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True) logo =", "paper is a representative case study using this methodology?\") materials_catalog_used = models.TextField(null=True, blank=True)", "blank=True) output_tools = models.TextField(null=True, blank=True) output_reports = models.TextField(null=True, blank=True) output_articles = models.TextField(null=True, blank=True)", "models.CharField(max_length=20, choices=SECTIONS, default='about') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID) objects = models.Manager() on_site =", "'Applied research'), ) type = models.CharField(max_length=20, choices=TYPE) THESISTYPE = ( ('bachelor', 'Bachelor'), ('masters',", "'Pending Review'), ) status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active') site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)", "= 
models.CharField(max_length=255, null=True, blank=True) supervisor = models.CharField(max_length=255, null=True, blank=True) email = models.CharField(max_length=255, null=True,", "models.TextField(null=True, blank=True, help_text=\"Which paper is a representative case study using this methodology?\") materials_catalog_used", "= models.ManyToManyField(MaterialGroup, blank=True) ongoing = models.CharField(max_length=255, null=True, blank=True, help_text=\"Do they continue to implement", "help_text=\"Square photos are best - please resize to 350x350 pixels\") PEOPLE_STATUS = (", "null=True, blank=True) full_name = models.CharField(max_length=255, null=True, blank=True) institution = models.CharField(max_length=255, null=True, blank=True) organizations", "models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False}) processes", "null=True, blank=True) production = models.CharField(\"production processes\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) between_flows = models.CharField(\"between-flows\",", "class UserAction(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class UserLog(models.Model): user =", "('l', 'Low'), ('m', 'Medium'), ('h', 'High'), ) relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True)", "help_text=\"Indicators\") pros = models.TextField(null=True, blank=True, help_text=\"Indicators\") cons = models.TextField(null=True, blank=True, help_text=\"Indicators\") purpose =", "that inherits from `TimestampedModel` should # be ordered in reverse-chronological order. 
We can", "models.BooleanField(db_index=True, default=False) PARENTS = ( (1, 'Publication Types'), (2, 'Metabolism Studies'), (3, 'Countries'),", "models.BooleanField() multimedia = models.BooleanField() projects = models.BooleanField() theses = models.BooleanField() reference_spaces = models.ManyToManyField(ReferenceSpace,", "/ Project leader(s)', 'url': 'URL', } class ProjectUserForm(ModelForm): class Meta: model = Project", "__str__(self): return self.title class VideoForm(ModelForm): class Meta: model = Video exclude = ['id',", "self.type + \" - \" + self.project.name class Timeline(models.Model): title = models.CharField(max_length=255) description", "models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True, blank=True) model_id = models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True,", "acticity fields, or other system components\") classification = models.ManyToManyField(MethodClassification, blank=True) scale = models.ManyToManyField(Tag,", "HTMLField('description', null=True, blank=True) target_finish_date = models.CharField(max_length=255, null=True, blank=True) start_date = models.DateField(blank=True, null=True) end_date", "self.type.name def accountingMethods(self): return self.tags.filter(is_accounting_method=True, hidden=False) class ReferenceAuthors(models.Model): reference = models.ForeignKey(Reference, on_delete=models.CASCADE) people", "models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return self.name class Meta: ordering = [\"name\"] class OrganizationForm(ModelForm):", "the study\") def __str__(self): return self.title class Meta: verbose_name_plural = \"case studies\" class", "models.NullBooleanField(null=True, blank=True) cityloops = models.BooleanField(default=False) cityloops_comments = models.TextField(null=True, blank=True) cityloops_comments_import = models.TextField(null=True, blank=True,", "budget = models.DecimalField(max_digits=10, decimal_places=2, 
null=True, blank=True) print_aim = models.TextField(null=True, blank=True) print_relevance = models.TextField(null=True,", "ONLY enter if this is not linked to a publication\") class Meta: ordering", "developed_by = models.CharField(max_length=255, null=True, blank=True) based_on = models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True, blank=True,", "[\"date\"] class Article(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True) introduction", "model = Reference exclude = ['id', 'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces', 'tags',", "when this object was last updated. updated_at = models.DateTimeField(auto_now=True) class Meta: abstract =", "'English'), ('ES', 'Spanish'), ('CH', 'Chinese'), ('FR', 'French'), ('GE', 'German'), ('NL', 'Dutch'), ('OT', 'Other'),", "models.TextField(null=True, blank=True, help_text=\"What gaps does in other methodologies does this particular methodology address?\")", "= models.CharField(max_length=8, choices=STATUS, db_index=True) authors = models.ManyToManyField(People, through='ReferenceAuthors') organizations = models.ManyToManyField(Organization, through='ReferenceOrganization') tags", "__str__(self): return self.title class Meta: ordering = [\"title\"] class ArticleForm(ModelForm): class Meta: model", "= models.CharField(max_length=255, null=True, blank=True) model_id = models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True, blank=True) class", "def __str__(self): return self.title class Meta: ordering = [\"-year\", \"title\"] def source(self): \"Return", "temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True) data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True) cradle_to_grave", "( ('u', 'Unknown'), ('l', 'Low'), ('m', 'Medium'), ('h', 'High'), ) relevance 
= models.CharField(max_length=1,", "pending_review = models.BooleanField(default=True) TYPE = ( ('theses', 'Theses'), ('projects', 'Projects'), ('applied', 'Applied research'),", "substances = models.CharField(\"selected specific substances\", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text=\"Elements and basic compounds", "class Meta: db_table = 'core_reference_authors' class ReferenceForm(ModelForm): class Meta: model = Reference fields", "'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 'comments', 'file'] labels = { 'authorlist':", "description = models.TextField(null=True, blank=True) class Meta: ordering = [\"-date\"] class Color(models.Model): name =", "help_text=\"Do they continue to implement it?\") consideration = models.TextField(null=True, blank=True, help_text=\"Circular economy /", "blank=True) developed_by = models.CharField(max_length=255, null=True, blank=True) based_on = models.TextField(null=True, blank=True) gaps_addressed = models.TextField(null=True,", "models.CharField(max_length=255, null=True, blank=True) model_id = models.PositiveIntegerField(null=True, blank=True) description = models.TextField(null=True, blank=True) class Meta:", "models.CharField(\"specific goods and services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) production = models.CharField(\"production processes\", max_length=1,", "blank=True) internal_notes = models.TextField(null=True, blank=True) complete = models.NullBooleanField(null=True, blank=True) include_in_list = models.NullBooleanField(default=False) def", "models.CharField(max_length=255, null=True, blank=True) logo = models.ImageField(null=True, blank=True, upload_to='organizations') processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull':", "object was created. 
created_at = models.DateTimeField(auto_now_add=True) # A timestamp reprensenting when this object", "models.CharField(max_length=255) description = models.TextField(null=True, blank=True) link = models.CharField(max_length=255, null=True, blank=True) date = models.DateField()", "= models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True) year = models.PositiveSmallIntegerField() abstract = models.TextField(null=True, blank=True) abstract_original_language", "('ngo', 'NGO'), ('other', 'Other'), ) type = models.CharField(max_length=20, choices=ORG_TYPE) def __str__(self): return self.name", "\" - \" + self.project.name class Timeline(models.Model): title = models.CharField(max_length=255) description = models.TextField(null=True,", "choices=STATUS, default='ongoing') active = models.BooleanField(default=True) pending_review = models.BooleanField(default=True) TYPE = ( ('theses', 'Theses'),", "= models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True, blank=True) model_id = models.PositiveIntegerField(null=True, blank=True) description =", "or acticity fields, or other system components\") classification = models.ManyToManyField(MethodClassification, blank=True) scale =", "'Cities'), (5, 'Scales'), (6, 'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'), (10, 'Other'), )", "choices=THESISTYPE, null=True, blank=True) url = models.CharField(max_length=255, null=True, blank=True) references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status':", "choices=METHOD_SCORING, null=True, blank=True) specific = models.CharField(\"specific goods and services\", max_length=1, choices=METHOD_SCORING, null=True, blank=True)", "research_interests = models.TextField(null=True, blank=True) website = models.CharField(max_length=255, null=True, blank=True) twitter = models.CharField(max_length=255, null=True,", "models.CharField(max_length=255, null=True, blank=True) orcid = models.CharField(max_length=255, null=True, 
blank=True) researchgate = models.CharField(max_length=255, null=True, blank=True)", "outputs = models.CharField(\"outputs to environment\", max_length=1, choices=METHOD_SCORING, null=True, blank=True) recycling = models.CharField(\"recyling of", "models.DateTimeField(auto_now_add=True) action = models.ForeignKey(UserAction, on_delete=models.CASCADE) points = models.PositiveSmallIntegerField() model = models.CharField(max_length=255, null=True, blank=True)", "this object was last updated. updated_at = models.DateTimeField(auto_now=True) class Meta: abstract = True", "'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces', 'tags', 'materials'] labels = { 'authorlist': 'Author(s)',", "null=True, blank=True) image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True) image3", "('newsevents', 'News and Events'), ) section = models.CharField(max_length=20, choices=SECTIONS, default='about') site = models.ForeignKey(Site,", "class ReferenceType(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255, null=True, blank=True) GROUP = (", "class Meta: ordering = [\"-year\", \"title\"] def source(self): \"Return details of where this", "a required field - be sure to fill this out\") image = models.ImageField(null=True,", "you are the creator or you have permission to do so') open_access =", "source(self): \"Return details of where this reference was published at/in\" if self.journal: return", "models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}) reference = models.ForeignKey(Reference, on_delete=models.CASCADE) spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id':", "related_name='people_country', limit_choices_to={'type': 2}) profile = models.TextField(null=True, blank=True) research_interests = models.TextField(null=True, blank=True) website =", "material_scope = 
models.CharField(max_length=255, null=True, blank=True) METHOD_SCORING = ( ('3', '3 - The item", "blank=True, limit_choices_to={'slug__isnull': False}) reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True) description = HTMLField(null=True, blank=True) parent =", "end_date = models.DateField(blank=True, null=True) STATUS = ( ('planned', 'Planned'), ('ongoing', 'In progress'), ('finished',", "'2 - The feature is typically included in the techique'), ('1', '1 -", "= models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, limit_choices_to={'hidden': False}, related_name='children' ) hidden = models.BooleanField(db_index=True, default=False,", "blank=True) methodologies = models.TextField(null=True, blank=True) methodologies_processing_notes = models.TextField(null=True, blank=True) methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id':", "fields = ['title', 'introduction', 'content', 'image', 'active'] class SimpleArticleForm(ModelForm): class Meta: model =", "def __str__(self): return self.name class MethodCategory(models.Model): name = models.CharField(max_length=255) description = HTMLField('description', null=True,", "Event(models.Model): article = models.OneToOneField( Article, on_delete=models.CASCADE, related_name='event', primary_key=True, ) EVENT_TYPE = ( ('conference',", "'supervisor': 'Supervisor(s) / Project leader(s)', 'url': 'URL', } class ProjectOrganization(models.Model): organization = models.ForeignKey(Organization,", "class Meta: ordering = [\"date\"] def __str__(self): return self.title class NewsletterSubscriber(models.Model): people =", "fields = ['title', 'image', 'date', 'head', 'includes_form', 'slug', 'active','content'] class Event(models.Model): article =", ") type = models.CharField(max_length=20, choices=TYPES) def __str__(self): return self.organization.name + \" - \"", "( ('about', 'About'), ('community', 'Community'), ('research', 'Research'), ('resources', 'Resources'), 
('cities', 'Cities'), ('whatwedo', 'What", "they continue to implement it?\") consideration = models.TextField(null=True, blank=True, help_text=\"Circular economy / closing", "= models.ForeignKey(Site, on_delete=models.CASCADE) objects = models.Manager() on_site = CurrentSiteManager() def __str__(self): return self.name" ]
[ "'\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version]) if results.status_code == 200: output =", "2, 3], help='Specify the version of CakePHP you wish to install.') parser.add_argument('--dir', default='/vagrant',", "CakePHP.' sys.exit(1) except zipfile.BadZipfile as bzfe: print 'Error unzipping CakePHP.' sys.exit(1) except shutil.Error", "+ args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version]) if results.status_code ==", "zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/' + dirname names = os.listdir(contentsPath) for name", "zipfile.BadZipfile as bzfe: print 'Error unzipping CakePHP.' sys.exit(1) except shutil.Error as sue: print", "IOError as ioe: print 'Could not write to the /tmp directory.' sys.exit(1) except", "for name in names: if os.path.isdir(contentsPath + name): shutil.copytree(contentsPath + name, args.dir +", "3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer install --prefer-dist -d ' + args.dir", "was a problem downloading CakePHP.' sys.exit(1) except zipfile.BadZipfile as bzfe: print 'Error unzipping", "installed successfully.\\n') sys.stdout.flush() except IOError as ioe: print 'Could not write to the", "os import shutil import shlex, subprocess from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects", "args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version]) if results.status_code == 200:", "sys.exit(1) except zipfile.BadZipfile as bzfe: print 'Error unzipping CakePHP.' 
sys.exit(1) except shutil.Error as", "== 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer install --prefer-dist -d ' +", "as ioe: print 'Could not write to the /tmp directory.' sys.exit(1) except (ConnectionError,", "files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath =", "Apache webroot accordingly.') argsObj = parser.parse_args() try: run(argsObj) except KeyboardInterrupt: print 'Exit signal", "however composer encountered errors when installing the dependencies. Try running \\'composer install\\' yourself", "'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0]", "proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was installed, however composer encountered errors", "sys.exit(1) except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as re: print 'There was a problem", "shutil.Error as sue: print 'Make sure ' + args.dir + ' is writable.'", "parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3], help='Specify the version of CakePHP you wish", "bzfe: print 'Error unzipping CakePHP.' sys.exit(1) except shutil.Error as sue: print 'Make sure", "= open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname", "downloading CakePHP.' sys.exit(1) except zipfile.BadZipfile as bzfe: print 'Error unzipping CakePHP.' 
sys.exit(1) except", "requests import zipfile import sys, os import shutil import shlex, subprocess from requests.exceptions", "shutil import shlex, subprocess from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects def run(args):", "3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that --dir exists, if not create it if", "= zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/' + dirname names = os.listdir(contentsPath)", "except shutil.Error as sue: print 'Make sure ' + args.dir + ' is", "args.version == 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer install --prefer-dist -d '", "__name__ == '__main__': parser = argparse.ArgumentParser(description='Install a fresh copy of CakePHP.') parser.add_argument('--version', default=2,", "the Apache webroot accordingly.') argsObj = parser.parse_args() try: run(argsObj) except KeyboardInterrupt: print 'Exit", "of CakePHP you wish to install.') parser.add_argument('--dir', default='/vagrant', help='Select the directory where CakePHP", "name) else: shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version == 3: sys.stdout.write('Running", "sys.stdout.write('[DONE]\\n') if args.version == 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer install --prefer-dist", "if (proc.stderr.read()): sys.stdout.write('CakePHP was installed, however composer encountered errors when installing the dependencies.", "copy of CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3], help='Specify the version of", "not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: ' + args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush()", "exists, if not create it if not 
os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: ' +", "zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/' + dirname names", "os.makedirs(args.dir) sys.stdout.write('Created directory: ' + args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results =", "errors when installing the dependencies. Try running \\'composer install\\' yourself in the '", "sys.exit(1) except shutil.Error as sue: print 'Make sure ' + args.dir + '", "print 'Error unzipping CakePHP.' sys.exit(1) except shutil.Error as sue: print 'Make sure '", "shlex, subprocess from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects def run(args): zipurls =", "writable.' sys.exit(1) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Install a fresh copy of", "import zipfile import sys, os import shutil import shlex, subprocess from requests.exceptions import", "sue: print 'Make sure ' + args.dir + ' is writable.' sys.exit(1) if", "= argparse.ArgumentParser(description='Install a fresh copy of CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3],", "accordingly.') argsObj = parser.parse_args() try: run(argsObj) except KeyboardInterrupt: print 'Exit signal received...' sys.exit(1)", "installing the dependencies. 
Try running \\'composer install\\' yourself in the ' + args.dir", "else: shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version == 3: sys.stdout.write('Running composer...\\n\\r')", "'/usr/local/bin/composer install --prefer-dist -d ' + args.dir + ' --dev' commandargs = shlex.split(command)", "sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip')", "wish to install.') parser.add_argument('--dir', default='/vagrant', help='Select the directory where CakePHP will be installed,", "= os.listdir(contentsPath) for name in names: if os.path.isdir(contentsPath + name): shutil.copytree(contentsPath + name,", "= shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was installed, however composer", "Timeout, TooManyRedirects def run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try:", "CakePHP will be installed, alter the Apache webroot accordingly.') argsObj = parser.parse_args() try:", "if not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: ' + args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t')", "subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was installed, however composer encountered errors when installing", "help='Select the directory where CakePHP will be installed, alter the Apache webroot accordingly.')", "(ConnectionError, HTTPError, Timeout, TooManyRedirects) as re: print 'There was a problem downloading CakePHP.'", "os.listdir(contentsPath) for name in names: if 
os.path.isdir(contentsPath + name): shutil.copytree(contentsPath + name, args.dir", "print 'Make sure ' + args.dir + ' is writable.' sys.exit(1) if __name__", "\\'composer install\\' yourself in the ' + args.dir + ' directory.') sys.stdout.flush() sys.exit(1)", "args.dir + ' is writable.' sys.exit(1) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Install", "= requests.get(zipurls[args.version]) if results.status_code == 200: output = open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n')", "CakePHP.' sys.exit(1) except shutil.Error as sue: print 'Make sure ' + args.dir +", "that --dir exists, if not create it if not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory:", "<filename>src/cake-install.py #!/usr/bin/python import argparse import requests import zipfile import sys, os import shutil", "dependencies. Try running \\'composer install\\' yourself in the ' + args.dir + '", "'Error unzipping CakePHP.' sys.exit(1) except shutil.Error as sue: print 'Make sure ' +", "problem downloading CakePHP.' sys.exit(1) except zipfile.BadZipfile as bzfe: print 'Error unzipping CakePHP.' 
sys.exit(1)", "# check that --dir exists, if not create it if not os.path.exists(args.dir): os.makedirs(args.dir)", "3], help='Specify the version of CakePHP you wish to install.') parser.add_argument('--dir', default='/vagrant', help='Select", "dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/' + dirname names =", "== '__main__': parser = argparse.ArgumentParser(description='Install a fresh copy of CakePHP.') parser.add_argument('--version', default=2, type=int,", "commandargs = shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was installed, however", "stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was installed, however composer encountered errors when installing the", "--prefer-dist -d ' + args.dir + ' --dev' commandargs = shlex.split(command) proc =", "zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/' +", "check that --dir exists, if not create it if not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created", "os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: ' + args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results", "if results.status_code == 200: output = open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t')", "+ args.dir + ' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version) + '", "+ ' --dev' commandargs = shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP", "'There was a problem downloading CakePHP.' 
sys.exit(1) except zipfile.BadZipfile as bzfe: print 'Error", "'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that --dir exists, if not create it if not", "create it if not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: ' + args.dir + '\\n')", "+ ' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version) + ' was installed", "re: print 'There was a problem downloading CakePHP.' sys.exit(1) except zipfile.BadZipfile as bzfe:", "ioe: print 'Could not write to the /tmp directory.' sys.exit(1) except (ConnectionError, HTTPError,", "results = requests.get(zipurls[args.version]) if results.status_code == 200: output = open('/tmp/file.zip', 'wb') output.write(results.content) output.close()", "not create it if not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: ' + args.dir +", "sys.stdout.write('Created directory: ' + args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version])", "running \\'composer install\\' yourself in the ' + args.dir + ' directory.') sys.stdout.flush()", "' + args.dir + ' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version) +", "import requests import zipfile import sys, os import shutil import shlex, subprocess from", "as bzfe: print 'Error unzipping CakePHP.' sys.exit(1) except shutil.Error as sue: print 'Make", "HTTPError, Timeout, TooManyRedirects) as re: print 'There was a problem downloading CakePHP.' 
sys.exit(1)", "directory: ' + args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version]) if", "+ ' was installed successfully.\\n') sys.stdout.flush() except IOError as ioe: print 'Could not", "= zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/' + dirname", "zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/' + dirname names = os.listdir(contentsPath) for name in", "output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close()", "args.dir + ' --dev' commandargs = shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()):", "to the /tmp directory.' sys.exit(1) except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as re: print", "sys.stdout.flush() results = requests.get(zipurls[args.version]) if results.status_code == 200: output = open('/tmp/file.zip', 'wb') output.write(results.content)", "argparse.ArgumentParser(description='Install a fresh copy of CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3], help='Specify", "directory where CakePHP will be installed, alter the Apache webroot accordingly.') argsObj =", "installed, however composer encountered errors when installing the dependencies. 
Try running \\'composer install\\'", "installed, alter the Apache webroot accordingly.') argsObj = parser.parse_args() try: run(argsObj) except KeyboardInterrupt:", "import ConnectionError, HTTPError, Timeout, TooManyRedirects def run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip',", "composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer install --prefer-dist -d ' + args.dir + '", "open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname =", "zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that --dir", "'/tmp/' + dirname names = os.listdir(contentsPath) for name in names: if os.path.isdir(contentsPath +", "args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version == 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer", "command = '/usr/local/bin/composer install --prefer-dist -d ' + args.dir + ' --dev' commandargs", "directory.' sys.exit(1) except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as re: print 'There was a", "' is writable.' sys.exit(1) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Install a fresh", "'Could not write to the /tmp directory.' 
sys.exit(1) except (ConnectionError, HTTPError, Timeout, TooManyRedirects)", "parser.add_argument('--dir', default='/vagrant', help='Select the directory where CakePHP will be installed, alter the Apache", "' + str(args.version) + ' was installed successfully.\\n') sys.stdout.flush() except IOError as ioe:", "{1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that --dir exists, if", "+ '/' + name) else: shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version", "+ name) else: shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version == 3:", "+ dirname names = os.listdir(contentsPath) for name in names: if os.path.isdir(contentsPath + name):", "= '/usr/local/bin/composer install --prefer-dist -d ' + args.dir + ' --dev' commandargs =", "shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was installed, however composer encountered", "the /tmp directory.' sys.exit(1) except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as re: print 'There", "dirname names = os.listdir(contentsPath) for name in names: if os.path.isdir(contentsPath + name): shutil.copytree(contentsPath", "sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version]) if results.status_code == 200: output = open('/tmp/file.zip',", "CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version]) if results.status_code == 200: output = open('/tmp/file.zip', 'wb')", "(proc.stderr.read()): sys.stdout.write('CakePHP was installed, however composer encountered errors when installing the dependencies. 
Try", "= {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that --dir exists,", "' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version) + ' was installed successfully.\\n')", "'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that --dir exists, if not", "was installed successfully.\\n') sys.stdout.flush() except IOError as ioe: print 'Could not write to", "+ args.dir + ' --dev' commandargs = shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if", "args.dir + '/' + name) else: shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if", "requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects def run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2:", "def run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check", "it if not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: ' + args.dir + '\\n') sys.stdout.write('Downloading", "write to the /tmp directory.' sys.exit(1) except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as re:", "except IOError as ioe: print 'Could not write to the /tmp directory.' 
sys.exit(1)", "import shlex, subprocess from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects def run(args): zipurls", "2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that --dir exists, if not create", "you wish to install.') parser.add_argument('--dir', default='/vagrant', help='Select the directory where CakePHP will be", "/tmp directory.' sys.exit(1) except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as re: print 'There was", "a problem downloading CakePHP.' sys.exit(1) except zipfile.BadZipfile as bzfe: print 'Error unzipping CakePHP.'", "sys.stdout.write('CakePHP was installed, however composer encountered errors when installing the dependencies. Try running", "is writable.' sys.exit(1) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Install a fresh copy", "' + args.dir + ' --dev' commandargs = shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE)", "= subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was installed, however composer encountered errors when", "+ ' is writable.' sys.exit(1) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Install a", "the version of CakePHP you wish to install.') parser.add_argument('--dir', default='/vagrant', help='Select the directory", "except zipfile.BadZipfile as bzfe: print 'Error unzipping CakePHP.' 
sys.exit(1) except shutil.Error as sue:", "in names: if os.path.isdir(contentsPath + name): shutil.copytree(contentsPath + name, args.dir + '/' +", "'__main__': parser = argparse.ArgumentParser(description='Install a fresh copy of CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1,", "name in names: if os.path.isdir(contentsPath + name): shutil.copytree(contentsPath + name, args.dir + '/'", "will be installed, alter the Apache webroot accordingly.') argsObj = parser.parse_args() try: run(argsObj)", "if args.version == 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer install --prefer-dist -d", "successfully.\\n') sys.stdout.flush() except IOError as ioe: print 'Could not write to the /tmp", "TooManyRedirects) as re: print 'There was a problem downloading CakePHP.' sys.exit(1) except zipfile.BadZipfile", "ConnectionError, HTTPError, Timeout, TooManyRedirects def run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3:", "where CakePHP will be installed, alter the Apache webroot accordingly.') argsObj = parser.parse_args()", "Try running \\'composer install\\' yourself in the ' + args.dir + ' directory.')", "import sys, os import shutil import shlex, subprocess from requests.exceptions import ConnectionError, HTTPError,", "install.') parser.add_argument('--dir', default='/vagrant', help='Select the directory where CakePHP will be installed, alter the", "argparse import requests import zipfile import sys, os import shutil import shlex, subprocess", "default='/vagrant', help='Select the directory where CakePHP will be installed, alter the Apache webroot", "shutil.copytree(contentsPath + name, args.dir + '/' + name) else: shutil.copy2(contentsPath + name, args.dir)", "if os.path.isdir(contentsPath + name): shutil.copytree(contentsPath + name, args.dir + '/' + name) else:", "sys.stdout.write('Running 
composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer install --prefer-dist -d ' + args.dir +", "in the ' + args.dir + ' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' +", "zipfile import sys, os import shutil import shlex, subprocess from requests.exceptions import ConnectionError,", "run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that", "200: output = open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile =", "the directory where CakePHP will be installed, alter the Apache webroot accordingly.') argsObj", "try: # check that --dir exists, if not create it if not os.path.exists(args.dir):", "output = open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip')", "' + args.dir + ' is writable.' 
sys.exit(1) if __name__ == '__main__': parser", "os.remove('/tmp/file.zip') contentsPath = '/tmp/' + dirname names = os.listdir(contentsPath) for name in names:", "webroot accordingly.') argsObj = parser.parse_args() try: run(argsObj) except KeyboardInterrupt: print 'Exit signal received...'", "sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version) + ' was installed successfully.\\n') sys.stdout.flush() except", "of CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3], help='Specify the version of CakePHP", "names: if os.path.isdir(contentsPath + name): shutil.copytree(contentsPath + name, args.dir + '/' + name)", "+ name, args.dir + '/' + name) else: shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath)", "sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/'", "to install.') parser.add_argument('--dir', default='/vagrant', help='Select the directory where CakePHP will be installed, alter", "install\\' yourself in the ' + args.dir + ' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP", "CakePHP you wish to install.') parser.add_argument('--dir', default='/vagrant', help='Select the directory where CakePHP will", "subprocess from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects def run(args): zipurls = {1:", "+ args.dir + ' is writable.' 
sys.exit(1) if __name__ == '__main__': parser =", "sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath", "the ' + args.dir + ' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version)", "choices=[1, 2, 3], help='Specify the version of CakePHP you wish to install.') parser.add_argument('--dir',", "--dir exists, if not create it if not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: '", "be installed, alter the Apache webroot accordingly.') argsObj = parser.parse_args() try: run(argsObj) except", "name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version == 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command =", "alter the Apache webroot accordingly.') argsObj = parser.parse_args() try: run(argsObj) except KeyboardInterrupt: print", "'Make sure ' + args.dir + ' is writable.' sys.exit(1) if __name__ ==", "type=int, choices=[1, 2, 3], help='Specify the version of CakePHP you wish to install.')", "requests.get(zipurls[args.version]) if results.status_code == 200: output = open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting", "except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as re: print 'There was a problem downloading", "sys.stdout.write('CakePHP ' + str(args.version) + ' was installed successfully.\\n') sys.stdout.flush() except IOError as", "not write to the /tmp directory.' 
sys.exit(1) except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as", "' --dev' commandargs = shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was", "as re: print 'There was a problem downloading CakePHP.' sys.exit(1) except zipfile.BadZipfile as", "help='Specify the version of CakePHP you wish to install.') parser.add_argument('--dir', default='/vagrant', help='Select the", "yourself in the ' + args.dir + ' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP '", "str(args.version) + ' was installed successfully.\\n') sys.stdout.flush() except IOError as ioe: print 'Could", "version of CakePHP you wish to install.') parser.add_argument('--dir', default='/vagrant', help='Select the directory where", "#!/usr/bin/python import argparse import requests import zipfile import sys, os import shutil import", "HTTPError, Timeout, TooManyRedirects def run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'}", "was installed, however composer encountered errors when installing the dependencies. Try running \\'composer", "sys.stdout.flush() command = '/usr/local/bin/composer install --prefer-dist -d ' + args.dir + ' --dev'", "encountered errors when installing the dependencies. Try running \\'composer install\\' yourself in the", "unzipping CakePHP.' sys.exit(1) except shutil.Error as sue: print 'Make sure ' + args.dir", "if __name__ == '__main__': parser = argparse.ArgumentParser(description='Install a fresh copy of CakePHP.') parser.add_argument('--version',", "the dependencies. Try running \\'composer install\\' yourself in the ' + args.dir +", "print 'Could not write to the /tmp directory.' 
sys.exit(1) except (ConnectionError, HTTPError, Timeout,", "'/' + name) else: shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version ==", "args.dir + ' directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version) + ' was", "+ '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version]) if results.status_code == 200: output", "Timeout, TooManyRedirects) as re: print 'There was a problem downloading CakePHP.' sys.exit(1) except", "shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version == 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command = '/usr/local/bin/composer install", "import shutil import shlex, subprocess from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects def", "-d ' + args.dir + ' --dev' commandargs = shlex.split(command) proc = subprocess.Popen(commandargs,", "import argparse import requests import zipfile import sys, os import shutil import shlex,", "shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version == 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush()", "zippedfile.namelist()[0] zippedfile.extractall('/tmp') zippedfile.close() os.remove('/tmp/file.zip') contentsPath = '/tmp/' + dirname names = os.listdir(contentsPath) for", "results.status_code == 200: output = open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush()", "directory.') sys.stdout.flush() sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version) + ' was installed successfully.\\n') sys.stdout.flush()", "print 'There was a problem downloading CakePHP.' 
sys.exit(1) except zipfile.BadZipfile as bzfe: print", "'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: # check that --dir exists, if not create it", "== 200: output = open('/tmp/file.zip', 'wb') output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile", "as sue: print 'Make sure ' + args.dir + ' is writable.' sys.exit(1)", "name): shutil.copytree(contentsPath + name, args.dir + '/' + name) else: shutil.copy2(contentsPath + name,", "--dev' commandargs = shlex.split(command) proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE) if (proc.stderr.read()): sys.stdout.write('CakePHP was installed,", "fresh copy of CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3], help='Specify the version", "' was installed successfully.\\n') sys.stdout.flush() except IOError as ioe: print 'Could not write", "TooManyRedirects def run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip', 2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip', 3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'} try: #", "output.write(results.content) output.close() sys.stdout.write('[DONE]\\n') sys.stdout.write('Extracting files...\\t\\t\\t') sys.stdout.flush() zippedfile = zipfile.ZipFile('/tmp/file.zip') dirname = zippedfile.namelist()[0] zippedfile.extractall('/tmp')", "names = os.listdir(contentsPath) for name in names: if os.path.isdir(contentsPath + name): shutil.copytree(contentsPath +", "a fresh copy of CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3], help='Specify the", "default=2, type=int, choices=[1, 2, 3], help='Specify the version of CakePHP you wish to", "if not create it if not os.path.exists(args.dir): os.makedirs(args.dir) sys.stdout.write('Created directory: ' + args.dir", "+ name): 
shutil.copytree(contentsPath + name, args.dir + '/' + name) else: shutil.copy2(contentsPath +", "+ str(args.version) + ' was installed successfully.\\n') sys.stdout.flush() except IOError as ioe: print", "os.path.isdir(contentsPath + name): shutil.copytree(contentsPath + name, args.dir + '/' + name) else: shutil.copy2(contentsPath", "when installing the dependencies. Try running \\'composer install\\' yourself in the ' +", "sys.exit(1) sys.stdout.write('CakePHP ' + str(args.version) + ' was installed successfully.\\n') sys.stdout.flush() except IOError", "contentsPath = '/tmp/' + dirname names = os.listdir(contentsPath) for name in names: if", "install --prefer-dist -d ' + args.dir + ' --dev' commandargs = shlex.split(command) proc", "= '/tmp/' + dirname names = os.listdir(contentsPath) for name in names: if os.path.isdir(contentsPath", "+ name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n') if args.version == 3: sys.stdout.write('Running composer...\\n\\r') sys.stdout.flush() command", "composer encountered errors when installing the dependencies. Try running \\'composer install\\' yourself in", "sys, os import shutil import shlex, subprocess from requests.exceptions import ConnectionError, HTTPError, Timeout,", "parser = argparse.ArgumentParser(description='Install a fresh copy of CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1, 2,", "sys.exit(1) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Install a fresh copy of CakePHP.')", "CakePHP.') parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3], help='Specify the version of CakePHP you", "name, args.dir + '/' + name) else: shutil.copy2(contentsPath + name, args.dir) shutil.rmtree(contentsPath) sys.stdout.write('[DONE]\\n')", "sys.stdout.flush() except IOError as ioe: print 'Could not write to the /tmp directory.'", "sure ' + args.dir + ' is writable.' 
sys.exit(1) if __name__ == '__main__':", "from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects def run(args): zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip',", "' + args.dir + '\\n') sys.stdout.write('Downloading CakePHP...\\t\\t\\t') sys.stdout.flush() results = requests.get(zipurls[args.version]) if results.status_code" ]
[ "import render_template from app.views.admin import bp_admin @bp_admin.route('/') def index(): return render_template('admin/index.html') @bp_admin.route('/dashboard') def", "from app.views.admin import bp_admin @bp_admin.route('/') def index(): return render_template('admin/index.html') @bp_admin.route('/dashboard') def dashboard(): return", "from flask import render_template from app.views.admin import bp_admin @bp_admin.route('/') def index(): return render_template('admin/index.html')", "app.views.admin import bp_admin @bp_admin.route('/') def index(): return render_template('admin/index.html') @bp_admin.route('/dashboard') def dashboard(): return render_template('admin/dashboard.html')", "flask import render_template from app.views.admin import bp_admin @bp_admin.route('/') def index(): return render_template('admin/index.html') @bp_admin.route('/dashboard')", "render_template from app.views.admin import bp_admin @bp_admin.route('/') def index(): return render_template('admin/index.html') @bp_admin.route('/dashboard') def dashboard():" ]
[ "# 导入所有模型, 用于迁移文件 from db.base_class import Base from models.user import User from models.movie", "用于迁移文件 from db.base_class import Base from models.user import User from models.movie import Movie", "导入所有模型, 用于迁移文件 from db.base_class import Base from models.user import User from models.movie import", "<filename>db/base.py<gh_stars>1-10 # 导入所有模型, 用于迁移文件 from db.base_class import Base from models.user import User from" ]
[ "for i in range(0,count): if i > 0 and i < count-1: L2[i]", "unpack=True) t = np.linspace(1, 2, 11) u = lagrange_interpolate(x, y, t) td =", "np.loadtxt('Data/Td.dat', unpack=True) count = len(X) def RE1(Y): R1 = np.ones(count) \"\"\" doc \"\"\"", "for i in range(0,count): if i > 0 and i < count -", "> 0 and i < count - 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i]", "color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% ####", "+ 1 return R1 def RE2(Y): R2 = np.ones(count) for i in range(0,", "%f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1],", "potential\") plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive", "\"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\");", "if i < count - 2 : R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] =", "as np e = 2.71828182845904523536 k = 1.380649e-23 v = np.array(np.arange(0,2,0.001)) a =", "= RE2(Y) td = time.time() - to print(\"The time interval is %f s.\"", "L2 L2 = np.empty(count) for i in range(0,count): if i > 0 and", "= 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2],", "color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic theory of gases import numpy as np T", "((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0 i = i + 1 return R1 def", 
"L3[i] = 0 i = i + 1 ## L4 L4 = np.empty(count)", "lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange interpolate \"\"\" p_re = lagrange(x_set, y_set) return p_re(t_step)", "y_set) return p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1, 2, 11)", "($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\");", "= np.loadtxt('Data/Tr.dat', unpack=True) count = len(X) ## L1 L1 = np.empty(count) for i", "\"\"\" #### Lagrange Interpolation Formula import time import numpy as np import matplotlib.pyplot", "Formula import time import numpy as np import matplotlib.pyplot as plt from scipy.interpolate", "a = np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac statistics import matplotlib; import", "arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\")", "plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9)", "p_re = lagrange(x_set, y_set) return p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat', unpack=True) t =", "1.380649e-23 m = np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot as plt", "count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0 i = i + 1", "= (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0 i = i + 1 ## L4", "scilimits=(0,0)); 
plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]),", "< count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0 i = i +", "= time.time() - to print(\"The time interval is %f s.\" %td) plt.rcParams['font.family'] =", "Transform import numpy as np import matplotlib.pyplot as plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1])", "rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\",", "plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity per atom in three dimenson\"); plt.xlabel(r\"$k_BT/(\\hbar\\omega)$\"); plt.ylabel(r\"$\\frac{C}{3k_B}$\",rotation=0)", "L2[i] = 0 i = i + 1 ## L3 L3 = np.empty(count)", "scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\",", "B = 2 m = 2; n = 3 step = 0.01 r", "len(X) def RE1(Y): R1 = np.ones(count) \"\"\" doc \"\"\" for i in range(0,", "print(\"The time interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t, u,", "((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to = time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True)", "distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r,", "c=\"#B4B4B4\", 
alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\",", "R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0 i = i + 1 return", "plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\")", "np import matplotlib.pyplot as plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic", "1.380649e-23 v = np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac", "#### Kinetic theory of gases import numpy as np T = np.arange(0,1000,1) k", "plt.show() #%% #### Shanks Transform import numpy as np import matplotlib.pyplot as plt", "= -B/(r**m) # Resulting potential pot = rep + atp import matplotlib; import", "count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0 i = i + 1", "+ 1/3^2 + 1/4^2 + ... 
+ 1/n^2 lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264", "\"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic Series X,Y = np.loadtxt('Data/Tr.dat', unpack=True)", "plt.show() #%% #### Richardson Extrapolation import numpy as np import time import matplotlib.pyplot", "c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\",", "\"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity", "plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic", "fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95),", "plt.plot(r, atp, label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\",", "np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\",", "np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params =", "if i > 0 and i < count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i]", "plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], 
U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9)", "\"\"\" # Load Harmonic Series X,Y = np.loadtxt('Data/Tr.dat', unpack=True) count = len(X) ##", "matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"}", "as plt \"\"\" Sn = 1 + 1/2^2 + 1/3^2 + 1/4^2 +", "as np T = np.arange(0,1000,1) k = 1.380649e-23 m = np.matrix([2,28,32])*1.67e-27 v =", "{\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat", "\"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity per", "def lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange interpolate \"\"\" p_re = lagrange(x_set, y_set) return", "to = time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True) count = len(X) def RE1(Y): R1", "np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac statistics import matplotlib; import matplotlib.pyplot as", "i + 1 ## L2 L2 = np.empty(count) for i in range(0,count): if", "import numpy as np e = 2.71828182845904523536 k = 1.380649e-23 v = np.array(np.arange(0,2,0.001))", "numpy as np import matplotlib.pyplot as plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" #", "plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\") 
plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive", "plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy as np e = 2.71828182845904523536 k", "X,Y = np.loadtxt('Data/Td.dat', unpack=True) count = len(X) def RE1(Y): R1 = np.ones(count) \"\"\"", "\"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show()", "count - 1): if i < count - 1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1)", "plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85),", "\"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity per atom", "= time.time() def lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange interpolate \"\"\" p_re = lagrange(x_set,", "= RE1(Y) U2 = RE2(Y) td = time.time() - to print(\"The time interval", "\"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy", "i > 0 and i < count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] =", "- to print(\"The time interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif'", "np Interval = 0.01 reT = np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1)", "plt.xlabel(r\"$E/\\mu$\"); 
plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\"))", "R1 def RE2(Y): R2 = np.ones(count) for i in range(0, count - 2):", "i < count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0 i = i", "\"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3],", "plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8); plt.ylim(-4, 4);", "X,Y = np.loadtxt('Data/Tr.dat', unpack=True) count = len(X) ## L1 L1 = np.empty(count) for", "np.ones(count) \"\"\" doc \"\"\" for i in range(0, count - 1): if i", "plt.legend(loc=\"best\") #%% #### Kinetic theory of gases import numpy as np T =", "Extrapolation import numpy as np import time import matplotlib.pyplot as plt \"\"\" Sn", "- 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0 i = i +", "(Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0 i = i + 1 ## L2 L2", "plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4],", "= 1.380649e-23 m = np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot as", "0 i = i + 1 return R2 U1 = RE1(Y) U2 =", "if i > 0 and i < count - 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1])", "Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], 
L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\",", "# Repulsive potential rep = A/(r**n) # Attractive potential atp = -B/(r**m) #", "plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic Series X,Y = np.loadtxt('Data/Tr.dat',", "= 2 m = 2; n = 3 step = 0.01 r =", "np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot as plt", "as np Interval = 0.01 reT = np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC =", "= 0 i = i + 1 plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y,", "plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu", "i + 1 return R1 def RE2(Y): R2 = np.ones(count) for i in", "plt.legend(loc=\"best\") import numpy as np e = 2.71828182845904523536 k = 1.380649e-23 v =", "for i in range(0,count): if i > 0 and i < count-1: L1[i]", "time import matplotlib.pyplot as plt \"\"\" Sn = 1 + 1/2^2 + 1/3^2", "i = i + 1 return R1 def RE2(Y): R2 = np.ones(count) for", "16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\");", "alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9)", "plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, 
\"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\",", "'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2],", "plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0));", "lagrange to = time.time() def lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange interpolate \"\"\" p_re", "unpack=True) count = len(X) ## L1 L1 = np.empty(count) for i in range(0,count):", "rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\",", "= 1; B = 2 m = 2; n = 3 step =", "i + 1 plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1],", "= np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot as", "i = i + 1 ## L2 L2 = np.empty(count) for i in", "in range(0,count): if i > 0 and i < count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1])", "= i + 1 plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9)", "return R2 U1 = RE1(Y) U2 = RE2(Y) td = time.time() - to", "Interpolation Formula import time import numpy as np import matplotlib.pyplot as plt from", "k = 1.380649e-23 m = np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot", "i = i + 1 ## L4 L4 = np.empty(count) for i in", "= 3 step = 0.01 r = np.array(np.arange(step,10,step)) # 
Repulsive potential rep =", "potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy as np e =", "plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%%", "numpy as np e = 2.71828182845904523536 k = 1.380649e-23 v = np.array(np.arange(0,2,0.001)) a", "i + 1 return R2 U1 = RE1(Y) U2 = RE2(Y) td =", "R2[i] = 0 i = i + 1 return R2 U1 = RE1(Y)", "u = lagrange_interpolate(x, y, t) td = time.time() - to print(\"The time interval", "RE1(Y): R1 = np.ones(count) \"\"\" doc \"\"\" for i in range(0, count -", "v = np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac statistics", "len(X) ## L1 L1 = np.empty(count) for i in range(0,count): if i >", "2 m = 2; n = 3 step = 0.01 r = np.array(np.arange(step,10,step))", "i > 0 and i < count - 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else:", "plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\")", "/2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\",", "$\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's calculation", "= (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic Series X,Y = np.loadtxt('Data/Tr.dat', unpack=True) count =", "= 
{\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic", "scipy.interpolate import lagrange to = time.time() def lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange interpolate", "in range(0,count): if i > 0 and i < count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1])", "color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\")", "np.empty(count) for i in range(0,count): if i > 0 and i < count", "count = len(X) ## L1 L1 = np.empty(count) for i in range(0,count): if", "L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4],", "lagrange_interpolate(x, y, t) td = time.time() - to print(\"The time interval is %f", "plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\",", "as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params)", "plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") 
plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu", "fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00,", "Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\",", "'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2],", "matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\",", "(L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0 i = i + 1 ## L3 L3", "+ 1 ## L3 L3 = np.empty(count) for i in range(0,count): if i", "i < count - 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0 i", "2): if i < count - 2 : R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i]", "0 i = i + 1 ## L2 L2 = np.empty(count) for i", "1 return R1 def RE2(Y): R2 = np.ones(count) for i in range(0, count", "v = np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True,", "plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9)", "Fermi-Dirac 
statistics import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\",", "plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy as", "import lagrange to = time.time() def lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange interpolate \"\"\"", "alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Interatonic potential import numpy", "- 1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0 i = i", "0 i = i + 1 ## L4 L4 = np.empty(count) for i", "#%% #### Einstein's calculation import numpy as np Interval = 0.01 reT =", "L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0 i = i + 1 ##", "R1 = np.ones(count) \"\"\" doc \"\"\" for i in range(0, count - 1):", "R1[i] = 0 i = i + 1 return R1 def RE2(Y): R2", "alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% ####", "reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True,", "matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\")", "matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"};", "else: R2[i] = 0 i = i + 1 return R2 U1 =", "Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], 
L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3],", "\"\"\" for i in range(0, count - 1): if i < count -", "= np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\",", "dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20,", "#### Interatonic potential import numpy as np A = 1; B = 2", "L4 L4 = np.empty(count) for i in range(0,count): if i > 0 and", "\"\"\" to = time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True) count = len(X) def RE1(Y):", "= i + 1 ## L2 L2 = np.empty(count) for i in range(0,count):", "+ ... + 1/n^2 lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n)", "Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\",", "\"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Shanks", "e = 2.71828182845904523536 k = 1.380649e-23 v = np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n", "plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\")", "< count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0 i = i +", "count - 2): if i < count - 2 : R2[i] = 
((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2)", "i in range(0, count - 2): if i < count - 2 :", "xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu", "RE2(Y) td = time.time() - to print(\"The time interval is %f s.\" %td)", "np.ones(count) for i in range(0, count - 2): if i < count -", "\"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\");", "def RE2(Y): R2 = np.ones(count) for i in range(0, count - 2): if", "Repulsive potential rep = A/(r**n) # Attractive potential atp = -B/(r**m) # Resulting", "color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\",", "\"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8);", "= ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0 i = i + 1 return R2", "else: L4[i] = 0 i = i + 1 plt.rcParams['font.family'] = 'CMU Serif'", "## L4 L4 = np.empty(count) for i in range(0,count): if i > 0", "rep + atp import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = 
{\"text.usetex\":True,", "R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0 i = i + 1 return", "= {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein", "L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0 i = i + 1 ##", "> 0 and i < count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0", "= np.empty(count) for i in range(0,count): if i > 0 and i <", "plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r, rep,", "c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Interatonic potential import", "R2 = np.ones(count) for i in range(0, count - 2): if i <", "as plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic Series X,Y =", "plt \"\"\" Sn = 1 + 1/2^2 + 1/3^2 + 1/4^2 + ...", "scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity per atom in three dimenson\"); plt.xlabel(r\"$k_BT/(\\hbar\\omega)$\"); plt.ylabel(r\"$\\frac{C}{3k_B}$\",rotation=0) plt.plot(reT,reC,color=\"#0A8CFF\")", "#%% #### Kinetic theory of gases import numpy as np T = np.arange(0,1000,1)", "plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting", "= np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) 
#%% #### Fermi-Dirac statistics import", "<reponame>ConAntares/Photonica \"\"\" Python Test \"\"\" #### Lagrange Interpolation Formula import time import numpy", "# Attractive potential atp = -B/(r**m) # Resulting potential pot = rep +", "t) td = time.time() - to print(\"The time interval is %f s.\" %td)", "m = np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192);", "= 'CMU Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9)", "{\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory", "to print(\"The time interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t,", "is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9)", "#### Fermi-Dirac statistics import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True,", "- 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0 i = i +", "numpy as np T = np.arange(0,1000,1) k = 1.380649e-23 m = np.matrix([2,28,32])*1.67e-27 v", "= ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to = time.time() X,Y = np.loadtxt('Data/Td.dat',", "range(0,count): if i > 0 and i < count - 1: L3[i] =", "i in range(0,count): if i > 0 and i < count-1: L1[i] =", "import matplotlib.pyplot as plt from scipy.interpolate import lagrange to = time.time() def lagrange_interpolate(x_set,", "interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\",", "as np A = 1; B = 2 m = 2; n =", 
"\"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity per atom in three dimenson\");", "= (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0 i = i + 1 plt.rcParams['font.family'] =", "\"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity", "color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy as np e = 2.71828182845904523536 k = 1.380649e-23", "if i > 0 and i < count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i]", "Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx", "= len(X) ## L1 L1 = np.empty(count) for i in range(0,count): if i", "# Resulting potential pot = rep + atp import matplotlib; import matplotlib.pyplot as", "= lagrange(x_set, y_set) return p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1,", "plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen", "import numpy as np Interval = 0.01 reT = np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1)", "pot = rep + atp import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params", "plt plt.figure(dpi=192); params = 
{\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\",", "\"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16);", "Test \"\"\" #### Lagrange Interpolation Formula import time import numpy as np import", "plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r,", "'CMU Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in')", "params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True)", "is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9)", "/16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic theory of gases import", "... 
+ 1/n^2 lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) =", "0.01 reT = np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import", "= π^2/6 ≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to", "range(0, count - 2): if i < count - 2 : R2[i] =", "= 0 i = i + 1 return R1 def RE2(Y): R2 =", "unpack=True) count = len(X) def RE1(Y): R1 = np.ones(count) \"\"\" doc \"\"\" for", "t = np.linspace(1, 2, 11) u = lagrange_interpolate(x, y, t) td = time.time()", "- 2 : R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0 i = i", "= ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0 i = i + 1 return R1", "((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to = time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True) count = len(X)", "i = i + 1 return R2 U1 = RE1(Y) U2 = RE2(Y)", "plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\")", "\"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity per atom in three", "11) u = lagrange_interpolate(x, y, t) td = time.time() - to print(\"The time", "= np.loadtxt('Data/Td.dat', unpack=True) count = len(X) def RE1(Y): R1 = np.ones(count) \"\"\" doc", "= 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac statistics import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192);", "import time import numpy as np import 
matplotlib.pyplot as plt from scipy.interpolate import", "and i < count - 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0", "0 i = i + 1 return R1 def RE2(Y): R2 = np.ones(count)", "color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's calculation import numpy as np", "plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Interatonic potential import numpy as", "\"\"\" Python Test \"\"\" #### Lagrange Interpolation Formula import time import numpy as", "= time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True) count = len(X) def RE1(Y): R1 =", "plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's calculation import numpy as np Interval", "i > 0 and i < count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] =", "params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True)", "np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1, 2, 11) u = lagrange_interpolate(x, y, t) td", "as np import matplotlib.pyplot as plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load", "range(0,count): if i > 0 and i < count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else:", "plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp,", "i in range(0, count - 1): if i < count - 1 :", "def RE1(Y): R1 = np.ones(count) \"\"\" doc \"\"\" for i in range(0, count", "color=\"#FA3C3C\") plt.plot(r, atp, 
label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0,", "and i < count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0 i =", "plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9)", "plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\",", "\"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\",", "import numpy as np A = 1; B = 2 m = 2;", "= lagrange_interpolate(x, y, t) td = time.time() - to print(\"The time interval is", "to = time.time() def lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange interpolate \"\"\" p_re =", "RE2(Y): R2 = np.ones(count) for i in range(0, count - 2): if i", "y_set, t_step): \"\"\" Lagrange interpolate \"\"\" p_re = lagrange(x_set, y_set) return p_re(t_step) x,", "import numpy as np import matplotlib.pyplot as plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\"", "plt.tick_params(direction='in') plt.show() #%% #### Richardson Extrapolation import numpy as np import time import", "1/4^2 + ... 
+ 1/n^2 lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1)", "np T = np.arange(0,1000,1) k = 1.380649e-23 m = np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T)", "\"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic", "of gases import numpy as np T = np.arange(0,1000,1) k = 1.380649e-23 m", "matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16,", "plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Richardson Extrapolation import numpy as", "time import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import lagrange", "numpy as np Interval = 0.01 reT = np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC", "return p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1, 2, 11) u", "p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1, 2, 11) u =", "plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Shanks Transform import numpy", "+ 1 ## L2 L2 = np.empty(count) for i in range(0,count): if i", "= np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac statistics import matplotlib; import matplotlib.pyplot", "time interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\",", "color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy", 
"\"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature", "as np import matplotlib.pyplot as plt from scipy.interpolate import lagrange to = time.time()", "np e = 2.71828182845904523536 k = 1.380649e-23 v = np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64])", "potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r,", "%td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\",", "n = 3 step = 0.01 r = np.array(np.arange(step,10,step)) # Repulsive potential rep", "to print(\"The time interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X,", "+ 1/n^2 lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2)", "plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's calculation import numpy", "alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Shanks Transform import numpy as np import matplotlib.pyplot", "i < count - 2 : R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0", "Python Test \"\"\" #### Lagrange Interpolation Formula import time import numpy as np", "reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\",", "0 and i < count-1: L1[i] = 
(Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0 i", "i + 1 ## L4 L4 = np.empty(count) for i in range(0,count): if", "L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Richardson Extrapolation import numpy as np", "0 i = i + 1 ## L3 L3 = np.empty(count) for i", "in range(0,count): if i > 0 and i < count - 1: L3[i]", "if i < count - 1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] =", "as plt from scipy.interpolate import lagrange to = time.time() def lagrange_interpolate(x_set, y_set, t_step):", "np.empty(count) for i in range(0,count): if i > 0 and i < count-1:", "matplotlib.pyplot as plt from scipy.interpolate import lagrange to = time.time() def lagrange_interpolate(x_set, y_set,", "range(0,count): if i > 0 and i < count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else:", "(L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0 i = i + 1 plt.rcParams['font.family'] = 'CMU", "\"--\", color=\"#A0A0A0\"); plt.annotate(r\"$\\mu\\approx E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu", "-B/(r**m) # Resulting potential pot = rep + atp import matplotlib; import matplotlib.pyplot", "Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen", "Kinetic theory of gases import numpy as np T = np.arange(0,1000,1) k =", "count - 2 : R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0 i =", "calculation import numpy as np Interval = 0.01 reT = np.arange(Interval,2+Interval,Interval) reV =", "plt.xlabel(r\"Interatomic 
distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\")", "{\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac", "= 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2],", "= 0 i = i + 1 ## L4 L4 = np.empty(count) for", "in range(0, count - 1): if i < count - 1 : R1[i]", "L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0 i = i + 1 ##", "c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%%", "label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy as np e", "numpy as np import time import matplotlib.pyplot as plt \"\"\" Sn = 1", "1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0 i = i +", "+ 1/2^2 + 1/3^2 + 1/4^2 + ... 
+ 1/n^2 lim(n->∞)Sn = π^2/6", "for i in range(0, count - 1): if i < count - 1", "< count - 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0 i =", "\"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\");", "plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen", "in range(0,count): if i > 0 and i < count - 1: L4[i]", "s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y,", "L1 L1 = np.empty(count) for i in range(0,count): if i > 0 and", "Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show()", "/64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic theory of gases import numpy as np", "L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Richardson Extrapolation", "+ 1 ## L4 L4 = np.empty(count) for i in range(0,count): if i", "1 ## L4 L4 = np.empty(count) for i in range(0,count): if i >", "2; n = 3 step = 0.01 r = np.array(np.arange(step,10,step)) # Repulsive potential", "atp, label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4);", "#### Richardson Extrapolation import numpy as np import time import matplotlib.pyplot as plt", "0.85), xytext=(1.20, 0.95), 
arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\")", "import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14,", "color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic theory of gases import numpy", "+ 1 plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1],", "+ 1 return R2 U1 = RE1(Y) U2 = RE2(Y) td = time.time()", "c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%%", "td = time.time() - to print(\"The time interval is %f s.\" %td) plt.rcParams['font.family']", "count - 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0 i = i", "RE1(Y) U2 = RE2(Y) td = time.time() - to print(\"The time interval is", "statistics import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\",", "= np.ones(count) \"\"\" doc \"\"\" for i in range(0, count - 1): if", "import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import lagrange to", "matplotlib.pyplot as plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic Series X,Y", "return R1 def RE2(Y): R2 = np.ones(count) for i in range(0, count -", "Interatonic potential import numpy as np A = 1; B = 2 m", "1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0 i = i + 1", "gases import numpy as np T = 
np.arange(0,1000,1) k = 1.380649e-23 m =", "range(0, count - 1): if i < count - 1 : R1[i] =", "label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\")", "Richardson Extrapolation import numpy as np import time import matplotlib.pyplot as plt \"\"\"", "%td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\",", "interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\",", "t_step): \"\"\" Lagrange interpolate \"\"\" p_re = lagrange(x_set, y_set) return p_re(t_step) x, y", "plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic theory of gases", "np.arange(0,1000,1) k = 1.380649e-23 m = np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import matplotlib; import", "label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\")", "from scipy.interpolate import lagrange to = time.time() def lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange", "\"\"\" Lagrange interpolate \"\"\" p_re = lagrange(x_set, y_set) return p_re(t_step) x, y =", "1/3^2 + 1/4^2 + ... + 1/n^2 lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264 R1(n)", "= len(X) def RE1(Y): R1 = np.ones(count) \"\"\" doc \"\"\" for i in", "2.71828182845904523536 k = 1.380649e-23 v = np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1)", "Load Harmonic Series X,Y = np.loadtxt('Data/Tr.dat', unpack=True) count = len(X) ## L1 L1", "color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's calculation import numpy as np Interval = 0.01", "+ 1/4^2 + ... 
+ 1/n^2 lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264 R1(n) =", "16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\")", "potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive potential\",", "3 step = 0.01 r = np.array(np.arange(step,10,step)) # Repulsive potential rep = A/(r**n)", "0 and i < count - 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] =", "alpha=0.4); plt.legend(loc=\"best\") import numpy as np e = 2.71828182845904523536 k = 1.380649e-23 v", "#%% #### Shanks Transform import numpy as np import matplotlib.pyplot as plt \"\"\"", "lagrange(x_set, y_set) return p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1, 2,", "= np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params", "L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show()", "plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0)", "## L3 L3 = np.empty(count) for i in range(0,count): if i > 0", "= 0.01 reT = np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib;", 
"($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% ####", "plt.tick_params(direction='in') plt.show() #%% #### Interatonic potential import numpy as np A = 1;", "= 2.71828182845904523536 k = 1.380649e-23 v = np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n =", "# Load Harmonic Series X,Y = np.loadtxt('Data/Tr.dat', unpack=True) count = len(X) ## L1", "import numpy as np import time import matplotlib.pyplot as plt \"\"\" Sn =", "L3 L3 = np.empty(count) for i in range(0,count): if i > 0 and", "reV = np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192);", "\"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule", "c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Shanks Transform", "= 0 i = i + 1 ## L3 L3 = np.empty(count) for", "alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Richardson Extrapolation import numpy", "xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\",", "atp = -B/(r**m) # Resulting potential pot = rep + atp import matplotlib;", "Lagrange interpolate \"\"\" p_re = 
lagrange(x_set, y_set) return p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat',", "## L2 L2 = np.empty(count) for i in range(0,count): if i > 0", "import numpy as np T = np.arange(0,1000,1) k = 1.380649e-23 m = np.matrix([2,28,32])*1.67e-27", "plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's", "plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\",", "= 2; n = 3 step = 0.01 r = np.array(np.arange(step,10,step)) # Repulsive", "numpy as np A = 1; B = 2 m = 2; n", "plt.show() #%% #### Interatonic potential import numpy as np A = 1; B", "0 and i < count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0 i", "np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params =", "for i in range(0, count - 2): if i < count - 2", "i > 0 and i < count - 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else:", "$\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's calculation import numpy as np Interval =", "k = 1.380649e-23 v = np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%%", "= np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params", "Attractive potential atp = -B/(r**m) # Resulting potential pot = rep + atp", "time.time() - to print(\"The time interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU", "E_F$\", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") 
plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu", "\"\"\" Sn = 1 + 1/2^2 + 1/3^2 + 1/4^2 + ... +", "plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in')", "s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1],", "%f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t, u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x,", "/4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%%", "plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Interatonic", "pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy as np", "reT = np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot", "16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity per atom in", "0.95), arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=-0.2\")) plt.plot(v,n[0,:],label=\"$k_BT=\\mu /1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\",", 
"n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac statistics import matplotlib; import matplotlib.pyplot as plt", "Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in')", "np A = 1; B = 2 m = 2; n = 3", "r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy as np e = 2.71828182845904523536 k =", "u, \"P\", c=\"#00B4DC\", alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% ####", "\"\"\" doc \"\"\" for i in range(0, count - 1): if i <", "- 1): if i < count - 1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else:", "2 : R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0 i = i +", "i < count - 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0 i", "Einstein's calculation import numpy as np Interval = 0.01 reT = np.arange(Interval,2+Interval,Interval) reV", "= ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to = time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True) count =", "plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu", "potential\", color=\"#0A8CFF\") plt.plot(r, pot, label=\"Resulting potential\", color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import", "in range(0, count - 2): if i < count - 2 : R2[i]", "range(0,count): if i > 0 and i < count - 1: L4[i] =", "< count - 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0 i =", "Harmonic Series X,Y = np.loadtxt('Data/Tr.dat', unpack=True) count = len(X) ## L1 L1 =", "8); 
plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive potential\", color=\"#0A8CFF\")", "\"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Shanks Transform import numpy as np", "c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Richardson Extrapolation import numpy as np import", ": R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0 i = i + 1", "1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0 i = i + 1", "and i < count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0 i =", "time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True) count = len(X) def RE1(Y): R1 = np.ones(count)", "0 i = i + 1 plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\",", "16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\",", "1 ## L3 L3 = np.empty(count) for i in range(0,count): if i >", "R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to = time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True) count", "= np.linspace(1, 2, 11) u = lagrange_interpolate(x, y, t) td = time.time() -", "L1[i] = 0 i = i + 1 ## L2 L2 = np.empty(count)", "matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Einstein heat capacity per atom in three dimenson\"); plt.xlabel(r\"$k_BT/(\\hbar\\omega)$\");", "i = i + 1 ## L3 L3 = np.empty(count) for i in", "doc \"\"\" for i in range(0, count - 1): if i < count", "= i + 1 return R1 def RE2(Y): R2 = np.ones(count) for i", "1; B = 2 m = 2; n = 3 step 
= 0.01", "/1$\", color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\",", "#### Lagrange Interpolation Formula import time import numpy as np import matplotlib.pyplot as", "import matplotlib.pyplot as plt \"\"\" L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic Series", "np import matplotlib.pyplot as plt from scipy.interpolate import lagrange to = time.time() def", "else: L1[i] = 0 i = i + 1 ## L2 L2 =", "1 return R2 U1 = RE1(Y) U2 = RE2(Y) td = time.time() -", "count - 1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0 i =", "i = i + 1 plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\",", "Resulting potential pot = rep + atp import matplotlib; import matplotlib.pyplot as plt", "plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive potential\",", "plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\")", "import time import matplotlib.pyplot as plt \"\"\" Sn = 1 + 1/2^2 +", "import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\":", "Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%%", "#%% #### Richardson Extrapolation import 
numpy as np import time import matplotlib.pyplot as", "plt.legend(loc=\"best\") #%% #### Einstein's calculation import numpy as np Interval = 0.01 reT", "alpha=0.9) plt.plot(x, y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Shanks Transform import", "import matplotlib.pyplot as plt \"\"\" Sn = 1 + 1/2^2 + 1/3^2 +", "else: R1[i] = 0 i = i + 1 return R1 def RE2(Y):", "U1 = RE1(Y) U2 = RE2(Y) td = time.time() - to print(\"The time", "\"\"\" p_re = lagrange(x_set, y_set) return p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat', unpack=True) t", "c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Shanks Transform import numpy as np import", "potential import numpy as np A = 1; B = 2 m =", "= i + 1 ## L4 L4 = np.empty(count) for i in range(0,count):", "\"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic", "np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac statistics import matplotlib;", "color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic theory of", "L2 = np.empty(count) for i in range(0,count): if i > 0 and i", "plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic theory of gases import numpy as", "np.linspace(1, 2, 11) u = lagrange_interpolate(x, y, t) td = time.time() - to", "T = np.arange(0,1000,1) k = 1.380649e-23 m = np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import", "= np.arange(0,1000,1) k = 1.380649e-23 m = 
np.matrix([2,28,32])*1.67e-27 v = np.sqrt(3*k*T/m.T) import matplotlib;", "of Gases\"); plt.xlabel(r\"Temperature ($\\rm{K}$)\"); plt.ylabel(r\"Melocule Velocity ($\\rm{ms^{-1}}$)\") plt.plot(T,np.array(v[0,:].T),label=r\"Hydrogen $\\rm{H_2}$\", color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\")", "#### Shanks Transform import numpy as np import matplotlib.pyplot as plt \"\"\" L[i]", "< count - 1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0 i", "L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic Series X,Y = np.loadtxt('Data/Tr.dat', unpack=True) count", "1 plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\",", "scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8); plt.ylim(-4, 4); plt.plot(r,", "+ atp import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\",", "= rep + atp import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params =", "= A/(r**n) # Attractive potential atp = -B/(r**m) # Resulting potential pot =", "y = np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1, 2, 11) u = lagrange_interpolate(x, y,", "color=\"#0A8CFF\") plt.plot(T,np.array(v[1,:].T),label=r\"Nitrogen $\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's calculation import", "matplotlib.pyplot as plt \"\"\" Sn = 1 + 1/2^2 + 1/3^2 + 1/4^2", "= 0 i = i + 1 ## L2 L2 = np.empty(count) for", "1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = 
((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to = time.time() X,Y", "L4[i] = 0 i = i + 1 plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X,", "= {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic", "U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Interatonic potential", "print(\"The time interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(X, Y,", "= 0.01 r = np.array(np.arange(step,10,step)) # Repulsive potential rep = A/(r**n) # Attractive", "color=\"#FA3C3C\") plt.plot(v,n[1,:],label=\"$k_BT=\\mu /2$\", color=\"#FA9C3D\") plt.plot(v,n[2,:],label=\"$k_BT=\\mu /4$\", color=\"#FADC14\") plt.plot(v,n[3,:],label=\"$k_BT=\\mu /8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\")", "potential atp = -B/(r**m) # Resulting potential pot = rep + atp import", "L3 = np.empty(count) for i in range(0,count): if i > 0 and i", "1/(e**np.array(a.T*np.matrix(v-1))+1) #%% #### Fermi-Dirac statistics import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params", "≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to = time.time()", "R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to = time.time() X,Y =", "plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} 
matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0));", "1 ## L2 L2 = np.empty(count) for i in range(0,count): if i >", "plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\",", "#%% #### Fermi-Dirac statistics import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params =", "plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0));", "#%% #### Interatonic potential import numpy as np A = 1; B =", "i < count - 1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i] = 0", ": R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0 i = i + 1", "U2[0:len(U1)-2], c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Interatonic potential import numpy as np", "count = len(X) def RE1(Y): R1 = np.ones(count) \"\"\" doc \"\"\" for i", "0 and i < count - 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] =", "theory of gases import numpy as np T = np.arange(0,1000,1) k = 1.380649e-23", "\"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0,", "= np.ones(count) for i in range(0, count - 2): if i < count", "r = np.array(np.arange(step,10,step)) # Repulsive potential rep = A/(r**n) # Attractive 
potential atp", "color=\"#6E64FA\") plt.plot(r, r*0, color=\"gray\", alpha=0.4); plt.legend(loc=\"best\") import numpy as np e = 2.71828182845904523536", "/8$\", color=\"#50F050\") plt.plot(v,n[4,:],label=\"$k_BT=\\mu /16$\", color=\"#0A8CFF\") plt.plot(v,n[5,:],label=\"$k_BT=\\mu /64$\", color=\"#6E64FA\") plt.legend(loc=\"best\") #%% #### Kinetic theory", "= reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\",", "potential rep = A/(r**n) # Attractive potential atp = -B/(r**m) # Resulting potential", "L1 = np.empty(count) for i in range(0,count): if i > 0 and i", "time interval is %f s.\" %td) plt.rcParams['font.family'] = 'CMU Serif' plt.plot(t, u, \"P\",", "Lagrange Interpolation Formula import time import numpy as np import matplotlib.pyplot as plt", "\"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Kinetic Theory of", "= 1 + 1/2^2 + 1/3^2 + 1/4^2 + ... + 1/n^2 lim(n->∞)Sn", "1 + 1/2^2 + 1/3^2 + 1/4^2 + ... 
+ 1/n^2 lim(n->∞)Sn =", "= i + 1 ## L3 L3 = np.empty(count) for i in range(0,count):", "plt from scipy.interpolate import lagrange to = time.time() def lagrange_interpolate(x_set, y_set, t_step): \"\"\"", "i < count-1: L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0 i = i", "= i + 1 return R2 U1 = RE1(Y) U2 = RE2(Y) td", "{\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\");", "time.time() def lagrange_interpolate(x_set, y_set, t_step): \"\"\" Lagrange interpolate \"\"\" p_re = lagrange(x_set, y_set)", "π^2/6 ≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\" to =", "1/2^2 + 1/3^2 + 1/4^2 + ... 
+ 1/n^2 lim(n->∞)Sn = π^2/6 ≈", "numpy as np import matplotlib.pyplot as plt from scipy.interpolate import lagrange to =", "params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True)", "…… \"\"\" to = time.time() X,Y = np.loadtxt('Data/Td.dat', unpack=True) count = len(X) def", "alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Interatonic potential import numpy as np A =", "i + 1 ## L3 L3 = np.empty(count) for i in range(0,count): if", "1): if i < count - 1 : R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1) else: R1[i]", "A = 1; B = 2 m = 2; n = 3 step", "else: L3[i] = 0 i = i + 1 ## L4 L4 =", "4); plt.plot(r, rep, label=\"Repulsive potential\", color=\"#FA3C3C\") plt.plot(r, atp, label=\"Attractive potential\", color=\"#0A8CFF\") plt.plot(r, pot,", "plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Richardson", "and i < count - 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0", "alpha=0.9) plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c=\"#FFC814\", alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9)", "= (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1]) else: L2[i] = 0 i = i + 1 ## L3", "np.array(np.arange(step,10,step)) # Repulsive potential rep = A/(r**n) # Attractive potential atp = -B/(r**m)", "$\\rm{N_2}$\", color=\"#FA3C3C\") plt.plot(T,np.array(v[2,:].T),label=r\"Oxygen $\\rm{O_2}$\", color=\"#50F050\"); plt.legend(loc=\"best\") #%% #### Einstein's calculation import numpy as", "c=\"#1978F0\", alpha=0.9) 
plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Richardson Extrapolation import", "np import time import matplotlib.pyplot as plt \"\"\" Sn = 1 + 1/2^2", "= (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0 i = i + 1 ## L2", "else: L2[i] = 0 i = i + 1 ## L3 L3 =", "< count - 2 : R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0 i", "i in range(0,count): if i > 0 and i < count-1: L2[i] =", "= {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The", "as np import time import matplotlib.pyplot as plt \"\"\" Sn = 1 +", "#### Einstein's calculation import numpy as np Interval = 0.01 reT = np.arange(Interval,2+Interval,Interval)", "as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params)", "U2 = RE2(Y) td = time.time() - to print(\"The time interval is %f", "= np.array(np.arange(step,10,step)) # Repulsive potential rep = A/(r**n) # Attractive potential atp =", "((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else: R2[i] = 0 i = i + 1 return R2 U1", "plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12); plt.xlabel(r\"$E/\\mu$\"); plt.ylabel(r\"$\\langle{n_i}\\rangle$\", rotation=0) plt.plot(([1,1]),([0,1]), \"--\", color=\"#A0A0A0\");", "R2 U1 = RE1(Y) U2 = RE2(Y) td = time.time() - to print(\"The", "x, y = np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1, 2, 11) u = lagrange_interpolate(x,", "i in range(0,count): if i 
> 0 and i < count - 1:", "0.01 r = np.array(np.arange(step,10,step)) # Repulsive potential rep = A/(r**n) # Attractive potential", "\"axes.labelsize\":14, \"figure.facecolor\":\"w\"}; matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.suptitle(\"The Fermi-Dirac Distribution\", fontsize=16); plt.title(\"Energy dependence\", fontsize=12);", "as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"white\"} matplotlib.rcParams.update(params)", "plt.tick_params(direction='in') plt.show() #%% #### Shanks Transform import numpy as np import matplotlib.pyplot as", "L4 = np.empty(count) for i in range(0,count): if i > 0 and i", "A/(r**n) # Attractive potential atp = -B/(r**m) # Resulting potential pot = rep", "y, \"o\", c=\"#32B432\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Shanks Transform import numpy as", "matplotlib.rcParams.update(params) plt.ticklabel_format(style=\"sci\", scilimits=(0,0)); plt.tick_params(direction=\"in\",top=True,right=True,bottom=True,left=True) plt.title(\"Interatomic potential\"); plt.xlabel(r\"Interatomic distance\"); plt.ylabel(r\"Interatomic potential\") plt.xlim(0, 8); plt.ylim(-4,", "alpha=0.9) plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c=\"#1978F0\", alpha=0.9) plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c=\"#A064DC\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% ####", "Sn = 1 + 1/2^2 + 1/3^2 + 1/4^2 + ... 
+ 1/n^2", "> 0 and i < count-1: L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1]) else: L1[i] = 0", "- 2): if i < count - 2 : R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2) else:", "rep = A/(r**n) # Attractive potential atp = -B/(r**m) # Resulting potential pot", "y, t) td = time.time() - to print(\"The time interval is %f s.\"", "Interval = 0.01 reT = np.arange(Interval,2+Interval,Interval) reV = np.power(reT,-1) reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1) import", "> 0 and i < count - 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i]", "= 1.380649e-23 v = np.array(np.arange(0,2,0.001)) a = np.matrix([1,2,4,8,16,32,64]) n = 1/(e**np.array(a.T*np.matrix(v-1))+1) #%% ####", "= 0 i = i + 1 return R2 U1 = RE1(Y) U2", "step = 0.01 r = np.array(np.arange(step,10,step)) # Repulsive potential rep = A/(r**n) #", "np.loadtxt('Data/Tr.dat', unpack=True) count = len(X) ## L1 L1 = np.empty(count) for i in", "lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) …… \"\"\"", "potential pot = rep + atp import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192);", "Serif' plt.plot(X, Y, \"o\", c=\"#B4B4B4\", alpha=0.9) plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c=\"#FF1E14\", alpha=0.9) plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c=\"#1978F0\",", "c=\"#1978F0\", alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Interatonic potential import numpy as np A", "Shanks Transform import numpy as np import matplotlib.pyplot as plt \"\"\" L[i] =", "count - 1: L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0 i = i", "L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1]) else: L4[i] = 0 i = i + 1 plt.rcParams['font.family']", "= np.loadtxt('Data/dataSim.dat', unpack=True) t = np.linspace(1, 2, 11) u = lagrange_interpolate(x, y, t)", "Series X,Y = 
np.loadtxt('Data/Tr.dat', unpack=True) count = len(X) ## L1 L1 = np.empty(count)", "alpha=0.9) plt.tick_params(direction='in') plt.show() #%% #### Richardson Extrapolation import numpy as np import time", "interpolate \"\"\" p_re = lagrange(x_set, y_set) return p_re(t_step) x, y = np.loadtxt('Data/dataSim.dat', unpack=True)", "(S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1]) \"\"\" # Load Harmonic Series X,Y = np.loadtxt('Data/Tr.dat', unpack=True) count = len(X)", "if i > 0 and i < count - 1: L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1])", "## L1 L1 = np.empty(count) for i in range(0,count): if i > 0", "atp import matplotlib; import matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\",", "m = 2; n = 3 step = 0.01 r = np.array(np.arange(step,10,step)) #", "matplotlib.pyplot as plt plt.figure(dpi=192); params = {\"text.usetex\":True, \"font.family\":\"serif\", \"mathtext.fontset\":\"cm\", \"axes.titlesize\": 16, \"axes.labelsize\":14, \"figure.facecolor\":\"w\"}", "(L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1]) else: L3[i] = 0 i = i + 1 ## L4 L4", "1/n^2 lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264 R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1) R2(n) = ((n+2)^2*S(n+1)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2) ……", "2, 11) u = lagrange_interpolate(x, y, t) td = time.time() - to print(\"The" ]
[ "BigY = [] for i in range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M = (ObjX[i]", "np.ptp(ObjX) s2 = np.size(BigX) BigY = [] for i in range(0,s2): fKs =", "(ObjX - np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX) BigY = [] for i in", "ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX) BigY = [] for", "Algorithm/CalculateGreatness.py import numpy as np def CalculateGreatness(BigX,ObjX): ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX)", "CalculateGreatness(BigX,ObjX): ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX) BigY = []", "[] for i in range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M = (ObjX[i] / (fKs", "<filename>Artificial Algae Algorithm/CalculateGreatness.py import numpy as np def CalculateGreatness(BigX,ObjX): ObjX = (ObjX -", "import numpy as np def CalculateGreatness(BigX,ObjX): ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX) s2", "= (ObjX - np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX) BigY = [] for i", "np def CalculateGreatness(BigX,ObjX): ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX) BigY", "= np.size(BigX) BigY = [] for i in range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M", "i in range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M = (ObjX[i] / (fKs + ObjX[i]))", "np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX) BigY = [] for i in range(0,s2): fKs", "in range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M = (ObjX[i] / (fKs + ObjX[i])) dX", "Algae Algorithm/CalculateGreatness.py import numpy as np def CalculateGreatness(BigX,ObjX): ObjX = (ObjX - np.min(ObjX))/", "= np.abs(BigX[:,i]/2.0) M = (ObjX[i] / (fKs + ObjX[i])) dX = M *", "= (ObjX[i] / (fKs + ObjX[i])) dX = M * BigX[:,i] BigX[:,i] =", "s2 = np.size(BigX) BigY = [] for i in range(0,s2): fKs = np.abs(BigX[:,i]/2.0)", "np.size(BigX) BigY = [] for i in range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M =", "np.abs(BigX[:,i]/2.0) M = (ObjX[i] / (fKs + ObjX[i])) dX = M * BigX[:,i]", "(fKs + ObjX[i])) dX = M * BigX[:,i] BigX[:,i] = BigX[:,i] + dX", "+ ObjX[i])) dX = M * BigX[:,i] BigX[:,i] = BigX[:,i] + dX return", 
"def CalculateGreatness(BigX,ObjX): ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX) BigY =", "for i in range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M = (ObjX[i] / (fKs +", "= [] for i in range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M = (ObjX[i] /", "fKs = np.abs(BigX[:,i]/2.0) M = (ObjX[i] / (fKs + ObjX[i])) dX = M", "M = (ObjX[i] / (fKs + ObjX[i])) dX = M * BigX[:,i] BigX[:,i]", "range(0,s2): fKs = np.abs(BigX[:,i]/2.0) M = (ObjX[i] / (fKs + ObjX[i])) dX =", "as np def CalculateGreatness(BigX,ObjX): ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX)", "(ObjX[i] / (fKs + ObjX[i])) dX = M * BigX[:,i] BigX[:,i] = BigX[:,i]", "/ (fKs + ObjX[i])) dX = M * BigX[:,i] BigX[:,i] = BigX[:,i] +", "numpy as np def CalculateGreatness(BigX,ObjX): ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX) s2 =", "ObjX[i])) dX = M * BigX[:,i] BigX[:,i] = BigX[:,i] + dX return BigX", "- np.min(ObjX))/ np.ptp(ObjX) s2 = np.size(BigX) BigY = [] for i in range(0,s2):" ]
[ "division(2, c) return d else: value = int(division(2, num_values)) e = data[value] return", "num_values = len(data) if num_values % 2 == 0: value = int(division(2, num_values))", "0: value = int(division(2, num_values)) a = data[value] value = value - 1", "<reponame>sh667/statistical-calculator from Calculator.Division import division from Calculator.Addition import addition def get_median(data): num_values =", "division from Calculator.Addition import addition def get_median(data): num_values = len(data) if num_values %", "Calculator.Addition import addition def get_median(data): num_values = len(data) if num_values % 2 ==", "= int(division(2, num_values)) a = data[value] value = value - 1 b =", "value = value - 1 b = data[value] c = addition(b, a) d", "data[value] c = addition(b, a) d = division(2, c) return d else: value", "get_median(data): num_values = len(data) if num_values % 2 == 0: value = int(division(2,", "value - 1 b = data[value] c = addition(b, a) d = division(2,", "a) d = division(2, c) return d else: value = int(division(2, num_values)) e", "num_values)) a = data[value] value = value - 1 b = data[value] c", "c) return d else: value = int(division(2, num_values)) e = data[value] return e", "== 0: value = int(division(2, num_values)) a = data[value] value = value -", "data[value] value = value - 1 b = data[value] c = addition(b, a)", "value = int(division(2, num_values)) a = data[value] value = value - 1 b", "if num_values % 2 == 0: value = int(division(2, num_values)) a = data[value]", "2 == 0: value = int(division(2, num_values)) a = data[value] value = value", "d = division(2, c) return d else: value = int(division(2, num_values)) e =", "a = data[value] value = value - 1 b = data[value] c =", "def get_median(data): num_values = len(data) if num_values % 2 == 0: value =", "= addition(b, a) d = division(2, c) return d else: value = int(division(2,", "= len(data) if num_values % 2 == 0: value = int(division(2, num_values)) a", "Calculator.Division 
import division from Calculator.Addition import addition def get_median(data): num_values = len(data) if", "from Calculator.Division import division from Calculator.Addition import addition def get_median(data): num_values = len(data)", "= value - 1 b = data[value] c = addition(b, a) d =", "import division from Calculator.Addition import addition def get_median(data): num_values = len(data) if num_values", "= data[value] value = value - 1 b = data[value] c = addition(b,", "import addition def get_median(data): num_values = len(data) if num_values % 2 == 0:", "% 2 == 0: value = int(division(2, num_values)) a = data[value] value =", "- 1 b = data[value] c = addition(b, a) d = division(2, c)", "= data[value] c = addition(b, a) d = division(2, c) return d else:", "len(data) if num_values % 2 == 0: value = int(division(2, num_values)) a =", "addition(b, a) d = division(2, c) return d else: value = int(division(2, num_values))", "b = data[value] c = addition(b, a) d = division(2, c) return d", "= division(2, c) return d else: value = int(division(2, num_values)) e = data[value]", "int(division(2, num_values)) a = data[value] value = value - 1 b = data[value]", "c = addition(b, a) d = division(2, c) return d else: value =", "from Calculator.Addition import addition def get_median(data): num_values = len(data) if num_values % 2", "1 b = data[value] c = addition(b, a) d = division(2, c) return", "addition def get_median(data): num_values = len(data) if num_values % 2 == 0: value", "num_values % 2 == 0: value = int(division(2, num_values)) a = data[value] value" ]
[ "# Parmeters payload: The payload for which the signature should be computed. secret:", "= provided self.computed = computed def __str__(self) -> str: return f'{self._MSG}\\n provided: {self.provided}\\n", "{algo!r}') return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest() def check_signature(sig: str, payload: bytes, secret:", "provided: str, computed: str) -> None: self.provided = provided self.computed = computed def", "class SignatureMismatchException(Exception): \"\"\" Raised if a signature can not be verified with #check_signatuer().", "HMAC signature of *payload* given the specified *secret* and the given hashing *algo*.", "raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}') return f'{algo}=' + hmac.new(secret, payload,", "'The provided signature does not match the computed signature of the payload.' def", "hmac def compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256') -> str: \"\"\"", "algorithm to use, must be `sha1` or `sha256`. \"\"\" if algo not in", "to generate the signature. algo: The hash algorithm to use, must be `sha1`", "uses constant-time string comparison to prevent timing analysis. \"\"\" computed = compute_signature(payload, secret,", "not match the computed signature of the payload.' def __init__(self, provided: str, computed:", "= compute_signature(payload, secret, algo) if not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception):", "This function uses constant-time string comparison to prevent timing analysis. \"\"\" computed =", "with #check_signatuer(). \"\"\" _MSG = 'The provided signature does not match the computed", "the HMAC signature of *payload* given the specified *secret* and the given hashing", "or `sha256`. 
\"\"\" if algo not in ('sha1', 'sha256'): raise ValueError(f'algo must be", "-> str: \"\"\" Computes the HMAC signature of *payload* given the specified *secret*", "payload: bytes, secret: bytes, algo: str = 'sha256') -> None: \"\"\" Compares the", "signature. algo: The hash algorithm to use, must be `sha1` or `sha256`. \"\"\"", "in conjunction to generate the signature. algo: The hash algorithm to use, must", "#SignatureMismatchException if they do not match. This function uses constant-time string comparison to", "\"\"\" Helper to check the signature of a GitHub event request. \"\"\" import", "that is used in conjunction to generate the signature. algo: The hash algorithm", "str, payload: bytes, secret: bytes, algo: str = 'sha256') -> None: \"\"\" Compares", "the computed signature of the payload.' def __init__(self, provided: str, computed: str) ->", "verified with #check_signatuer(). \"\"\" _MSG = 'The provided signature does not match the", "be computed. secret: The secret string that is used in conjunction to generate", "Computes the HMAC signature of *payload* given the specified *secret* and the given", "algo: The hash algorithm to use, must be `sha1` or `sha256`. \"\"\" if", "to use, must be `sha1` or `sha256`. \"\"\" if algo not in ('sha1',", "f'{algo}=' + hmac.new(secret, payload, algo).hexdigest() def check_signature(sig: str, payload: bytes, secret: bytes, algo:", "given hashing *algo*. # Parmeters payload: The payload for which the signature should", "The hash algorithm to use, must be `sha1` or `sha256`. 
\"\"\" if algo", "_MSG = 'The provided signature does not match the computed signature of the", "must be {{sha1, sha256}}, got {algo!r}') return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest() def", "secret: The secret string that is used in conjunction to generate the signature.", "hmac.new(secret, payload, algo).hexdigest() def check_signature(sig: str, payload: bytes, secret: bytes, algo: str =", "signature of the *payload* and raises a #SignatureMismatchException if they do not match.", "of the *payload* and raises a #SignatureMismatchException if they do not match. This", "computed signature of the *payload* and raises a #SignatureMismatchException if they do not", "computed. secret: The secret string that is used in conjunction to generate the", "= 'sha256') -> str: \"\"\" Computes the HMAC signature of *payload* given the", "('sha1', 'sha256'): raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}') return f'{algo}=' +", "conjunction to generate the signature. algo: The hash algorithm to use, must be", "\"\"\" Raised if a signature can not be verified with #check_signatuer(). \"\"\" _MSG", "Parmeters payload: The payload for which the signature should be computed. secret: The", "not match. This function uses constant-time string comparison to prevent timing analysis. \"\"\"", "bytes, algo: str = 'sha256') -> str: \"\"\" Computes the HMAC signature of", "request. \"\"\" import hmac def compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256')", "check_signature(sig: str, payload: bytes, secret: bytes, algo: str = 'sha256') -> None: \"\"\"", "The payload for which the signature should be computed. secret: The secret string", "they do not match. This function uses constant-time string comparison to prevent timing", "signature should be computed. secret: The secret string that is used in conjunction", "timing analysis. 
\"\"\" computed = compute_signature(payload, secret, algo) if not hmac.compare_digest(sig, computed): raise", "not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\" Raised if a signature", "be `sha1` or `sha256`. \"\"\" if algo not in ('sha1', 'sha256'): raise ValueError(f'algo", "for which the signature should be computed. secret: The secret string that is", "the *payload* and raises a #SignatureMismatchException if they do not match. This function", "str: \"\"\" Computes the HMAC signature of *payload* given the specified *secret* and", "*secret* and the given hashing *algo*. # Parmeters payload: The payload for which", "signature can not be verified with #check_signatuer(). \"\"\" _MSG = 'The provided signature", "computed signature of the payload.' def __init__(self, provided: str, computed: str) -> None:", "algo).hexdigest() def check_signature(sig: str, payload: bytes, secret: bytes, algo: str = 'sha256') ->", "__init__(self, provided: str, computed: str) -> None: self.provided = provided self.computed = computed", "None: \"\"\" Compares the porivided signature *sig* with the computed signature of the", "signature of *payload* given the specified *secret* and the given hashing *algo*. #", "the given hashing *algo*. # Parmeters payload: The payload for which the signature", "`sha1` or `sha256`. \"\"\" if algo not in ('sha1', 'sha256'): raise ValueError(f'algo must", "the signature. algo: The hash algorithm to use, must be `sha1` or `sha256`.", "a signature can not be verified with #check_signatuer(). \"\"\" _MSG = 'The provided", "if a signature can not be verified with #check_signatuer(). \"\"\" _MSG = 'The", "the signature should be computed. secret: The secret string that is used in", "computed: str) -> None: self.provided = provided self.computed = computed def __str__(self) ->", "a GitHub event request. 
\"\"\" import hmac def compute_signature(payload: bytes, secret: bytes, algo:", "\"\"\" if algo not in ('sha1', 'sha256'): raise ValueError(f'algo must be {{sha1, sha256}},", "the computed signature of the *payload* and raises a #SignatureMismatchException if they do", "Raised if a signature can not be verified with #check_signatuer(). \"\"\" _MSG =", "the payload.' def __init__(self, provided: str, computed: str) -> None: self.provided = provided", "compute_signature(payload, secret, algo) if not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\"", "of the payload.' def __init__(self, provided: str, computed: str) -> None: self.provided =", "the specified *secret* and the given hashing *algo*. # Parmeters payload: The payload", "secret string that is used in conjunction to generate the signature. algo: The", "-> None: \"\"\" Compares the porivided signature *sig* with the computed signature of", "match. This function uses constant-time string comparison to prevent timing analysis. \"\"\" computed", "hashing *algo*. # Parmeters payload: The payload for which the signature should be", "which the signature should be computed. secret: The secret string that is used", "with the computed signature of the *payload* and raises a #SignatureMismatchException if they", "{{sha1, sha256}}, got {algo!r}') return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest() def check_signature(sig: str,", "computed) class SignatureMismatchException(Exception): \"\"\" Raised if a signature can not be verified with", "compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256') -> str: \"\"\" Computes the", "used in conjunction to generate the signature. 
algo: The hash algorithm to use,", "bytes, algo: str = 'sha256') -> None: \"\"\" Compares the porivided signature *sig*", "provided self.computed = computed def __str__(self) -> str: return f'{self._MSG}\\n provided: {self.provided}\\n computed:", "def compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256') -> str: \"\"\" Computes", "payload for which the signature should be computed. secret: The secret string that", "return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest() def check_signature(sig: str, payload: bytes, secret: bytes,", "should be computed. secret: The secret string that is used in conjunction to", "= 'The provided signature does not match the computed signature of the payload.'", "self.provided = provided self.computed = computed def __str__(self) -> str: return f'{self._MSG}\\n provided:", "signature of a GitHub event request. \"\"\" import hmac def compute_signature(payload: bytes, secret:", "bytes, secret: bytes, algo: str = 'sha256') -> str: \"\"\" Computes the HMAC", "algo not in ('sha1', 'sha256'): raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}')", "signature of the payload.' def __init__(self, provided: str, computed: str) -> None: self.provided", "to check the signature of a GitHub event request. \"\"\" import hmac def", "constant-time string comparison to prevent timing analysis. \"\"\" computed = compute_signature(payload, secret, algo)", "check the signature of a GitHub event request. \"\"\" import hmac def compute_signature(payload:", "is used in conjunction to generate the signature. algo: The hash algorithm to", "porivided signature *sig* with the computed signature of the *payload* and raises a", "GitHub event request. \"\"\" import hmac def compute_signature(payload: bytes, secret: bytes, algo: str", "and the given hashing *algo*. # Parmeters payload: The payload for which the", "a #SignatureMismatchException if they do not match. 
This function uses constant-time string comparison", "function uses constant-time string comparison to prevent timing analysis. \"\"\" computed = compute_signature(payload,", "SignatureMismatchException(Exception): \"\"\" Raised if a signature can not be verified with #check_signatuer(). \"\"\"", "to prevent timing analysis. \"\"\" computed = compute_signature(payload, secret, algo) if not hmac.compare_digest(sig,", "SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\" Raised if a signature can not be verified", "= 'sha256') -> None: \"\"\" Compares the porivided signature *sig* with the computed", "secret: bytes, algo: str = 'sha256') -> None: \"\"\" Compares the porivided signature", "payload, algo).hexdigest() def check_signature(sig: str, payload: bytes, secret: bytes, algo: str = 'sha256')", "None: self.provided = provided self.computed = computed def __str__(self) -> str: return f'{self._MSG}\\n", "use, must be `sha1` or `sha256`. \"\"\" if algo not in ('sha1', 'sha256'):", "payload.' def __init__(self, provided: str, computed: str) -> None: self.provided = provided self.computed", "`sha256`. 
\"\"\" if algo not in ('sha1', 'sha256'): raise ValueError(f'algo must be {{sha1,", "raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\" Raised if a signature can not be", "be {{sha1, sha256}}, got {algo!r}') return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest() def check_signature(sig:", "Compares the porivided signature *sig* with the computed signature of the *payload* and", "hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\" Raised if a signature can", "the porivided signature *sig* with the computed signature of the *payload* and raises", "*sig* with the computed signature of the *payload* and raises a #SignatureMismatchException if", "not in ('sha1', 'sha256'): raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}') return", "specified *secret* and the given hashing *algo*. # Parmeters payload: The payload for", "str) -> None: self.provided = provided self.computed = computed def __str__(self) -> str:", "self.computed = computed def __str__(self) -> str: return f'{self._MSG}\\n provided: {self.provided}\\n computed: {self.computed}'", "string comparison to prevent timing analysis. \"\"\" computed = compute_signature(payload, secret, algo) if", "The secret string that is used in conjunction to generate the signature. algo:", "str = 'sha256') -> None: \"\"\" Compares the porivided signature *sig* with the", "\"\"\" _MSG = 'The provided signature does not match the computed signature of", "in ('sha1', 'sha256'): raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}') return f'{algo}='", "and raises a #SignatureMismatchException if they do not match. This function uses constant-time", "comparison to prevent timing analysis. \"\"\" computed = compute_signature(payload, secret, algo) if not", "analysis. 
\"\"\" computed = compute_signature(payload, secret, algo) if not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig,", "must be `sha1` or `sha256`. \"\"\" if algo not in ('sha1', 'sha256'): raise", "algo: str = 'sha256') -> None: \"\"\" Compares the porivided signature *sig* with", "algo: str = 'sha256') -> str: \"\"\" Computes the HMAC signature of *payload*", "'sha256') -> str: \"\"\" Computes the HMAC signature of *payload* given the specified", "\"\"\" Compares the porivided signature *sig* with the computed signature of the *payload*", "do not match. This function uses constant-time string comparison to prevent timing analysis.", "the signature of a GitHub event request. \"\"\" import hmac def compute_signature(payload: bytes,", "#check_signatuer(). \"\"\" _MSG = 'The provided signature does not match the computed signature", "*algo*. # Parmeters payload: The payload for which the signature should be computed.", "Helper to check the signature of a GitHub event request. \"\"\" import hmac", "given the specified *secret* and the given hashing *algo*. # Parmeters payload: The", "sha256}}, got {algo!r}') return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest() def check_signature(sig: str, payload:", "of a GitHub event request. \"\"\" import hmac def compute_signature(payload: bytes, secret: bytes,", "payload: The payload for which the signature should be computed. secret: The secret", "prevent timing analysis. 
\"\"\" computed = compute_signature(payload, secret, algo) if not hmac.compare_digest(sig, computed):", "str, computed: str) -> None: self.provided = provided self.computed = computed def __str__(self)", "if not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\" Raised if a", "ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}') return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest()", "signature does not match the computed signature of the payload.' def __init__(self, provided:", "can not be verified with #check_signatuer(). \"\"\" _MSG = 'The provided signature does", "-> None: self.provided = provided self.computed = computed def __str__(self) -> str: return", "+ hmac.new(secret, payload, algo).hexdigest() def check_signature(sig: str, payload: bytes, secret: bytes, algo: str", "provided signature does not match the computed signature of the payload.' def __init__(self,", "'sha256'): raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}') return f'{algo}=' + hmac.new(secret,", "computed): raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\" Raised if a signature can not", "be verified with #check_signatuer(). \"\"\" _MSG = 'The provided signature does not match", "'sha256') -> None: \"\"\" Compares the porivided signature *sig* with the computed signature", "generate the signature. algo: The hash algorithm to use, must be `sha1` or", "*payload* and raises a #SignatureMismatchException if they do not match. This function uses", "string that is used in conjunction to generate the signature. algo: The hash", "raises a #SignatureMismatchException if they do not match. 
This function uses constant-time string", "import hmac def compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256') -> str:", "got {algo!r}') return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest() def check_signature(sig: str, payload: bytes,", "match the computed signature of the payload.' def __init__(self, provided: str, computed: str)", "\"\"\" computed = compute_signature(payload, secret, algo) if not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed)", "secret: bytes, algo: str = 'sha256') -> str: \"\"\" Computes the HMAC signature", "if algo not in ('sha1', 'sha256'): raise ValueError(f'algo must be {{sha1, sha256}}, got", "not be verified with #check_signatuer(). \"\"\" _MSG = 'The provided signature does not", "str = 'sha256') -> str: \"\"\" Computes the HMAC signature of *payload* given", "\"\"\" Computes the HMAC signature of *payload* given the specified *secret* and the", "def __init__(self, provided: str, computed: str) -> None: self.provided = provided self.computed =", "of *payload* given the specified *secret* and the given hashing *algo*. # Parmeters", "algo) if not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\" Raised if", "if they do not match. This function uses constant-time string comparison to prevent", "secret, algo) if not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed) class SignatureMismatchException(Exception): \"\"\" Raised", "event request. 
\"\"\" import hmac def compute_signature(payload: bytes, secret: bytes, algo: str =", "signature *sig* with the computed signature of the *payload* and raises a #SignatureMismatchException", "computed = compute_signature(payload, secret, algo) if not hmac.compare_digest(sig, computed): raise SignatureMismatchException(sig, computed) class", "def check_signature(sig: str, payload: bytes, secret: bytes, algo: str = 'sha256') -> None:", "*payload* given the specified *secret* and the given hashing *algo*. # Parmeters payload:", "does not match the computed signature of the payload.' def __init__(self, provided: str,", "hash algorithm to use, must be `sha1` or `sha256`. \"\"\" if algo not", "\"\"\" import hmac def compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256') ->", "bytes, secret: bytes, algo: str = 'sha256') -> None: \"\"\" Compares the porivided" ]
[ "commodore.config import Config from commodore.component import Component, component_dir def test_apierror(): e = helpers.ApiError(\"test\")", "= component_dir(tmp_path, \"test\") assert not d.is_dir() Component(\"test\", work_dir=tmp_path) assert d.is_dir() helpers.clean_working_tree(cfg) assert d.is_dir()", "commodore.component import Component, component_dir def test_apierror(): e = helpers.ApiError(\"test\") assert f\"{e}\" == \"test\"", "as helpers from commodore.config import Config from commodore.component import Component, component_dir def test_apierror():", "for helpers \"\"\" from pathlib import Path import commodore.helpers as helpers from commodore.config", "from commodore.component import Component, component_dir def test_apierror(): e = helpers.ApiError(\"test\") assert f\"{e}\" ==", "\"test\" try: raise helpers.ApiError(\"test2\") except helpers.ApiError as e2: assert f\"{e2}\" == \"test2\" def", "cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\") assert not d.is_dir() Component(\"test\", work_dir=tmp_path)", "commodore.helpers as helpers from commodore.config import Config from commodore.component import Component, component_dir def", "Path import commodore.helpers as helpers from commodore.config import Config from commodore.component import Component,", "def test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\") assert not", "<filename>tests/test_helpers.py \"\"\" Unit-tests for helpers \"\"\" from pathlib import Path import commodore.helpers as", "def test_apierror(): e = helpers.ApiError(\"test\") assert f\"{e}\" == \"test\" try: raise helpers.ApiError(\"test2\") except", "e2: assert f\"{e2}\" == \"test2\" def test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d", "\"\"\" Unit-tests for helpers \"\"\" from pathlib import Path import commodore.helpers as helpers", 
"try: raise helpers.ApiError(\"test2\") except helpers.ApiError as e2: assert f\"{e2}\" == \"test2\" def test_clean_working_tree(tmp_path:", "import Component, component_dir def test_apierror(): e = helpers.ApiError(\"test\") assert f\"{e}\" == \"test\" try:", "assert f\"{e2}\" == \"test2\" def test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d =", "as e2: assert f\"{e2}\" == \"test2\" def test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs()", "Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\") assert not d.is_dir() Component(\"test\", work_dir=tmp_path) assert d.is_dir()", "test_apierror(): e = helpers.ApiError(\"test\") assert f\"{e}\" == \"test\" try: raise helpers.ApiError(\"test2\") except helpers.ApiError", "cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\") assert not d.is_dir() Component(\"test\", work_dir=tmp_path) assert d.is_dir() helpers.clean_working_tree(cfg)", "test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\") assert not d.is_dir()", "component_dir def test_apierror(): e = helpers.ApiError(\"test\") assert f\"{e}\" == \"test\" try: raise helpers.ApiError(\"test2\")", "Config from commodore.component import Component, component_dir def test_apierror(): e = helpers.ApiError(\"test\") assert f\"{e}\"", "from commodore.config import Config from commodore.component import Component, component_dir def test_apierror(): e =", "helpers \"\"\" from pathlib import Path import commodore.helpers as helpers from commodore.config import", "helpers.ApiError(\"test2\") except helpers.ApiError as e2: assert f\"{e2}\" == \"test2\" def test_clean_working_tree(tmp_path: Path): cfg", "import commodore.helpers as helpers from commodore.config import Config from commodore.component import Component, component_dir", "pathlib import 
Path import commodore.helpers as helpers from commodore.config import Config from commodore.component", "== \"test\" try: raise helpers.ApiError(\"test2\") except helpers.ApiError as e2: assert f\"{e2}\" == \"test2\"", "f\"{e}\" == \"test\" try: raise helpers.ApiError(\"test2\") except helpers.ApiError as e2: assert f\"{e2}\" ==", "f\"{e2}\" == \"test2\" def test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path,", "Unit-tests for helpers \"\"\" from pathlib import Path import commodore.helpers as helpers from", "== \"test2\" def test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\")", "assert f\"{e}\" == \"test\" try: raise helpers.ApiError(\"test2\") except helpers.ApiError as e2: assert f\"{e2}\"", "= helpers.ApiError(\"test\") assert f\"{e}\" == \"test\" try: raise helpers.ApiError(\"test2\") except helpers.ApiError as e2:", "helpers from commodore.config import Config from commodore.component import Component, component_dir def test_apierror(): e", "= Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\") assert not d.is_dir() Component(\"test\", work_dir=tmp_path) assert", "import Config from commodore.component import Component, component_dir def test_apierror(): e = helpers.ApiError(\"test\") assert", "raise helpers.ApiError(\"test2\") except helpers.ApiError as e2: assert f\"{e2}\" == \"test2\" def test_clean_working_tree(tmp_path: Path):", "helpers.ApiError(\"test\") assert f\"{e}\" == \"test\" try: raise helpers.ApiError(\"test2\") except helpers.ApiError as e2: assert", "Component, component_dir def test_apierror(): e = helpers.ApiError(\"test\") assert f\"{e}\" == \"test\" try: raise", "e = helpers.ApiError(\"test\") assert f\"{e}\" == \"test\" try: raise helpers.ApiError(\"test2\") except helpers.ApiError as", "helpers.ApiError as e2: assert f\"{e2}\" == 
\"test2\" def test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path)", "except helpers.ApiError as e2: assert f\"{e2}\" == \"test2\" def test_clean_working_tree(tmp_path: Path): cfg =", "\"\"\" from pathlib import Path import commodore.helpers as helpers from commodore.config import Config", "from pathlib import Path import commodore.helpers as helpers from commodore.config import Config from", "d = component_dir(tmp_path, \"test\") assert not d.is_dir() Component(\"test\", work_dir=tmp_path) assert d.is_dir() helpers.clean_working_tree(cfg) assert", "Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\") assert not d.is_dir() Component(\"test\",", "import Path import commodore.helpers as helpers from commodore.config import Config from commodore.component import", "\"test2\" def test_clean_working_tree(tmp_path: Path): cfg = Config(work_dir=tmp_path) cfg.inventory.ensure_dirs() d = component_dir(tmp_path, \"test\") assert" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "__future__ import division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation,", "License. # You may obtain a copy of the License at # #", "from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor from", "ONNXOperation, ONNXTensor __all__ = [ 'ONNXToNNEFConverter', 'NNEFToONNXConverter', 'NNEFGraph', 'NNEFOperation', 'NNEFTensor', 'ONNXGraph', 'ONNXOperation', 'ONNXTensor',", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "ONNXTensor __all__ = [ 'ONNXToNNEFConverter', 'NNEFToONNXConverter', 'NNEFGraph', 'NNEFOperation', 'NNEFTensor', 'ONNXGraph', 'ONNXOperation', 'ONNXTensor', ]", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph,", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "import division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import", "you may not use this file except in compliance with the License. #", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "ANY KIND, either express or implied. # See the License for the specific", "division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter", "ONNXGraph, ONNXOperation, ONNXTensor __all__ = [ 'ONNXToNNEFConverter', 'NNEFToONNXConverter', 'NNEFGraph', 'NNEFOperation', 'NNEFTensor', 'ONNXGraph', 'ONNXOperation',", "The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "import ONNXGraph, ONNXOperation, ONNXTensor __all__ = [ 'ONNXToNNEFConverter', 'NNEFToONNXConverter', 'NNEFGraph', 'NNEFOperation', 'NNEFTensor', 'ONNXGraph',", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor __all__ = [ 'ONNXToNNEFConverter', 'NNEFToONNXConverter', 'NNEFGraph', 'NNEFOperation', 'NNEFTensor',", "import Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import", "Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "specific language governing permissions and # limitations under the License. from __future__ import", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "OF ANY KIND, either express or implied. # See the License for the", "2.0 (the \"License\"); # you may not use this file except in compliance", "# Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the", "# you may not use this file except in compliance with the License.", "Group Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph", "agreed to in writing, software # distributed under the License is distributed on", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "import Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import", "from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter from", "as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation,", "from __future__ import division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter from", "(the \"License\"); # you may not use this file except in compliance with", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "for the specific language governing permissions and # limitations under the License. from", "except in compliance with the License. # You may obtain a copy of", "language governing permissions and # limitations under the License. 
from __future__ import division,", "by applicable law or agreed to in writing, software # distributed under the", "NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor", "Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph,", "import NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor __all__ = [", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "either express or implied. # See the License for the specific language governing", "from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor __all__ = [ 'ONNXToNNEFConverter', 'NNEFToONNXConverter', 'NNEFGraph', 'NNEFOperation',", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "2017 The Khronos Group Inc. # # Licensed under the Apache License, Version", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor __all__ = [ 'ONNXToNNEFConverter',", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "file except in compliance with the License. # You may obtain a copy", "the License. 
from __future__ import division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as", "the specific language governing permissions and # limitations under the License. from __future__", "NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor __all__ = [ 'ONNXToNNEFConverter', 'NNEFToONNXConverter', 'NNEFGraph',", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "the License. # You may obtain a copy of the License at #", "to in writing, software # distributed under the License is distributed on an", "absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor __all__ = [ 'ONNXToNNEFConverter', 'NNEFToONNXConverter',", "\"License\"); # you may not use this file except in compliance with the", "Copyright (c) 2017 The Khronos Group Inc. 
# # Licensed under the Apache", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "limitations under the License. from __future__ import division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import", "applicable law or agreed to in writing, software # distributed under the License", "print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as", "permissions and # limitations under the License. from __future__ import division, print_function, absolute_import", "under the License. from __future__ import division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter", "nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor __all__ =", "governing permissions and # limitations under the License. from __future__ import division, print_function,", "from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor __all__", "<reponame>rgiduthuri/NNEF-Tools # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under", "or agreed to in writing, software # distributed under the License is distributed", "or implied. # See the License for the specific language governing permissions and", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "License. 
from __future__ import division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter", "ONNXToNNEFConverter from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "(c) 2017 The Khronos Group Inc. # # Licensed under the Apache License,", "in writing, software # distributed under the License is distributed on an \"AS", "# limitations under the License. from __future__ import division, print_function, absolute_import from nnef_tools.conversion.onnx.nnef_to_onnx", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "and # limitations under the License. from __future__ import division, print_function, absolute_import from" ]
[ "has been stopped server = ServerThread(6000, app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs =", "-*- coding: utf-8 -*- import io import os import requests from httpdbg.httpdbg import", "ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"] ==", ") # to terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need", "os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"] ) # to", "stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"] == httpbin.url +", "run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs", "requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"] == httpbin.url", "def test_run_pytest_with_exception(capsys): def _test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\",", "_test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\",", "os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"])", "== httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" server.shutdown() def test_run_pytest_with_exception(capsys):", 
") run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\")", "= requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"] ==", "= os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test)", "import io import os import requests from httpdbg.httpdbg import ServerThread, app from httpdbg.mode_pytest", "ServerThread, app from httpdbg.mode_pytest import run_pytest from httpdbg.__main__ import pyhttpdbg_entry_point from utils import", "== httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch):", "+ \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] =", "== httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join(", "== 3 assert reqs[0][\"uri\"] == httpbin.url + \"/post\" assert reqs[1][\"uri\"] == httpbin.url +", "ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"] == httpbin.url + \"/post\" assert reqs[1][\"uri\"]", "we need to restart a new httpdbg server as the previous has been", "len(reqs) == 3 assert reqs[0][\"uri\"] == httpbin.url + \"/post\" assert reqs[1][\"uri\"] == httpbin.url", ") run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") 
stop_httpdbg()", "script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"]", "os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\",", "assert reqs[0][\"uri\"] == httpbin.url + \"/post\" assert reqs[1][\"uri\"] == httpbin.url + \"/get\" assert", "\"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs =", "3 assert reqs[0][\"uri\"] == httpbin.url + \"/post\" assert reqs[1][\"uri\"] == httpbin.url + \"/get\"", "run_pytest from httpdbg.__main__ import pyhttpdbg_entry_point from utils import _run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin):", "os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret", "def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run,", "\"/post\" assert reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\"", "+ \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" server.shutdown() def test_run_pytest_with_exception(capsys): def _test():", "reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" server.shutdown() def", "app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert", "= os.path.join( 
os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"] )", "reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin,", "\"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"] ) # to terminate the httpdbg server", "httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)),", "script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs", "def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" )", "\"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert", "reqs[2][\"uri\"] == httpbin.url + \"/put\" server.shutdown() def test_run_pytest_with_exception(capsys): def _test(): script_to_run = os.path.join(", "\"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"]", "# we need to restart a new httpdbg server as the previous has", "script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port =", ") monkeypatch.setattr( 
\"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"] ) # to terminate the", "import run_pytest from httpdbg.__main__ import pyhttpdbg_entry_point from utils import _run_under_httpdbg def test_run_pytest(httpbin): def", "# -*- coding: utf-8 -*- import io import os import requests from httpdbg.httpdbg", "\"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret =", "ServerThread(6000, app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert len(reqs) == 3", "io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need to restart a new httpdbg server as the", "from utils import _run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run =", "httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need to restart a new httpdbg", "= os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test,", "httpbin.url + \"/post\" assert reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url", "_run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\"", "test_run_pytest_with_exception(capsys): def _test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"])", "import requests from httpdbg.httpdbg import ServerThread, app from httpdbg.mode_pytest import run_pytest from httpdbg.__main__", "coding: utf-8 -*- import io 
import os import requests from httpdbg.httpdbg import ServerThread,", "requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 0 assert \"fixture_which_do_not_exists\" in capsys.readouterr().out", "\"-k\", \"test_demo\"] ) # to terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() #", "server.shutdown() def test_run_pytest_with_exception(capsys): def _test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run,", "= requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 0 assert \"fixture_which_do_not_exists\" in", "assert reqs[2][\"uri\"] == httpbin.url + \"/put\" server.shutdown() def test_run_pytest_with_exception(capsys): def _test(): script_to_run =", "+ \"/post\" assert reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url +", "script_to_run, \"-k\", \"test_demo\"] ) # to terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point()", "httpdbg server as the previous has been stopped server = ServerThread(6000, app) server.start()", "requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"] == httpbin.url +", "as the previous has been stopped server = ServerThread(6000, app) server.start() ret =", "app from httpdbg.mode_pytest import run_pytest from httpdbg.__main__ import pyhttpdbg_entry_point from utils import _run_under_httpdbg", "httpdbg.httpdbg import ServerThread, app from httpdbg.mode_pytest import run_pytest from httpdbg.__main__ import pyhttpdbg_entry_point from", "_run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = 
ret.json()[\"requests\"] assert len(reqs) == 3", "reqs[0][\"uri\"] == httpbin.url + \"/post\" assert reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"]", "reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"] == httpbin.url + \"/post\"", "\"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"] ) # to terminate", "the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need to restart a new", "# to terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need to", "pyhttpdbg_entry_point() # we need to restart a new httpdbg server as the previous", "import os import requests from httpdbg.httpdbg import ServerThread, app from httpdbg.mode_pytest import run_pytest", "= _run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) ==", "server as the previous has been stopped server = ServerThread(6000, app) server.start() ret", "= _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 0", "[\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"] ) # to terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\",", "os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"] ) #", "_test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port", "httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" server.shutdown() def 
test_run_pytest_with_exception(capsys): def", "server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"]", "requests from httpdbg.httpdbg import ServerThread, app from httpdbg.mode_pytest import run_pytest from httpdbg.__main__ import", "stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs)", "monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need to restart a new httpdbg server as", "= ServerThread(6000, app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert len(reqs) ==", "httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port", "+ \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\"", "\"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url", "assert reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" server.shutdown()", "def _test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg,", "assert reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" def", "httpdbg.mode_pytest import run_pytest from httpdbg.__main__ import pyhttpdbg_entry_point from utils 
import _run_under_httpdbg def test_run_pytest(httpbin):", "from httpdbg.__main__ import pyhttpdbg_entry_point from utils import _run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"]", "import ServerThread, app from httpdbg.mode_pytest import run_pytest from httpdbg.__main__ import pyhttpdbg_entry_point from utils", "def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr(", "\"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"]", "os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret", "restart a new httpdbg server as the previous has been stopped server =", "httpbin.url + \"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"]", "httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert", "new httpdbg server as the previous has been stopped server = ServerThread(6000, app)", "pyhttpdbg_entry_point from utils import _run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run", "= httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run,", "io import os 
import requests from httpdbg.httpdbg import ServerThread, app from httpdbg.mode_pytest import", "_run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 0 assert", "current_httpdbg_port = _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) ==", "stopped server = ServerThread(6000, app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert", "utf-8 -*- import io import os import requests from httpdbg.httpdbg import ServerThread, app", "script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port =", "= ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"] == httpbin.url + \"/post\" assert", "httpdbg.__main__ import pyhttpdbg_entry_point from utils import _run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] =", "a new httpdbg server as the previous has been stopped server = ServerThread(6000,", "monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\",", "script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs =", "os import requests from httpdbg.httpdbg import ServerThread, app from httpdbg.mode_pytest import run_pytest from", "assert reqs[2][\"uri\"] == httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): 
os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run", "+ \"/put\" server.shutdown() def test_run_pytest_with_exception(capsys): def _test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" )", "httpbin.url + \"/put\" server.shutdown() def test_run_pytest_with_exception(capsys): def _test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\"", "the previous has been stopped server = ServerThread(6000, app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\")", "\"/get\" assert reqs[2][\"uri\"] == httpbin.url + \"/put\" server.shutdown() def test_run_pytest_with_exception(capsys): def _test(): script_to_run", "terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need to restart a", "-*- import io import os import requests from httpdbg.httpdbg import ServerThread, app from", "ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 3 assert reqs[0][\"uri\"]", "stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert", "to terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need to restart", "utils import _run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join(", "to restart a new httpdbg server as the previous has been stopped server", "server = ServerThread(6000, app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert len(reqs)", "= requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"] assert len(reqs) == 3 
assert reqs[0][\"uri\"] == httpbin.url", "current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs)", "from httpdbg.httpdbg import ServerThread, app from httpdbg.mode_pytest import run_pytest from httpdbg.__main__ import pyhttpdbg_entry_point", "test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\",", "test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\",", "== httpbin.url + \"/put\" server.shutdown() def test_run_pytest_with_exception(capsys): def _test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)),", "ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg() reqs = ret.json()[\"requests\"] assert len(reqs) == 0 assert \"fixture_which_do_not_exists\"", "server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we need to restart a new httpdbg server", "\"/put\" server.shutdown() def test_run_pytest_with_exception(capsys): def _test(): script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\",", "os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret =", "\"test_demo\"] ) # to terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\")) pyhttpdbg_entry_point() # we", "from httpdbg.mode_pytest import run_pytest from 
httpdbg.__main__ import pyhttpdbg_entry_point from utils import _run_under_httpdbg def", "import _run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)),", "run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\") stop_httpdbg()", "assert len(reqs) == 3 assert reqs[0][\"uri\"] == httpbin.url + \"/post\" assert reqs[1][\"uri\"] ==", "need to restart a new httpdbg server as the previous has been stopped", "been stopped server = ServerThread(6000, app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs = ret.json()[\"requests\"]", "= httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg,", "os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin)", "monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\", \"test_demo\"] ) # to terminate the httpdbg", "\"demo_run_pytest.py\" ) run_pytest([\"pytest\", script_to_run, \"-k\", \"test_demo_raise_exception\"]) stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test) ret = requests.get(f\"http://127.0.0.1:{current_httpdbg_port}/requests\")", "httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" ) monkeypatch.setattr( \"sys.argv\", [\"pyhttpdb\", \"pytest\", script_to_run, \"-k\",", "previous has been stopped server = ServerThread(6000, app) server.start() ret = requests.get(\"http://127.0.0.1:6000/requests\") reqs", "\"pytest\", 
script_to_run, \"-k\", \"test_demo\"] ) # to terminate the httpdbg server monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"\\n\"))", "reqs[2][\"uri\"] == httpbin.url + \"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run =", "== httpbin.url + \"/post\" assert reqs[1][\"uri\"] == httpbin.url + \"/get\" assert reqs[2][\"uri\"] ==", "\"/put\" def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url script_to_run = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"demo_run_pytest.py\" )", "import pyhttpdbg_entry_point from utils import _run_under_httpdbg def test_run_pytest(httpbin): def _test(httpbin): os.environ[\"HTTPDBG_TEST_PYTEST_BASE_URL\"] = httpbin.url" ]
[ "and creates a list. Returns the sum of the list # Example: 'Dog'", "'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y':", "using the letter # does not account for value modifications based on a", "2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8,", "'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G':", "'Dog' -> sum([2, 1, 2]) = 5 return sum([charDict[char] for char in word])", "list comprehension # Basically grabs the number value that correlates with the #", "1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1,", "a word to the base point value using the letter # does not", "0, 'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4,", "to the base point value using the letter # does not account for", "Refactored for loop with list comprehension # Basically grabs the number value that", "10, } # Refactored for loop with list comprehension # Basically grabs the", "'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N':", "'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z':", "1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8,", "to uppercase so it can find # keys in dictionary word = word.upper()", "'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P':", "': 0, 'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F':", "that correlates with the # letter in the word and creates a list.", "'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W':", "8, 'Y': 4, 'Z': 10, } # Refactored for loop with list comprehension", "find # keys in dictionary word = word.upper() # dictionary of letters and", "# keys in dictionary word = word.upper() # dictionary of letters and their", "value modifications based on a board def point_conversion(word): # Convert word passed in", "it can find # keys in dictionary word = word.upper() # dictionary of", "not account for value modifications based on a board def point_conversion(word): # Convert", "3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4,", "'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T':", "function to convert a word to the base point value 
using the letter", "values charDict = { ' ': 0, 'A': 1, 'B': 3, 'C': 3,", "number value that correlates with the # letter in the word and creates", "-> sum([2, 1, 2]) = 5 return sum([charDict[char] for char in word]) print(point_conversion('dog'))", "'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L':", "'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J':", "1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2,", "'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V':", "of the list # Example: 'Dog' -> sum([2, 1, 2]) = 5 return", "a board def point_conversion(word): # Convert word passed in to uppercase so it", "'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H':", "Example: 'Dog' -> sum([2, 1, 2]) = 5 return sum([charDict[char] for char in", "5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10,", "convert a word to the base point value using the letter # does", "4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 10, } # Refactored for", "charDict = { ' ': 0, 'A': 1, 'B': 3, 'C': 3, 'D':", "in dictionary word = word.upper() # dictionary of letters and their point values", "8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3,", "3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1,", "point value using the letter # does not account for value modifications based", "dictionary word = word.upper() # dictionary of letters and their point values charDict", "so it can find # keys in dictionary word = word.upper() # dictionary", "1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1,", "value that correlates with the # letter in the word and creates a", "uppercase so it can find # keys in dictionary word = word.upper() #", "with list comprehension # Basically grabs the number value that correlates with the", "4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1,", "'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M':", "'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q':", "in the word and creates a list. 
Returns the sum of the list", "' ': 0, 'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1,", "1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 10, } #", "2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3,", "'W': 4, 'X': 8, 'Y': 4, 'Z': 10, } # Refactored for loop", "and their point values charDict = { ' ': 0, 'A': 1, 'B':", "list. Returns the sum of the list # Example: 'Dog' -> sum([2, 1,", "for loop with list comprehension # Basically grabs the number value that correlates", "4, 'Z': 10, } # Refactored for loop with list comprehension # Basically", "word = word.upper() # dictionary of letters and their point values charDict =", "word.upper() # dictionary of letters and their point values charDict = { '", "dictionary of letters and their point values charDict = { ' ': 0,", "letter in the word and creates a list. Returns the sum of the", "letters and their point values charDict = { ' ': 0, 'A': 1,", "base point value using the letter # does not account for value modifications", "4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1,", "# Example: 'Dog' -> sum([2, 1, 2]) = 5 return sum([charDict[char] for char", "10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4,", "1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 10,", "account for value modifications based on a board def point_conversion(word): # Convert word", "in to uppercase so it can find # keys in dictionary word =", "the base point value using the letter # does not account for value", "'Z': 10, } # Refactored for loop with list comprehension # Basically grabs", "# letter in the word and creates a list. Returns the sum of", "list # Example: 'Dog' -> sum([2, 1, 2]) = 5 return sum([charDict[char] for", "the word and creates a list. 
Returns the sum of the list #", "grabs the number value that correlates with the # letter in the word", "'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R':", "the list # Example: 'Dog' -> sum([2, 1, 2]) = 5 return sum([charDict[char]", "for value modifications based on a board def point_conversion(word): # Convert word passed", "'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I':", "4, 'X': 8, 'Y': 4, 'Z': 10, } # Refactored for loop with", "word passed in to uppercase so it can find # keys in dictionary", "point values charDict = { ' ': 0, 'A': 1, 'B': 3, 'C':", "= word.upper() # dictionary of letters and their point values charDict = {", "'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O':", "word and creates a list. Returns the sum of the list # Example:", "on a board def point_conversion(word): # Convert word passed in to uppercase so", "does not account for value modifications based on a board def point_conversion(word): #", "value using the letter # does not account for value modifications based on", "1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5,", "'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K':", "'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X':", "1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1,", "word to the base point value using the letter # does not account", "Basically grabs the number value that correlates with the # letter in the", "keys in dictionary word = word.upper() # dictionary of letters and their point", "'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U':", "3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4,", "comprehension # Basically grabs the number value that correlates with the # letter", "# does not account for value modifications based on a board def point_conversion(word):", "the # letter in the word and creates a list. 
Returns the sum", "Returns the sum of the list # Example: 'Dog' -> sum([2, 1, 2])", "# Convert word passed in to uppercase so it can find # keys", "# function to convert a word to the base point value using the", "to convert a word to the base point value using the letter #", "creates a list. Returns the sum of the list # Example: 'Dog' ->", "point_conversion(word): # Convert word passed in to uppercase so it can find #", "def point_conversion(word): # Convert word passed in to uppercase so it can find", "the sum of the list # Example: 'Dog' -> sum([2, 1, 2]) =", "with the # letter in the word and creates a list. Returns the", "'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S':", "'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 10, }", "# dictionary of letters and their point values charDict = { ' ':", "'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 10, } # Refactored", "their point values charDict = { ' ': 0, 'A': 1, 'B': 3,", "1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4,", "1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1,", "# Refactored for loop with list comprehension # Basically grabs the number value", "{ ' ': 0, 'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E':", "board def point_conversion(word): # Convert word passed in to uppercase so it can", "can find # keys in dictionary word = word.upper() # dictionary of letters", "# Basically grabs the number value that correlates with the # letter in", "a list. 
Returns the sum of the list # Example: 'Dog' -> sum([2,", "based on a board def point_conversion(word): # Convert word passed in to uppercase", "Convert word passed in to uppercase so it can find # keys in", "= { ' ': 0, 'A': 1, 'B': 3, 'C': 3, 'D': 2,", "letter # does not account for value modifications based on a board def", "the letter # does not account for value modifications based on a board", "loop with list comprehension # Basically grabs the number value that correlates with", "} # Refactored for loop with list comprehension # Basically grabs the number", "'X': 8, 'Y': 4, 'Z': 10, } # Refactored for loop with list", "sum of the list # Example: 'Dog' -> sum([2, 1, 2]) = 5", "passed in to uppercase so it can find # keys in dictionary word", "the number value that correlates with the # letter in the word and", "3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1,", "correlates with the # letter in the word and creates a list. Returns", "modifications based on a board def point_conversion(word): # Convert word passed in to", "<filename>word_to_score.py<gh_stars>0 # function to convert a word to the base point value using", "of letters and their point values charDict = { ' ': 0, 'A':", "'Y': 4, 'Z': 10, } # Refactored for loop with list comprehension #" ]
[ "_: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i) ) exit(1) return False", "case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET) case \" \" | \"\\t\": tokens.append(TokenType.WHITESPACE) case", "= True) -> List | bool: if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens", "from typing import List from core.error import print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\" class", "from core.error import print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum): ADD = auto()", "# < CELL_SHIFT_RIGHT = auto() # > LOOP_START = auto() # [ LOOP_END", "| bool: if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = [] for line_i,", "case \"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case", "case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case", "lst[-1] depth -= 1 lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups = [] depth =", "enumerate(source.strip().splitlines()): for token_i, token in enumerate(line): match token: case \"+\": tokens.append(TokenType.ADD) case \"-\":", "\"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\":", "[ LOOP_END = auto() # ] PRINT_CHAR = auto() # % PRINT_NUM =", "core.error import print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum): ADD = auto() #", "auto() # | PRINT_NEWLINE = auto() # ^ ALPHA_RESET = auto() # $", "= auto() # $ UPPER_ALPHA_RESET = auto() # 
? HARD_RESET = auto() #", "PRINT_NEWLINE = auto() # ^ ALPHA_RESET = auto() # $ UPPER_ALPHA_RESET = auto()", "? HARD_RESET = auto() # ! VALUE_GET = auto() # / NEWLINE =", "< CELL_SHIFT_RIGHT = auto() # > LOOP_START = auto() # [ LOOP_END =", "MINUS = auto() # - CELL_SHIFT_LEFT = auto() # < CELL_SHIFT_RIGHT = auto()", "enum import Enum, auto from typing import List from core.error import print_error, Errors", "\" | \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break case _: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN,", "# - CELL_SHIFT_LEFT = auto() # < CELL_SHIFT_RIGHT = auto() # > LOOP_START", "IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups def", "# ] PRINT_CHAR = auto() # % PRINT_NUM = auto() # | PRINT_NEWLINE", "tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET)", "for token_i, token in enumerate(line): match token: case \"+\": tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS)", "\"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET) case \" \" | \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\":", "case \";\": break case _: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i)", "1 lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups = [] depth = 0 try: for", "else: push(char, groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth > 0: print_error(Errors.UNMATCHED_BRACKET)", "| \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break case _: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line,", "\"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case \"%\": 
tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\":", "while depth: lst = lst[-1] depth -= 1 lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups", "depth): while depth: lst = lst[-1] depth -= 1 lst.append(obj) def parse_brackets(tokens: List[TokenType]):", "depth -= 1 lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups = [] depth = 0", "List | bool: if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = [] for", "auto() # - CELL_SHIFT_LEFT = auto() # < CELL_SHIFT_RIGHT = auto() # >", "if char == TokenType.LOOP_START: push([], groups, depth) depth += 1 elif char ==", "= auto() # + MINUS = auto() # - CELL_SHIFT_LEFT = auto() #", "^ ALPHA_RESET = auto() # $ UPPER_ALPHA_RESET = auto() # ? HARD_RESET =", "groups def parse(source: str, filepath: str = None, exit_on_fail: bool = True) ->", "typing import List from core.error import print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum):", "auto() # [ LOOP_END = auto() # ] PRINT_CHAR = auto() # %", "tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case \"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE)", "\">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case \"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\":", "exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i) ) exit(1) return False tokens.append(TokenType.NEWLINE) return", "# % PRINT_NUM = auto() # | PRINT_NEWLINE = auto() # ^ ALPHA_RESET", "- CELL_SHIFT_LEFT = auto() # < CELL_SHIFT_RIGHT = auto() # > LOOP_START =", "\"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": 
tokens.append(TokenType.VALUE_GET) case \"", "depth = 0 try: for char in tokens: if char == TokenType.LOOP_START: push([],", "print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups def parse(source:", "class TokenType(Enum): ADD = auto() # + MINUS = auto() # - CELL_SHIFT_LEFT", "\" \" | \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break case _: if exit_on_fail: print_error(", "-> List | bool: if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = []", "str = None, exit_on_fail: bool = True) -> List | bool: if source.count(\"[\")", "if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i) ) exit(1) return False tokens.append(TokenType.NEWLINE)", "= auto() WHITESPACE = auto() def push(obj, lst, depth): while depth: lst =", "List from core.error import print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum): ADD =", "import Enum, auto from typing import List from core.error import print_error, Errors VALID_TOKENS", "CELL_SHIFT_LEFT = auto() # < CELL_SHIFT_RIGHT = auto() # > LOOP_START = auto()", "tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET)", "\"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET) case \" \" | \"\\t\":", "Enum, auto from typing import List from core.error import print_error, Errors VALID_TOKENS =", "groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else:", "push(obj, lst, depth): while depth: lst = lst[-1] depth -= 1 lst.append(obj) def", "= auto() # [ LOOP_END = auto() # ] PRINT_CHAR = auto() #", 
"tokens.append(TokenType.WHITESPACE) case \";\": break case _: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i,", "bool: if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = [] for line_i, line", "\"/\": tokens.append(TokenType.VALUE_GET) case \" \" | \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break case _:", "# [ LOOP_END = auto() # ] PRINT_CHAR = auto() # % PRINT_NUM", "# > LOOP_START = auto() # [ LOOP_END = auto() # ] PRINT_CHAR", "ALPHA_RESET = auto() # $ UPPER_ALPHA_RESET = auto() # ? HARD_RESET = auto()", "= auto() # % PRINT_NUM = auto() # | PRINT_NEWLINE = auto() #", "auto() def push(obj, lst, depth): while depth: lst = lst[-1] depth -= 1", "[] depth = 0 try: for char in tokens: if char == TokenType.LOOP_START:", "push([], groups, depth) depth += 1 elif char == TokenType.LOOP_END: depth -= 1", "= auto() # / NEWLINE = auto() WHITESPACE = auto() def push(obj, lst,", "bool = True) -> List | bool: if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1)", "char == TokenType.LOOP_END: depth -= 1 else: push(char, groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET)", "= auto() # | PRINT_NEWLINE = auto() # ^ ALPHA_RESET = auto() #", "lst, depth): while depth: lst = lst[-1] depth -= 1 lst.append(obj) def parse_brackets(tokens:", "in tokens: if char == TokenType.LOOP_START: push([], groups, depth) depth += 1 elif", "print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum): ADD = auto() # + MINUS", "depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return", "parse(source: str, filepath: str = None, exit_on_fail: bool = True) -> List |", "in enumerate(source.strip().splitlines()): for token_i, token in enumerate(line): match token: case \"+\": tokens.append(TokenType.ADD) case", "\"]\": tokens.append(TokenType.LOOP_END) 
case \"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\":", "case \"+\": tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case", "= auto() # > LOOP_START = auto() # [ LOOP_END = auto() #", "= auto() # ! VALUE_GET = auto() # / NEWLINE = auto() WHITESPACE", "= lst[-1] depth -= 1 lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups = [] depth", "exit(1) else: return groups def parse(source: str, filepath: str = None, exit_on_fail: bool", "LOOP_END = auto() # ] PRINT_CHAR = auto() # % PRINT_NUM = auto()", "case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case", "tokens: if char == TokenType.LOOP_START: push([], groups, depth) depth += 1 elif char", "tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET)", "for char in tokens: if char == TokenType.LOOP_START: push([], groups, depth) depth +=", "print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i) ) exit(1) return False tokens.append(TokenType.NEWLINE) return parse_brackets(tokens)", "] PRINT_CHAR = auto() # % PRINT_NUM = auto() # | PRINT_NEWLINE =", "= auto() def push(obj, lst, depth): while depth: lst = lst[-1] depth -=", "case _: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i) ) exit(1) return", "auto() # ? HARD_RESET = auto() # ! 
VALUE_GET = auto() # /", "try: for char in tokens: if char == TokenType.LOOP_START: push([], groups, depth) depth", "= auto() # ] PRINT_CHAR = auto() # % PRINT_NUM = auto() #", "TokenType.LOOP_END: depth -= 1 else: push(char, groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if", "print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = [] for line_i, line in enumerate(source.strip().splitlines()): for token_i, token", "0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups def parse(source: str, filepath: str = None,", "lst = lst[-1] depth -= 1 lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups = []", "None, exit_on_fail: bool = True) -> List | bool: if source.count(\"[\") != source.count(\"]\"):", "TokenType.LOOP_START: push([], groups, depth) depth += 1 elif char == TokenType.LOOP_END: depth -=", "token_i, token in enumerate(line): match token: case \"+\": tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS) case", "line_i, line in enumerate(source.strip().splitlines()): for token_i, token in enumerate(line): match token: case \"+\":", "# / NEWLINE = auto() WHITESPACE = auto() def push(obj, lst, depth): while", "-= 1 else: push(char, groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth >", "exit(1) if depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups def parse(source: str,", "\"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case \"%\":", "lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups = [] depth = 0 try: for char", "token in enumerate(line): match token: case \"+\": tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS) case \"<\":", "ADD = auto() # + MINUS = auto() # - CELL_SHIFT_LEFT = auto()", "import List from core.error import 
print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum): ADD", "= None, exit_on_fail: bool = True) -> List | bool: if source.count(\"[\") !=", "+= 1 elif char == TokenType.LOOP_END: depth -= 1 else: push(char, groups, depth)", "exit(1) tokens = [] for line_i, line in enumerate(source.strip().splitlines()): for token_i, token in", "[] for line_i, line in enumerate(source.strip().splitlines()): for token_i, token in enumerate(line): match token:", "tokens.append(TokenType.LOOP_END) case \"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET)", "| PRINT_NEWLINE = auto() # ^ ALPHA_RESET = auto() # $ UPPER_ALPHA_RESET =", "1 elif char == TokenType.LOOP_END: depth -= 1 else: push(char, groups, depth) except", "case \"]\": tokens.append(TokenType.LOOP_END) case \"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case", "# $ UPPER_ALPHA_RESET = auto() # ? HARD_RESET = auto() # ! VALUE_GET", "List[TokenType]): groups = [] depth = 0 try: for char in tokens: if", "push(char, groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1)", "groups, depth) depth += 1 elif char == TokenType.LOOP_END: depth -= 1 else:", "= \"+-><][%|^$?!/\" class TokenType(Enum): ADD = auto() # + MINUS = auto() #", "= auto() # ^ ALPHA_RESET = auto() # $ UPPER_ALPHA_RESET = auto() #", "= auto() # ? HARD_RESET = auto() # ! VALUE_GET = auto() #", "# ? HARD_RESET = auto() # ! VALUE_GET = auto() # / NEWLINE", "auto() # ! 
VALUE_GET = auto() # / NEWLINE = auto() WHITESPACE =", "case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case", "print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups def parse(source: str, filepath: str = None, exit_on_fail:", "auto() # % PRINT_NUM = auto() # | PRINT_NEWLINE = auto() # ^", "str, filepath: str = None, exit_on_fail: bool = True) -> List | bool:", "parse_brackets(tokens: List[TokenType]): groups = [] depth = 0 try: for char in tokens:", "tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case \"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM)", "0 try: for char in tokens: if char == TokenType.LOOP_START: push([], groups, depth)", "case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET) case", "auto() # ] PRINT_CHAR = auto() # % PRINT_NUM = auto() # |", "== TokenType.LOOP_END: depth -= 1 else: push(char, groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1)", "% PRINT_NUM = auto() # | PRINT_NEWLINE = auto() # ^ ALPHA_RESET =", "char == TokenType.LOOP_START: push([], groups, depth) depth += 1 elif char == TokenType.LOOP_END:", "filepath: str = None, exit_on_fail: bool = True) -> List | bool: if", "\"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\":", "depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups def parse(source: str, filepath: str", "tokens = [] for line_i, line in enumerate(source.strip().splitlines()): for token_i, token in 
enumerate(line):", "= auto() # < CELL_SHIFT_RIGHT = auto() # > LOOP_START = auto() #", "auto from typing import List from core.error import print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\"", "source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = [] for line_i, line in enumerate(source.strip().splitlines()): for token_i,", "case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case \"%\": tokens.append(TokenType.PRINT_CHAR) case", "\"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break case _: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath,", "case \" \" | \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break case _: if exit_on_fail:", "True) -> List | bool: if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens =", "\"+\": tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\":", "def parse_brackets(tokens: List[TokenType]): groups = [] depth = 0 try: for char in", "\"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case \"]\":", "! 
VALUE_GET = auto() # / NEWLINE = auto() WHITESPACE = auto() def", "\";\": break case _: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i) )", "+ MINUS = auto() # - CELL_SHIFT_LEFT = auto() # < CELL_SHIFT_RIGHT =", "elif char == TokenType.LOOP_END: depth -= 1 else: push(char, groups, depth) except IndexError:", "depth += 1 elif char == TokenType.LOOP_END: depth -= 1 else: push(char, groups,", "auto() WHITESPACE = auto() def push(obj, lst, depth): while depth: lst = lst[-1]", "LOOP_START = auto() # [ LOOP_END = auto() # ] PRINT_CHAR = auto()", "in enumerate(line): match token: case \"+\": tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT)", "line in enumerate(source.strip().splitlines()): for token_i, token in enumerate(line): match token: case \"+\": tokens.append(TokenType.ADD)", "except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups", "PRINT_NUM = auto() # | PRINT_NEWLINE = auto() # ^ ALPHA_RESET = auto()", "tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET) case \" \" | \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break", "= auto() # - CELL_SHIFT_LEFT = auto() # < CELL_SHIFT_RIGHT = auto() #", "WHITESPACE = auto() def push(obj, lst, depth): while depth: lst = lst[-1] depth", "= 0 try: for char in tokens: if char == TokenType.LOOP_START: push([], groups,", "enumerate(line): match token: case \"+\": tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case", "\"+-><][%|^$?!/\" class TokenType(Enum): ADD = auto() # + MINUS = auto() # -", "tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET) case \" \"", "= [] depth = 
0 try: for char in tokens: if char ==", "VALUE_GET = auto() # / NEWLINE = auto() WHITESPACE = auto() def push(obj,", "auto() # / NEWLINE = auto() WHITESPACE = auto() def push(obj, lst, depth):", "token: case \"+\": tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT)", "tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case \"%\": tokens.append(TokenType.PRINT_CHAR)", "TokenType(Enum): ADD = auto() # + MINUS = auto() # - CELL_SHIFT_LEFT =", "= [] for line_i, line in enumerate(source.strip().splitlines()): for token_i, token in enumerate(line): match", "def push(obj, lst, depth): while depth: lst = lst[-1] depth -= 1 lst.append(obj)", "case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET) case \" \" |", "# | PRINT_NEWLINE = auto() # ^ ALPHA_RESET = auto() # $ UPPER_ALPHA_RESET", "> LOOP_START = auto() # [ LOOP_END = auto() # ] PRINT_CHAR =", "NEWLINE = auto() WHITESPACE = auto() def push(obj, lst, depth): while depth: lst", "/ NEWLINE = auto() WHITESPACE = auto() def push(obj, lst, depth): while depth:", "case \"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case", "tokens.append(TokenType.ADD) case \"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START)", "exit_on_fail: bool = True) -> List | bool: if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET)", "match token: case \"+\": tokens.append(TokenType.ADD) 
case \"-\": tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\":", "# ! VALUE_GET = auto() # / NEWLINE = auto() WHITESPACE = auto()", "\"|\": tokens.append(TokenType.PRINT_NUM) case \"^\": tokens.append(TokenType.PRINT_NEWLINE) case \"!\": tokens.append(TokenType.HARD_RESET) case \"$\": tokens.append(TokenType.ALPHA_RESET) case \"?\":", "VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum): ADD = auto() # + MINUS = auto()", "HARD_RESET = auto() # ! VALUE_GET = auto() # / NEWLINE = auto()", "PRINT_CHAR = auto() # % PRINT_NUM = auto() # | PRINT_NEWLINE = auto()", "1 else: push(char, groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth > 0:", "UPPER_ALPHA_RESET = auto() # ? HARD_RESET = auto() # ! VALUE_GET = auto()", "# ^ ALPHA_RESET = auto() # $ UPPER_ALPHA_RESET = auto() # ? HARD_RESET", "tokens.append(TokenType.MINUS) case \"<\": tokens.append(TokenType.CELL_SHIFT_LEFT) case \">\": tokens.append(TokenType.CELL_SHIFT_RIGHT) case \"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END)", "tokens.append(TokenType.VALUE_GET) case \" \" | \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break case _: if", "auto() # $ UPPER_ALPHA_RESET = auto() # ? HARD_RESET = auto() # !", "-= 1 lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups = [] depth = 0 try:", "$ UPPER_ALPHA_RESET = auto() # ? HARD_RESET = auto() # ! 
VALUE_GET =", "auto() # > LOOP_START = auto() # [ LOOP_END = auto() # ]", "!= source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = [] for line_i, line in enumerate(source.strip().splitlines()): for", "> 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups def parse(source: str, filepath: str =", "auto() # + MINUS = auto() # - CELL_SHIFT_LEFT = auto() # <", "case \"[\": tokens.append(TokenType.LOOP_START) case \"]\": tokens.append(TokenType.LOOP_END) case \"%\": tokens.append(TokenType.PRINT_CHAR) case \"|\": tokens.append(TokenType.PRINT_NUM) case", "def parse(source: str, filepath: str = None, exit_on_fail: bool = True) -> List", "auto() # ^ ALPHA_RESET = auto() # $ UPPER_ALPHA_RESET = auto() # ?", "if source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = [] for line_i, line in", "depth -= 1 else: push(char, groups, depth) except IndexError: print_error(Errors.UNMATCHED_BRACKET) exit(1) if depth", "break case _: if exit_on_fail: print_error( Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i) ) exit(1)", "Errors VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum): ADD = auto() # + MINUS =", "if depth > 0: print_error(Errors.UNMATCHED_BRACKET) exit(1) else: return groups def parse(source: str, filepath:", "for line_i, line in enumerate(source.strip().splitlines()): for token_i, token in enumerate(line): match token: case", "case \"/\": tokens.append(TokenType.VALUE_GET) case \" \" | \"\\t\": tokens.append(TokenType.WHITESPACE) case \";\": break case", "depth: lst = lst[-1] depth -= 1 lst.append(obj) def parse_brackets(tokens: List[TokenType]): groups =", "source.count(\"[\") != source.count(\"]\"): print_error(Errors.UNMATCHED_BRACKET) exit(1) tokens = [] for line_i, line in enumerate(source.strip().splitlines()):", "tokens.append(TokenType.ALPHA_RESET) case \"?\": tokens.append(TokenType.UPPER_ALPHA_RESET) case \"/\": tokens.append(TokenType.VALUE_GET) case \" \" | \"\\t\": 
tokens.append(TokenType.WHITESPACE)", "== TokenType.LOOP_START: push([], groups, depth) depth += 1 elif char == TokenType.LOOP_END: depth", "char in tokens: if char == TokenType.LOOP_START: push([], groups, depth) depth += 1", "depth) depth += 1 elif char == TokenType.LOOP_END: depth -= 1 else: push(char,", "CELL_SHIFT_RIGHT = auto() # > LOOP_START = auto() # [ LOOP_END = auto()", "groups = [] depth = 0 try: for char in tokens: if char", "else: return groups def parse(source: str, filepath: str = None, exit_on_fail: bool =", "return groups def parse(source: str, filepath: str = None, exit_on_fail: bool = True)", "import print_error, Errors VALID_TOKENS = \"+-><][%|^$?!/\" class TokenType(Enum): ADD = auto() # +", "from enum import Enum, auto from typing import List from core.error import print_error,", "# + MINUS = auto() # - CELL_SHIFT_LEFT = auto() # < CELL_SHIFT_RIGHT", "auto() # < CELL_SHIFT_RIGHT = auto() # > LOOP_START = auto() # [" ]
[ "lowest rank, record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif row['energy#']=='energy1': temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy']) elif row['energy#']=='energy2': temp_panda.loc[index,'cfmid-collision']='40-'+str(row['Collision_energy'])", "import pandas ################# input_panda_address=snakemake.input.input_panda_address output_panda_address=snakemake.output.output_panda_address ################# input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0) def fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda", "output_panda_address=snakemake.output.output_panda_address ################# input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0) def fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda for index,row in temp_panda.iterrows():", "#if the rank observed is lower than the lowest rank, record that if", "index,row in temp_panda.iterrows(): #if the rank observed is lower than the lowest rank,", "def fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda for index,row in temp_panda.iterrows(): #if the rank", "entire panda for index,row in temp_panda.iterrows(): #if the rank observed is lower than", "than the lowest rank, record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif row['energy#']=='energy1': temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy']) elif", "the lowest rank, record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif row['energy#']=='energy1': temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy']) elif row['energy#']=='energy2':", "through entire panda for index,row in temp_panda.iterrows(): #if the rank observed is lower", "#iterrate through entire panda for index,row 
in temp_panda.iterrows(): #if the rank observed is", "for index,row in temp_panda.iterrows(): #if the rank observed is lower than the lowest", "is lower than the lowest rank, record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif row['energy#']=='energy1':", "################# input_panda_address=snakemake.input.input_panda_address output_panda_address=snakemake.output.output_panda_address ################# input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0) def fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda for index,row", "fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda for index,row in temp_panda.iterrows(): #if the rank observed", "observed is lower than the lowest rank, record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif", "record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif row['energy#']=='energy1': temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy']) elif row['energy#']=='energy2': temp_panda.loc[index,'cfmid-collision']='40-'+str(row['Collision_energy']) input_panda.insert(loc=input_panda.columns.size-2,column='cfmid-collision',value='null') fill_cfmid_collision_energy_column(input_panda)", "rank observed is lower than the lowest rank, record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy'])", "################# input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0) def fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda for index,row in temp_panda.iterrows(): #if", "the rank observed is lower than the lowest rank, record that if row['energy#']=='energy0':", "pandas ################# input_panda_address=snakemake.input.input_panda_address 
output_panda_address=snakemake.output.output_panda_address ################# input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0) def fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda for", "in temp_panda.iterrows(): #if the rank observed is lower than the lowest rank, record", "temp_panda.iterrows(): #if the rank observed is lower than the lowest rank, record that", "that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif row['energy#']=='energy1': temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy']) elif row['energy#']=='energy2': temp_panda.loc[index,'cfmid-collision']='40-'+str(row['Collision_energy']) input_panda.insert(loc=input_panda.columns.size-2,column='cfmid-collision',value='null') fill_cfmid_collision_energy_column(input_panda) input_panda.to_csv(output_panda_address,sep='¬',index=False)", "input_panda_address=snakemake.input.input_panda_address output_panda_address=snakemake.output.output_panda_address ################# input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0) def fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda for index,row in", "panda for index,row in temp_panda.iterrows(): #if the rank observed is lower than the", "lower than the lowest rank, record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif row['energy#']=='energy1': temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy'])", "rank, record that if row['energy#']=='energy0': temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy']) elif row['energy#']=='energy1': temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy']) elif row['energy#']=='energy2': temp_panda.loc[index,'cfmid-collision']='40-'+str(row['Collision_energy']) 
input_panda.insert(loc=input_panda.columns.size-2,column='cfmid-collision',value='null')", "input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0) def fill_cfmid_collision_energy_column(temp_panda): #iterrate through entire panda for index,row in temp_panda.iterrows(): #if the" ]
[ "of images of the first identities to be morphed :param x2: A batch", "some encoders but the default implementation takes the mean. \"\"\" import torch class", "to morph with the x1 images :param use_mean: use z_mean instead of sampling", "when called directly. The default implementation assumes a VAE like encoder that returns", "method that captures all morphing methods proposed in RT, apart from gradient descend.", "overridden by some encoders but the default implementation takes the mean. \"\"\" import", "and returns the outcome latent representation. :param x1: A batch of images of", ":return: \"\"\" z = 0.5 * (z1 + z2) return z def encode(self,", "z1 and z2 when enabled :return: A batch of morphed z values. These", "in x1 with the images in x2 and returns the outcome latent representation.", "from gradient descend. This method is meant to be overridden by some encoders", "of morphed z values. These will have to go through the decoder/Gx in", "0.5 * (z1 + z2) return z def encode(self, x, use_mean=False): \"\"\" Encodes", "implementation takes the mean. \"\"\" import torch class MorphingEncoder(torch.nn.Module): def morph(self, x1, x2,", "be overridden by some encoders but the default implementation takes the mean. \"\"\"", "with the images in x2 and returns the outcome latent representation. :param x1:", "descend. This method is meant to be overridden by some encoders but the", "the first identities to be morphed :param z2: a batch of latent vectors", "first identities to be morphed :param z2: a batch of latent vectors for", "batch of latent vectors for the first identities to be morphed :param z2:", "enabled :return: A batch of morphed z values. These will have to go", "import torch class MorphingEncoder(torch.nn.Module): def morph(self, x1, x2, use_mean=False, return_all=False): \"\"\" Morphs the", "z2 return z def morph_zs(self, z1, z2): \"\"\" Morphs the latent vectors z1", "the first element is a sample. 
If this is not the case, the", "not the case, the method should be overridden. :param x: A batch of", "This allows for a single method that captures all morphing methods proposed in", "images in x1 with the images in x2 and returns the outcome latent", "Retruns z_morph, z1 and z2 when enabled :return: A batch of morphed z", ":return: A batch of morphed z values. These will have to go through", "the first identities to be morphed :param x2: A batch of images to", "with the x1 images :param use_mean: use z_mean instead of sampling from q(z|x)", "return_all: Retruns z_morph, z1 and z2 when enabled :return: A batch of morphed", "that captures all morphing methods proposed in RT, apart from gradient descend. This", "self.morph_zs(z1, z2) if return_all: return z, z1, z2 return z def morph_zs(self, z1,", "= self.morph_zs(z1, z2) if return_all: return z, z1, z2 return z def morph_zs(self,", "z, z1, z2 return z def morph_zs(self, z1, z2): \"\"\" Morphs the latent", "overridden. :param x: A batch of images :return: A list of latent representations", "x1: A batch of images of the first identities to be morphed :param", "to decode. \"\"\" z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1,", "If this is not the case, the method should be overridden. 
:param x:", "list of latent representations of these images in x \"\"\" z, zm, _", "these images in x \"\"\" z, zm, _ = self(x) if not use_mean:", "images :return: A list of latent representations of these images in x \"\"\"", "first identities to be morphed :param x2: A batch of images to morph", "method is meant to be overridden by some encoders but the default implementation", "vectors z1 and z2 and outputs z_morph :param z1: a batch of latent", "single method that captures all morphing methods proposed in RT, apart from gradient", "z1, z2 return z def morph_zs(self, z1, z2): \"\"\" Morphs the latent vectors", "from q(z|x) :param return_all: Retruns z_morph, z1 and z2 when enabled :return: A", "a single method that captures all morphing methods proposed in RT, apart from", "batch of images to morph with the x1 images :param use_mean: use z_mean", "z1, z2): \"\"\" Morphs the latent vectors z1 and z2 and outputs z_morph", "for the second identities to be morphed :return: \"\"\" z = 0.5 *", "called directly. The default implementation assumes a VAE like encoder that returns a", "that returns a 3-tuple where the first element is a sample. If this", "representation. :param x1: A batch of images of the first identities to be", "encoders but the default implementation takes the mean. \"\"\" import torch class MorphingEncoder(torch.nn.Module):", "def morph(self, x1, x2, use_mean=False, return_all=False): \"\"\" Morphs the images in x1 with", "batch of images of the first identities to be morphed :param x2: A", "of the first identities to be morphed :param x2: A batch of images", "This method exists to unify the return values. Different models might return more", "to generate morphs. This allows for a single method that captures all morphing", "return values. Different models might return more values when called directly. 
The default", "use_mean=use_mean), self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1, z2) if return_all: return z, z1, z2", "\"\"\" Morphs the images in x1 with the images in x2 and returns", "the second identities to be morphed :return: \"\"\" z = 0.5 * (z1", "values when called directly. The default implementation assumes a VAE like encoder that", "the outcome latent representation. :param x1: A batch of images of the first", "of sampling from q(z|x) :param return_all: Retruns z_morph, z1 and z2 when enabled", "x1 with the images in x2 and returns the outcome latent representation. :param", "of images :return: A list of latent representations of these images in x", "the decoder/Gx in order to decode. \"\"\" z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2,", "outcome latent representation. :param x1: A batch of images of the first identities", "first element is a sample. If this is not the case, the method", "directly. The default implementation assumes a VAE like encoder that returns a 3-tuple", "default implementation takes the mean. \"\"\" import torch class MorphingEncoder(torch.nn.Module): def morph(self, x1,", "z_morph :param z1: a batch of latent vectors for the first identities to", "images to morph with the x1 images :param use_mean: use z_mean instead of", "use_mean: use z_mean instead of sampling from q(z|x) :param return_all: Retruns z_morph, z1", "through the decoder/Gx in order to decode. \"\"\" z1, z2 = self.encode(x1, use_mean=use_mean),", "vector. This method exists to unify the return values. Different models might return", "self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1, z2) if return_all: return z, z1,", "q(z|x) :param return_all: Retruns z_morph, z1 and z2 when enabled :return: A batch", "to go through the decoder/Gx in order to decode. 
\"\"\" z1, z2 =", "morphed :return: \"\"\" z = 0.5 * (z1 + z2) return z def", "use_mean=use_mean) z = self.morph_zs(z1, z2) if return_all: return z, z1, z2 return z", "go through the decoder/Gx in order to decode. \"\"\" z1, z2 = self.encode(x1,", "z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1, z2) if return_all: return", "z values. These will have to go through the decoder/Gx in order to", "z2 and outputs z_morph :param z1: a batch of latent vectors for the", "might return more values when called directly. The default implementation assumes a VAE", ":param z1: a batch of latent vectors for the first identities to be", "morphing methods proposed in RT, apart from gradient descend. This method is meant", "images in x \"\"\" z, zm, _ = self(x) if not use_mean: return", ":param x2: A batch of images to morph with the x1 images :param", "z2: a batch of latent vectors for the second identities to be morphed", "to be morphed :param z2: a batch of latent vectors for the second", "a VAE like encoder that returns a 3-tuple where the first element is", "like encoder that returns a 3-tuple where the first element is a sample.", "takes the mean. \"\"\" import torch class MorphingEncoder(torch.nn.Module): def morph(self, x1, x2, use_mean=False,", "batch of latent vectors for the second identities to be morphed :return: \"\"\"", "unify the return values. Different models might return more values when called directly.", "values. Different models might return more values when called directly. The default implementation", "that is able to generate morphs. This allows for a single method that", "latent vectors z1 and z2 and outputs z_morph :param z1: a batch of", "instead of sampling from q(z|x) :param return_all: Retruns z_morph, z1 and z2 when", "latent vector. This method exists to unify the return values. 
Different models might", "representations of these images in x \"\"\" z, zm, _ = self(x) if", "A list of latent representations of these images in x \"\"\" z, zm,", "should be overridden. :param x: A batch of images :return: A list of", "z, zm, _ = self(x) if not use_mean: return z else: return zm", "MorphingEncoder(torch.nn.Module): def morph(self, x1, x2, use_mean=False, return_all=False): \"\"\" Morphs the images in x1", "the default implementation takes the mean. \"\"\" import torch class MorphingEncoder(torch.nn.Module): def morph(self,", "z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1, z2) if return_all:", "second identities to be morphed :return: \"\"\" z = 0.5 * (z1 +", "be morphed :param z2: a batch of latent vectors for the second identities", "morphs. This allows for a single method that captures all morphing methods proposed", "exists to unify the return values. Different models might return more values when", "The default implementation assumes a VAE like encoder that returns a 3-tuple where", "the method should be overridden. :param x: A batch of images :return: A", "def encode(self, x, use_mean=False): \"\"\" Encodes x to a latent vector. This method", "returns a 3-tuple where the first element is a sample. If this is", "x: A batch of images :return: A list of latent representations of these", "= self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1, z2) if return_all: return z,", "be morphed :param x2: A batch of images to morph with the x1", "vectors for the first identities to be morphed :param z2: a batch of", "z1: a batch of latent vectors for the first identities to be morphed", "gradient descend. This method is meant to be overridden by some encoders but", "use z_mean instead of sampling from q(z|x) :param return_all: Retruns z_morph, z1 and", "of latent vectors for the second identities to be morphed :return: \"\"\" z", "method should be overridden. 
:param x: A batch of images :return: A list", "in x \"\"\" z, zm, _ = self(x) if not use_mean: return z", "z1 and z2 and outputs z_morph :param z1: a batch of latent vectors", "all morphing methods proposed in RT, apart from gradient descend. This method is", "These will have to go through the decoder/Gx in order to decode. \"\"\"", "the mean. \"\"\" import torch class MorphingEncoder(torch.nn.Module): def morph(self, x1, x2, use_mean=False, return_all=False):", ":param z2: a batch of latent vectors for the second identities to be", "encode(self, x, use_mean=False): \"\"\" Encodes x to a latent vector. This method exists", "of latent representations of these images in x \"\"\" z, zm, _ =", "return z def encode(self, x, use_mean=False): \"\"\" Encodes x to a latent vector.", "of latent vectors for the first identities to be morphed :param z2: a", "is not the case, the method should be overridden. :param x: A batch", "and outputs z_morph :param z1: a batch of latent vectors for the first", "x2 and returns the outcome latent representation. :param x1: A batch of images", "x2, use_mean=False, return_all=False): \"\"\" Morphs the images in x1 with the images in", ":param x: A batch of images :return: A list of latent representations of", "z = self.morph_zs(z1, z2) if return_all: return z, z1, z2 return z def", "images :param use_mean: use z_mean instead of sampling from q(z|x) :param return_all: Retruns", "and z2 and outputs z_morph :param z1: a batch of latent vectors for", "(z1 + z2) return z def encode(self, x, use_mean=False): \"\"\" Encodes x to", "z def encode(self, x, use_mean=False): \"\"\" Encodes x to a latent vector. This", "meant to be overridden by some encoders but the default implementation takes the", "class MorphingEncoder(torch.nn.Module): def morph(self, x1, x2, use_mean=False, return_all=False): \"\"\" Morphs the images in", "to be morphed :param x2: A batch of images to morph with the", "decode. 
\"\"\" z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1, z2)", "self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1, z2) if return_all: return z, z1, z2 return", "in RT, apart from gradient descend. This method is meant to be overridden", "for the first identities to be morphed :param z2: a batch of latent", "to a latent vector. This method exists to unify the return values. Different", "x2: A batch of images to morph with the x1 images :param use_mean:", "A batch of images of the first identities to be morphed :param x2:", "methods proposed in RT, apart from gradient descend. This method is meant to", "z = 0.5 * (z1 + z2) return z def encode(self, x, use_mean=False):", "the images in x1 with the images in x2 and returns the outcome", "assumes a VAE like encoder that returns a 3-tuple where the first element", "models might return more values when called directly. The default implementation assumes a", "This method is meant to be overridden by some encoders but the default", "latent vectors for the second identities to be morphed :return: \"\"\" z =", "a sample. If this is not the case, the method should be overridden.", "\"\"\" Morphs the latent vectors z1 and z2 and outputs z_morph :param z1:", "morph(self, x1, x2, use_mean=False, return_all=False): \"\"\" Morphs the images in x1 with the", "use_mean=False, return_all=False): \"\"\" Morphs the images in x1 with the images in x2", "A batch of images :return: A list of latent representations of these images", "VAE like encoder that returns a 3-tuple where the first element is a", "to be morphed :return: \"\"\" z = 0.5 * (z1 + z2) return", "the return values. Different models might return more values when called directly. The", "\"\"\" Encodes x to a latent vector. This method exists to unify the", "order to decode. \"\"\" z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z =", "generate morphs. 
This allows for a single method that captures all morphing methods", "Morphs the images in x1 with the images in x2 and returns the", "if return_all: return z, z1, z2 return z def morph_zs(self, z1, z2): \"\"\"", "sampling from q(z|x) :param return_all: Retruns z_morph, z1 and z2 when enabled :return:", "images of the first identities to be morphed :param x2: A batch of", "return z def morph_zs(self, z1, z2): \"\"\" Morphs the latent vectors z1 and", "z2): \"\"\" Morphs the latent vectors z1 and z2 and outputs z_morph :param", ":return: A list of latent representations of these images in x \"\"\" z,", "x \"\"\" z, zm, _ = self(x) if not use_mean: return z else:", "a Gz/Encoder that is able to generate morphs. This allows for a single", "morphed z values. These will have to go through the decoder/Gx in order", "\"\"\" import torch class MorphingEncoder(torch.nn.Module): def morph(self, x1, x2, use_mean=False, return_all=False): \"\"\" Morphs", "latent representation. :param x1: A batch of images of the first identities to", "will have to go through the decoder/Gx in order to decode. \"\"\" z1,", "be overridden. :param x: A batch of images :return: A list of latent", "Gz/Encoder that is able to generate morphs. This allows for a single method", "in x2 and returns the outcome latent representation. :param x1: A batch of", "torch class MorphingEncoder(torch.nn.Module): def morph(self, x1, x2, use_mean=False, return_all=False): \"\"\" Morphs the images", "a batch of latent vectors for the second identities to be morphed :return:", "implementation assumes a VAE like encoder that returns a 3-tuple where the first", "return more values when called directly. The default implementation assumes a VAE like", "z_morph, z1 and z2 when enabled :return: A batch of morphed z values.", "Different models might return more values when called directly. 
The default implementation assumes", "return_all: return z, z1, z2 return z def morph_zs(self, z1, z2): \"\"\" Morphs", "is meant to be overridden by some encoders but the default implementation takes", "morph with the x1 images :param use_mean: use z_mean instead of sampling from", "* (z1 + z2) return z def encode(self, x, use_mean=False): \"\"\" Encodes x", "= 0.5 * (z1 + z2) return z def encode(self, x, use_mean=False): \"\"\"", "where the first element is a sample. If this is not the case,", ":param return_all: Retruns z_morph, z1 and z2 when enabled :return: A batch of", "return_all=False): \"\"\" Morphs the images in x1 with the images in x2 and", "identities to be morphed :param x2: A batch of images to morph with", "3-tuple where the first element is a sample. If this is not the", "more values when called directly. The default implementation assumes a VAE like encoder", "Encodes x to a latent vector. This method exists to unify the return", "A batch of morphed z values. These will have to go through the", "when enabled :return: A batch of morphed z values. These will have to", ":param use_mean: use z_mean instead of sampling from q(z|x) :param return_all: Retruns z_morph,", "a latent vector. This method exists to unify the return values. Different models", "is a sample. If this is not the case, the method should be", "return z, z1, z2 return z def morph_zs(self, z1, z2): \"\"\" Morphs the", "+ z2) return z def encode(self, x, use_mean=False): \"\"\" Encodes x to a", "\"\"\" z, zm, _ = self(x) if not use_mean: return z else: return", "z2 when enabled :return: A batch of morphed z values. These will have", "A batch of images to morph with the x1 images :param use_mean: use", "be morphed :return: \"\"\" z = 0.5 * (z1 + z2) return z", "apart from gradient descend. This method is meant to be overridden by some", "Models a Gz/Encoder that is able to generate morphs. This allows for a", "RT, apart from gradient descend. 
This method is meant to be overridden by", "morphed :param z2: a batch of latent vectors for the second identities to", "<reponame>Gerryflap/master_thesis \"\"\" Models a Gz/Encoder that is able to generate morphs. This allows", "morph_zs(self, z1, z2): \"\"\" Morphs the latent vectors z1 and z2 and outputs", "have to go through the decoder/Gx in order to decode. \"\"\" z1, z2", "element is a sample. If this is not the case, the method should", "to be overridden by some encoders but the default implementation takes the mean.", "case, the method should be overridden. :param x: A batch of images :return:", "for a single method that captures all morphing methods proposed in RT, apart", "morphed :param x2: A batch of images to morph with the x1 images", "Morphs the latent vectors z1 and z2 and outputs z_morph :param z1: a", "identities to be morphed :return: \"\"\" z = 0.5 * (z1 + z2)", "vectors for the second identities to be morphed :return: \"\"\" z = 0.5", "mean. \"\"\" import torch class MorphingEncoder(torch.nn.Module): def morph(self, x1, x2, use_mean=False, return_all=False): \"\"\"", "outputs z_morph :param z1: a batch of latent vectors for the first identities", "method exists to unify the return values. Different models might return more values", "z2) return z def encode(self, x, use_mean=False): \"\"\" Encodes x to a latent", "values. These will have to go through the decoder/Gx in order to decode.", "is able to generate morphs. This allows for a single method that captures", "x, use_mean=False): \"\"\" Encodes x to a latent vector. This method exists to", "by some encoders but the default implementation takes the mean. \"\"\" import torch", "allows for a single method that captures all morphing methods proposed in RT,", "but the default implementation takes the mean. \"\"\" import torch class MorphingEncoder(torch.nn.Module): def", "returns the outcome latent representation. 
:param x1: A batch of images of the", ":param x1: A batch of images of the first identities to be morphed", "z_mean instead of sampling from q(z|x) :param return_all: Retruns z_morph, z1 and z2", "in order to decode. \"\"\" z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z", "captures all morphing methods proposed in RT, apart from gradient descend. This method", "a 3-tuple where the first element is a sample. If this is not", "sample. If this is not the case, the method should be overridden. :param", "\"\"\" z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean) z = self.morph_zs(z1, z2) if", "\"\"\" Models a Gz/Encoder that is able to generate morphs. This allows for", "batch of images :return: A list of latent representations of these images in", "proposed in RT, apart from gradient descend. This method is meant to be", "batch of morphed z values. These will have to go through the decoder/Gx", "the images in x2 and returns the outcome latent representation. :param x1: A", "z2) if return_all: return z, z1, z2 return z def morph_zs(self, z1, z2):", "able to generate morphs. This allows for a single method that captures all", "of images to morph with the x1 images :param use_mean: use z_mean instead", "the x1 images :param use_mean: use z_mean instead of sampling from q(z|x) :param", "and z2 when enabled :return: A batch of morphed z values. These will", "def morph_zs(self, z1, z2): \"\"\" Morphs the latent vectors z1 and z2 and", "use_mean=False): \"\"\" Encodes x to a latent vector. This method exists to unify", "x1 images :param use_mean: use z_mean instead of sampling from q(z|x) :param return_all:", "the latent vectors z1 and z2 and outputs z_morph :param z1: a batch", "default implementation assumes a VAE like encoder that returns a 3-tuple where the", "a batch of latent vectors for the first identities to be morphed :param", "this is not the case, the method should be overridden. 
:param x: A", "latent vectors for the first identities to be morphed :param z2: a batch", "of these images in x \"\"\" z, zm, _ = self(x) if not", "latent representations of these images in x \"\"\" z, zm, _ = self(x)", "z def morph_zs(self, z1, z2): \"\"\" Morphs the latent vectors z1 and z2", "to unify the return values. Different models might return more values when called", "the case, the method should be overridden. :param x: A batch of images", "encoder that returns a 3-tuple where the first element is a sample. If", "x1, x2, use_mean=False, return_all=False): \"\"\" Morphs the images in x1 with the images", "\"\"\" z = 0.5 * (z1 + z2) return z def encode(self, x,", "images in x2 and returns the outcome latent representation. :param x1: A batch", "identities to be morphed :param z2: a batch of latent vectors for the", "decoder/Gx in order to decode. \"\"\" z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean)", "x to a latent vector. This method exists to unify the return values." ]
[ ":type nums: List[int] :rtype: List[int] \"\"\" n = len(nums) duplication = sum(nums) -", "= len(nums) duplication = sum(nums) - sum(set(nums)) missing = n * (n +", "List[int] :rtype: List[int] \"\"\" n = len(nums) duplication = sum(nums) - sum(set(nums)) missing", "= n * (n + 1) // 2 - sum(set(nums)) return [duplication, missing]", "nums): \"\"\" :type nums: List[int] :rtype: List[int] \"\"\" n = len(nums) duplication =", "len(nums) duplication = sum(nums) - sum(set(nums)) missing = n * (n + 1)", ":rtype: List[int] \"\"\" n = len(nums) duplication = sum(nums) - sum(set(nums)) missing =", "missing = n * (n + 1) // 2 - sum(set(nums)) return [duplication,", "nums: List[int] :rtype: List[int] \"\"\" n = len(nums) duplication = sum(nums) - sum(set(nums))", "sum(nums) - sum(set(nums)) missing = n * (n + 1) // 2 -", "class Solution: def findErrorNums(self, nums): \"\"\" :type nums: List[int] :rtype: List[int] \"\"\" n", "\"\"\" :type nums: List[int] :rtype: List[int] \"\"\" n = len(nums) duplication = sum(nums)", "= sum(nums) - sum(set(nums)) missing = n * (n + 1) // 2", "findErrorNums(self, nums): \"\"\" :type nums: List[int] :rtype: List[int] \"\"\" n = len(nums) duplication", "duplication = sum(nums) - sum(set(nums)) missing = n * (n + 1) //", "- sum(set(nums)) missing = n * (n + 1) // 2 - sum(set(nums))", "List[int] \"\"\" n = len(nums) duplication = sum(nums) - sum(set(nums)) missing = n", "Solution: def findErrorNums(self, nums): \"\"\" :type nums: List[int] :rtype: List[int] \"\"\" n =", "\"\"\" n = len(nums) duplication = sum(nums) - sum(set(nums)) missing = n *", "n = len(nums) duplication = sum(nums) - sum(set(nums)) missing = n * (n", "def findErrorNums(self, nums): \"\"\" :type nums: List[int] :rtype: List[int] \"\"\" n = len(nums)", "<filename>Python/645set_mismatch.py class Solution: def findErrorNums(self, nums): \"\"\" :type nums: List[int] :rtype: List[int] \"\"\"", "sum(set(nums)) missing = n * (n + 1) // 2 - sum(set(nums)) return" ]
[ "def visit_try_expression(self, node, context): self.process(node.inner_expression, context) return None def visit_arithmetic_unary(self, node, context): return", "node.partition: self.process(expression, context) for sort_item in node.order_by: self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame, context)", "Unless required by applicable law or agreed to in writing, software # distributed", "self.process(node.base, context) return None \"\"\" def visit_window(self, node, context) for expression in node.partition:", "def visit_array_constructor(self, node, context): return self.visit_expression(node, context) def visit_subscript_expression(self, node, context): return self.visit_expression(node,", "isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def", "None def visit_intersect(self, node, context): for relation in node.relations: self.process(relation, context) return None", "def visit_exists(self, node, context): return self.visit_expression(node, context) def visit_try_expression(self, node, context): return self.visit_expression(node,", "return self.visit_node(node, context) def visit_window_frame(self, node, context): return self.visit_node(node, context) def visit_frame_bound(self, node,", "visit_expression(self, node, context): return self.visit_node(node, context) def visit_reset_session(self, node, context): return self.visit_statement(node, context)", "if node.value: self.process(node.value, context) return None \"\"\" def visit_simple_case_expression(self, node, context): self.process(node.operand, context)", "visit_rename_column(self, node, context): return self.visit_statement(node, context) def visit_add_column(self, node, context): return self.visit_statement(node, context)", "node, context): return self.visit_statement(node, 
context) def visit_show_tables(self, node, context): return self.visit_statement(node, context) def", "self.visit_literal(node, context) def visit_logical_binary_expression(self, node, context): return self.visit_expression(node, context) def visit_subquery_expression(self, node, context):", "self.visit_expression(node, context) def visit_extract(self, node, context): return self.visit_expression(node, context) def visit_arithmetic_binary(self, node, context):", "visit_dereference_expression(self, node, context): self.process(node.base, context) return None \"\"\" def visit_window(self, node, context) for", "self.visit_expression(node, context) def visit_simple_case_expression(self, node, context): return self.visit_expression(node, context) def visit_string_literal(self, node, context):", "None def visit_row(self, node, context): for expression in node.items: self.process(expression, context) return None", "node, context): return self.process(node.value, context) def visit_is_None_predicate(self, node, context): return self.process(node.value, context) def", "[] if isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets", "node, context): return self.visit_select_item(node, context) def visit_all_columns(self, node, context): return self.visit_select_item(node, context) def", "self.visit_statement(node, context) def visit_rename_table(self, node, context): return self.visit_statement(node, context) def visit_rename_column(self, node, context):", "context): return self.visit_set_operation(node, context) def visit_timestamp_literal(self, node, context): return self.visit_literal(node, context) def visit_when_clause(self,", "context): self.process(node.value, context) self.process(node.pattern, context) if node.escape is not None: self.process(node.escape, context) return", "node, context): self.process(node.left, context) self.process(node.right, context) 
return None def visit_values(self, node, context): for", "def visit_call(self, node, context): return self.visit_node(node, context) def visit_delete(self, node, context): return self.visit_statement(node,", "visit_relation(self, node, context): return self.visit_node(node, context) def visit_query_body(self, node, context): return self.visit_relation(node, context)", "node, context): return self.visit_expression(node, context) def visit_arithmetic_binary(self, node, context): return self.visit_expression(node, context) def", "context) def visit_timestamp_literal(self, node, context): return self.visit_literal(node, context) def visit_when_clause(self, node, context): return", "visit_all_columns(self, node, context): return self.visit_select_item(node, context) def visit_searched_case_expression(self, node, context): return self.visit_expression(node, context)", "return self.visit_expression(node, context) def visit_dereference_expression(self, node, context): return self.visit_expression(node, context) def visit_null_if_expression(self, node,", "self.process(node.value, context) self.process(node.pattern, context) if node.escape is not None: self.process(node.escape, context) return None", "return self.visit_node(node, context) def visit_query_body(self, node, context): return self.visit_relation(node, context) def visit_query_specification(self, node,", "def visit_join(self, node, context): self.process(node.left, context) self.process(node.right, context) if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context)", "= [] if isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements =", "def visit_row(self, node, context): for expression in node.items: self.process(expression, context) return None def", "node.order_by: self.process(sort_item, context) return None def visit_with(self, node, context): for query in node.queries:", "IS\" BASIS, # 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "context): return self.visit_transaction_mode(node, context) def visit_commit(self, node, context): return self.visit_statement(node, context) def visit_rollback(self,", "def visit_dereference_expression(self, node, context): self.process(node.base, context) return None \"\"\" def visit_window(self, node, context)", "node.frame: self.process(node.frame, context) return None def visit_window_frame(self, node, context) self.process(node.start, context) if node.end:", "context): return self.visit_statement(node, context) def visit_show_catalogs(self, node, context): return self.visit_statement(node, context) def visit_show_columns(self,", "visit_show_session(self, node, context): return self.visit_statement(node, context) def visit_set_session(self, node, context): return self.visit_statement(node, context)", "def visit_union(self, node, context): for relation in node.relations: self.process(relation, context) return None def", "context) def visit_query_specification(self, node, context): return self.visit_query_body(node, context) def visit_set_operation(self, node, context): return", "visit_values(self, node, context): return self.visit_query_body(node, context) def visit_row(self, node, context): return self.visit_node(node, context)", "self.process(clause, context) if node.default_value: self.process(node.default_valuee, context) return None def visit_in_list_expression(self, node, context): for", "def visit_isolation_level(self, node, context): return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node, context): return self.visit_transaction_mode(node,", "context) def visit_extract(self, node, context): return self.visit_expression(node, context) def visit_arithmetic_binary(self, node, context): return", "self.visit_node(node, context) def visit_delete(self, node, context): return self.visit_statement(node, context) def visit_start_transaction(self, node, 
context):", "context) return None def visit_union(self, node, context): for relation in node.relations: self.process(relation, context)", "self.visit_expression(node, context) def visit_lambda_expression(self, node, context): return self.visit_expression(node, context) def visit_simple_case_expression(self, node, context):", "def visit_binary_literal(self, node, context): return self.visit_literal(node, context) def visit_boolean_literal(self, node, context): return self.visit_literal(node,", "if node.default_value: self.process(node.default_valuee, context) return None def visit_in_list_expression(self, node, context): for value in", "return self.visit_relation(node, context) def visit_sampled_relation(self, node, context): return self.visit_relation(node, context) def visit_join(self, node,", "return None def visit_single_column(self, node, context): self.process(node.expression, context) return None def visit_when_clause(self, node,", "visit_in_list_expression(self, node, context): return self.visit_expression(node, context) def visit_qualified_name_reference(self, node, context): return self.visit_expression(node, context)", "context): return self.visit_relation(node, context) def visit_sampled_relation(self, node, context): return self.visit_relation(node, context) def visit_join(self,", "return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node, context): return self.visit_transaction_mode(node, context) def visit_commit(self, node,", "node.order_by: self.process(sort_item, context) return None def visit_union(self, node, context): for relation in node.relations:", "for operand in node.operands: self.process(operand, context) return None def visit_at_time_zone(self, node, context): self.process(node.value,", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "return None def visit_when_clause(self, node, context): self.process(node.operand, context) self.process(node.result, context) return 
None def", "context): return self.visit_literal(node, context) def visit_boolean_literal(self, node, context): return self.visit_literal(node, context) def visit_in_list_expression(self,", "context) return None def visit_frame_bound(self, node, context) if node.value: self.process(node.value, context) return None", "def visit_show_schemas(self, node, context): return self.visit_statement(node, context) def visit_show_catalogs(self, node, context): return self.visit_statement(node,", "self.process(node.second, context) return None def visit_if_expression(self, node, context): self.process(node.condition, context) self.process(node.true_value, context) if", "node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_value, context) return None def visit_like_predicate(self, node, context):", "node, context): return self.visit_relation(node, context) def visit_query_specification(self, node, context): return self.visit_query_body(node, context) def", "expression in node.values: self.process(expression, context) return None def visit_subscript_expression(self, node, context): self.process(node.base, context)", "def visit_is_not_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_null_predicate(self, node, context): return self.visit_expression(node,", "context) def visit_call(self, node, context): return self.visit_node(node, context) def visit_delete(self, node, context): return", "def visit_create_view(self, node, context): return self.visit_statement(node, context) def visit_drop_view(self, node, context): return self.visit_statement(node,", "def visit_query_specification(self, node, context): return self.visit_query_body(node, context) def visit_set_operation(self, node, context): return self.visit_query_body(node,", "node.default_value: self.process(node.default_value, context) return None def visit_like_predicate(self, node, context): self.process(node.value, context) 
self.process(node.pattern, context)", "def visit_all_columns(self, node, context): return self.visit_select_item(node, context) def visit_searched_case_expression(self, node, context): return self.visit_expression(node,", "visit_with(self, node, context): for query in node.queries: self.process(query, context) return None def visit_with_query(self,", "node, context): return self.visit_statement(node, context) def visit_create_view(self, node, context): return self.visit_statement(node, context) def", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "visit_simple_case_expression(self, node, context): self.process(node.operand, context) for clause in node.when_clauses: self.process(clause, context) if node.default_value:", "in node.order_by: self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame, context) return None def visit_window_frame(self, node,", "context) def visit_interval_literal(self, node, context): return self.visit_literal(node, context) def visit_in_predicate(self, node, context): return", "-*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0", "def visit_input_reference(self, node, context): return self.visit_expression(node, context) def visit_window(self, node, context): return self.visit_node(node,", "context) def visit_logical_binary_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_subquery_expression(self,", "self.visit_statement(node, context) def visit_rename_column(self, node, context): return self.visit_statement(node, context) def visit_add_column(self, node, context):", "return self.visit_relation(node, context) def visit_join(self, node, context): return self.visit_relation(node, context) def visit_exists(self, node,", "context) def visit_cast(self, node, context): return self.process(node.expression, context) def visit_arithmetic_binary(self, node, context): 
self.process(node.left,", "visit_array_constructor(self, node, context): return self.visit_expression(node, context) def visit_subscript_expression(self, node, context): return self.visit_expression(node, context)", "def visit_arithmetic_binary(self, node, context): return self.visit_expression(node, context) def visit_between_predicate(self, node, context): return self.visit_expression(node,", "node, context): for operand in node.operands: self.process(operand, context) return None def visit_at_time_zone(self, node,", "context): return self.visit_node(node, context) def visit_call(self, node, context): return self.visit_node(node, context) def visit_delete(self,", "context): return self.visit_relation(node, context) def visit_join(self, node, context): return self.visit_relation(node, context) def visit_exists(self,", "node, context): for relation in node.relations: self.process(relation, context) return None def visit_intersect(self, node,", "context): return self.visit_query_body(node, context) def visit_union(self, node, context): return self.visit_set_operation(node, context) def visit_intersect(self,", "AstVisitor(object): def process(self, node, context=None): return node.accept(self, context) def visit_node(self, node, context): pass", "self.process(node.query, context) def visit_sort_item(self, node, context): return self.process(node.sort_key, context) def visit_query_specification(self, node, context):", "visit_interval_literal(self, node, context): return self.visit_literal(node, context) def visit_in_predicate(self, node, context): return self.visit_expression(node, context)", "context): return self.visit_statement(node, context) def visit_add_column(self, node, context): return self.visit_statement(node, context) def visit_create_view(self,", "self.process(node.window, context) return None def visit_dereference_expression(self, node, context): self.process(node.base, context) return None \"\"\"", "if node.default_value: 
self.process(node.default_value, context) return None def visit_like_predicate(self, node, context): self.process(node.value, context) self.process(node.pattern,", "context): self.process(node.base, context) return None \"\"\" def visit_window(self, node, context) for expression in", "visit_searched_case_expression(self, node, context): for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_value, context)", "self.visit_literal(node, context) def visit_binary_literal(self, node, context): return self.visit_literal(node, context) def visit_boolean_literal(self, node, context):", "utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the \"License\");", "in node.items: self.process(expression, context) return None def visit_table_subquery(self, node, context): return self.process(node.query, context)", "visit_subquery_expression(self, node, context): return self.process(node.query, context) def visit_sort_item(self, node, context): return self.process(node.sort_key, context)", "node.false_value: self.process(node.false_value, context) return None def visit_try_expression(self, node, context): self.process(node.inner_expression, context) return None", "visit_boolean_literal(self, node, context): return self.visit_literal(node, context) def visit_in_list_expression(self, node, context): return self.visit_expression(node, context)", "None def visit_table_subquery(self, node, context): return self.process(node.query, context) def visit_aliased_relation(self, node, context): return", "context) def visit_boolean_literal(self, node, context): return self.visit_literal(node, context) def visit_in_list_expression(self, node, context): return", "context) if node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get(): self.process(expression, context) return None def visit_join(self,", "context) def visit_arithmetic_binary(self, node, context): return 
self.visit_expression(node, context) def visit_between_predicate(self, node, context): return", "node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get(): self.process(expression, context) return None def visit_join(self, node, context):", "node, context): return self.visit_node(node, context) def visit_window_frame(self, node, context): return self.visit_node(node, context) def", "self.process(relation, context) return None def visit_intersect(self, node, context): for relation in node.relations: self.process(relation,", "clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_valuee, context) return None def visit_in_list_expression(self,", "self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get(): self.process(expression, context) return", "node, context): return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node, context): return self.visit_transaction_mode(node, context) def", "return self.visit_statement(node, context) def visit_transaction_mode(self, node, context): return self.visit_node(node, context) def visit_isolation_level(self, node,", "visit_string_literal(self, node, context): return self.visit_literal(node, context) def visit_binary_literal(self, node, context): return self.visit_literal(node, context)", "None def visit_query(self, node, context): self.process(node.query_body, context) for sort_item in node.order_by: self.process(sort_item, context)", "self.visit_node(node, context) def visit_single_column(self, node, context): return self.visit_select_item(node, context) def visit_all_columns(self, node, context):", "None def visit_window_frame(self, node, context) self.process(node.start, context) if node.end: self.process(node.end, context) return None", "node, context): 
return self.visit_query_body(node, context) def visit_aliased_relation(self, node, context): return self.visit_relation(node, context) def", "node, context): self.process(node.value, context) self.process(node.time_zone, context) return None def visit_array_constructor(self, node, context): for", "visit_insert(self, node, context): return self.visit_node(node, context) def visit_call(self, node, context): return self.visit_node(node, context)", "node, context): return self.visit_statement(node, context) def visit_drop_view(self, node, context): return self.visit_statement(node, context) def", "return self.visit_node(node, context) def visit_select(self, node, context): return self.visit_node(node, context) def visit_relation(self, node,", "self.process(node.from_, context) if node.where: self.process(node.where, context) if node.group_by: grouping_elements = [] if isinstance(node.group_by,", "visit_long_literal(self, node, context): return self.visit_literal(node, context) def visit_logical_binary_expression(self, node, context): return self.visit_expression(node, context)", "node, context): return self.visit_statement(node, context) def visit_show_functions(self, node, context): return self.visit_statement(node, context) def", "visit_with(self, node, context): return self.visit_node(node, context) def visit_approximate(self, node, context): return self.visit_node(node, context)", "context): return self.visit_expression(node, context) def visit_sort_item(self, node, context): return self.visit_node(node, context) def visit_table(self,", "grouping_element in grouping_elements: self.process(grouping_element, context) if node.having: self.process(node.having, context) for sort_item in node.order_by:", "context) return None def visit_function_call(self, node, context): for argument in node.arguments: self.process(argument, context)", "return self.visit_expression(node, context) def visit_simple_case_expression(self, node, context): return self.visit_expression(node, 
context) def visit_string_literal(self, node,", "self.visit_statement(node, context) def visit_transaction_mode(self, node, context): return self.visit_node(node, context) def visit_isolation_level(self, node, context):", "not use this file except in compliance with the License. # You may", "return self.visit_expression(node, context) def visit_long_literal(self, node, context): return self.visit_literal(node, context) def visit_logical_binary_expression(self, node,", "self.visit_node(node, context) def visit_with(self, node, context): return self.visit_node(node, context) def visit_approximate(self, node, context):", "for grouping_element in grouping_elements: self.process(grouping_element, context) if node.having: self.process(node.having, context) for sort_item in", "context) def visit_reset_session(self, node, context): return self.visit_statement(node, context) def visit_current_time(self, node, context): return", "context) def visit_generic_literal(self, node, context): return self.visit_literal(node, context) def visit_time_literal(self, node, context): return", "node, context): return self.visit_node(node, context) def visit_approximate(self, node, context): return self.visit_node(node, context) def", "return None def visit_in_predicate(self, node, context): self.process(node.value, context) self.process(node.value_list, context) return None def", "elif isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets for grouping_element in grouping_elements: self.process(grouping_element, context) if", "context) def visit_arithmetic_binary(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_between_predicate(self,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "context) return None def visit_join(self, node, context): self.process(node.left, context) self.process(node.right, context) if isinstance(node.criteria,", "context): return 
self.visit_expression(node, context) def visit_null_literal(self, node, context): return self.visit_literal(node, context) def visit_arithmetic_unary(self,", "return self.visit_node(node, context) def visit_table(self, node, context): return self.visit_query_body(node, context) def visit_unnest(self, node,", "context) def visit_rollback(self, node, context): return self.visit_statement(node, context) def visit_at_time_zone(self, node, context): return", "context): return self.visit_statement(node, context) def visit_transaction_mode(self, node, context): return self.visit_node(node, context) def visit_isolation_level(self,", "governing permissions and # limitations under the License. from .join_criteria import JoinOn, JoinUsing", "agreed to in writing, software # distributed under the License is distributed on", "def visit_intersect(self, node, context): return self.visit_set_operation(node, context) def visit_except(self, node, context): return self.visit_set_operation(node,", "self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node, context): return self.visit_transaction_mode(node, context) def visit_commit(self, node, context):", "context): return self.visit_statement(node, context) def visit_explain(self, node, context): return self.visit_statement(node, context) def visit_show_tables(self,", "context): return self.visit_statement(node, context) def visit_grant(self, node, context): return self.visit_statement(node, context) def visit_transaction_mode(self,", "node.having: self.process(node.having, context) for sort_item in node.order_by: self.process(sort_item, context) return None def visit_union(self,", "return self.process(node.expression, context) def visit_cast(self, node, context): return self.process(node.expression, context) def visit_arithmetic_binary(self, node,", "self.process(node.value, context) self.process(node.time_zone, context) return None def visit_array_constructor(self, node, context): for expression in", 
"node, context): self.process(node.base, context) return None \"\"\" def visit_window(self, node, context) for expression", "context) if node.escape is not None: self.process(node.escape, context) return None def visit_is_not_None_predicate(self, node,", "node, context): return self.process(node.value, context) def visit_not_expression(self, node, context): return self.process(node.value, context) def", "GroupingSets): grouping_elements = node.group_by.sets for grouping_element in grouping_elements: self.process(grouping_element, context) if node.having: self.process(node.having,", "self.process(item, context) return None def visit_single_column(self, node, context): self.process(node.expression, context) return None def", "def visit_drop_table(self, node, context): return self.visit_statement(node, context) def visit_rename_table(self, node, context): return self.visit_statement(node,", "context): return self.visit_query_body(node, context) def visit_set_operation(self, node, context): return self.visit_query_body(node, context) def visit_union(self,", "self.process(node.query, context) def visit_select(self, node, context): for item in node.select_items: self.process(item, context) return", "for sort_item in node.order_by: self.process(sort_item, context) return None def visit_union(self, node, context): for", "node, context): return self.process(node.value, context) def visit_logical_binary_expression(self, node, context): self.process(node.left, context) self.process(node.right, context)", "for sort_item in node.order_by: self.process(sort_item, context) return None def visit_with(self, node, context): for", "visit_frame_bound(self, node, context): return self.visit_node(node, context) def visit_call_argument(self, node, context): return self.visit_node(node, context)", "context): return self.visit_expression(node, context) def visit_subquery_expression(self, node, context): return self.visit_expression(node, context) def visit_sort_item(self,", "context) 
return None def visit_between_predicate(self, node, context): self.process(node.value, context) self.process(node.min, context) self.process(node.max, context)", "def visit_query_body(self, node, context): return self.visit_relation(node, context) def visit_query_specification(self, node, context): return self.visit_query_body(node,", "self.visit_node(node, context) def visit_table(self, node, context): return self.visit_query_body(node, context) def visit_unnest(self, node, context):", "visit_double_literal(self, node, context): return self.visit_literal(node, context) def visit_statement(self, node, context): return self.visit_node(node, context)", "context): return self.visit_statement(node, context) def visit_show_session(self, node, context): return self.visit_statement(node, context) def visit_set_session(self,", "self.visit_query_body(node, context) def visit_row(self, node, context): return self.visit_node(node, context) def visit_table_subquery(self, node, context):", "to in writing, software # distributed under the License is distributed on an", "context) def visit_join(self, node, context): return self.visit_relation(node, context) def visit_exists(self, node, context): return", "context): return self.visit_statement(node, context) def visit_show_partitions(self, node, context): return self.visit_statement(node, context) def visit_show_functions(self,", "def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor, self).__init__(line, pos) def visit_subquery_expression(self, node, context): return None", "implied. 
# See the License for the specific language governing permissions and #", "visit_window(self, node, context): return self.visit_node(node, context) def visit_window_frame(self, node, context): return self.visit_node(node, context)", "self.process(node.right, context) return None def visit_values(self, node, context): for row in node.rows: self.process(row,", "context): return self.visit_expression(node, context) def visit_comparison_expression(self, node, context): return self.visit_expression(node, context) def visit_literal(self,", "self.visit_expression(node, context) def visit_not_expression(self, node, context): return self.visit_expression(node, context) def visit_select_item(self, node, context):", "self.visit_expression(node, context) def visit_literal(self, node, context): return self.visit_expression(node, context) def visit_double_literal(self, node, context):", "return self.visit_statement(node, context) def visit_use(self, node, context): return self.visit_statement(node, context) def visit_show_session(self, node,", "node, context): for item in node.select_items: self.process(item, context) return None def visit_single_column(self, node,", "context) def visit_array_constructor(self, node, context): return self.visit_expression(node, context) def visit_subscript_expression(self, node, context): return", "context): self.process(node.query_body, context) for sort_item in node.order_by: self.process(sort_item, context) return None def visit_with(self,", "context) return None def visit_values(self, node, context): for row in node.rows: self.process(row, context)", "self.process(node.false_value, context) return None def visit_try_expression(self, node, context): self.process(node.inner_expression, context) return None def", "node, context): return self.visit_expression(node, context) def visit_lambda_expression(self, node, context): return self.visit_expression(node, context) def", "context): return self.visit_statement(node, context) def 
visit_create_view(self, node, context): return self.visit_statement(node, context) def visit_drop_view(self,", "self.visit_node(node, context) def visit_isolation_level(self, node, context): return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node, context):", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "def visit_select_item(self, node, context): return self.visit_node(node, context) def visit_single_column(self, node, context): return self.visit_select_item(node,", "def visit_logical_binary_expression(self, node, context): return self.visit_expression(node, context) def visit_subquery_expression(self, node, context): return self.visit_expression(node,", "self.visit_expression(node, context) def visit_try_expression(self, node, context): return self.visit_expression(node, context) def visit_cast(self, node, context):", "return self.visit_query_body(node, context) def visit_unnest(self, node, context): return self.visit_relation(node, context) def visit_values(self, node,", "return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor, self).__init__(line, pos) def visit_subquery_expression(self,", "None def visit_except(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_values(self,", "visit_between_predicate(self, node, context): self.process(node.value, context) self.process(node.min, context) self.process(node.max, context) return None def visit_coalesce_expression(self,", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "node, context): self.process(node.query_body, context) for sort_item in node.order_by: self.process(sort_item, context) return None def", "self.process(node.query, context) def 
visit_aliased_relation(self, node, context): return self.process(node.relation, context) def visit_sampled_relation(self, node, context):", "self.process(node.expression, context) def visit_cast(self, node, context): return self.process(node.expression, context) def visit_arithmetic_binary(self, node, context):", "context): return self.visit_node(node, context) def visit_frame_bound(self, node, context): return self.visit_node(node, context) def visit_call_argument(self,", "node, context): return self.visit_statement(node, context) def visit_rollback(self, node, context): return self.visit_statement(node, context) def", "def visit_is_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_array_constructor(self, node, context): return self.visit_expression(node,", "None def visit_subquery_expression(self, node, context): return self.process(node.query, context) def visit_sort_item(self, node, context): return", "def visit_subquery_expression(self, node, context): return self.process(node.query, context) def visit_sort_item(self, node, context): return self.process(node.sort_key,", "node, context): return self.visit_expression(node, context) def visit_subscript_expression(self, node, context): return self.visit_expression(node, context) def", "node, context): self.process(node.select, context) if node.from_: self.process(node.from_, context) if node.where: self.process(node.where, context) if", "in node.values: self.process(value, context) return None def visit_None_if_expression(self, node, context): self.process(node.first, context) self.process(node.second,", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "self.visit_expression(node, context) def visit_like_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_not_null_predicate(self, node, context):", "you may not use this file except in compliance with the License. 
#", "return self.visit_expression(node, context) def visit_is_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_array_constructor(self, node,", "self.visit_statement(node, context) def visit_add_column(self, node, context): return self.visit_statement(node, context) def visit_create_view(self, node, context):", "visit_arithmetic_binary(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_between_predicate(self, node, context):", "return self.visit_relation(node, context) def visit_exists(self, node, context): return self.visit_expression(node, context) def visit_try_expression(self, node,", "node, context): return self.visit_node(node, context) def visit_table_element(self, node, context): return self.visit_node(node, context) def", "node, context): return self.visit_statement(node, context) def visit_at_time_zone(self, node, context): return self.visit_expression(node, context) class", "context): return self.visit_statement(node, context) def visit_set_session(self, node, context): return self.visit_statement(node, context) def visit_generic_literal(self,", "self.process(node.right, context) return None def visit_query(self, node, context): self.process(node.query_body, context) for sort_item in", "return self.visit_statement(node, context) def visit_create_view(self, node, context): return self.visit_statement(node, context) def visit_drop_view(self, node,", "node, context): return self.visit_relation(node, context) def visit_join(self, node, context): return self.visit_relation(node, context) def", "context) self.process(node.start, context) if node.end: self.process(node.end, context) return None def visit_frame_bound(self, node, context)", "context) def visit_in_predicate(self, node, context): return self.visit_expression(node, context) def visit_function_call(self, node, context): return", "JoinOn, JoinUsing from .grouping import SimpleGroupBy, GroupingSets 
class AstVisitor(object): def process(self, node, context=None):", "def visit_arithmetic_unary(self, node, context): return self.process(node.value, context) def visit_not_expression(self, node, context): return self.process(node.value,", "node, context): return self.visit_expression(node, context) def visit_subquery_expression(self, node, context): return self.visit_expression(node, context) def", "context) def visit_aliased_relation(self, node, context): return self.visit_relation(node, context) def visit_sampled_relation(self, node, context): return", "context) self.process(node.max, context) return None def visit_coalesce_expression(self, node, context): for operand in node.operands:", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "context) return None def visit_table_subquery(self, node, context): return self.process(node.query, context) def visit_aliased_relation(self, node,", "def visit_show_catalogs(self, node, context): return self.visit_statement(node, context) def visit_show_columns(self, node, context): return self.visit_statement(node,", "node.values: self.process(value, context) return None def visit_None_if_expression(self, node, context): self.process(node.first, context) self.process(node.second, context)", "return self.visit_node(node, context) def visit_approximate(self, node, context): return self.visit_node(node, context) def visit_with_query(self, node,", "def visit_row(self, node, context): return self.visit_node(node, context) def visit_table_subquery(self, node, context): return self.visit_query_body(node,", "return None def visit_query(self, node, context): self.process(node.query_body, context) for sort_item in node.order_by: self.process(sort_item,", "node, context): return self.visit_node(node, context) def visit_isolation_level(self, node, context): return self.visit_transaction_mode(node, context) def", "def visit_like_predicate(self, node, context): return self.visit_expression(node, context) def 
visit_is_not_null_predicate(self, node, context): return self.visit_expression(node,", "context): return self.process(node.query, context) def visit_select(self, node, context): for item in node.select_items: self.process(item,", "visit_coalesce_expression(self, node, context): for operand in node.operands: self.process(operand, context) return None def visit_at_time_zone(self,", "return self.visit_set_operation(node, context) def visit_intersect(self, node, context): return self.visit_set_operation(node, context) def visit_except(self, node,", "context) return None def visit_comparison_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None", "for relation in node.relations: self.process(relation, context) return None def visit_except(self, node, context): self.process(node.left,", "self.visit_expression(node, context) def visit_null_literal(self, node, context): return self.visit_literal(node, context) def visit_arithmetic_unary(self, node, context):", "return self.visit_query_body(node, context) def visit_set_operation(self, node, context): return self.visit_query_body(node, context) def visit_union(self, node,", "node, context): return self.visit_expression(node, context) def visit_if_expression(self, node, context): return self.visit_expression(node, context) def", "for value in node.values: self.process(value, context) return None def visit_None_if_expression(self, node, context): self.process(node.first,", "if node.escape is not None: self.process(node.escape, context) return None def visit_is_not_None_predicate(self, node, context):", "specific language governing permissions and # limitations under the License. 
from .join_criteria import", "for expression in node.partition: self.process(expression, context) for sort_item in node.order_by: self.process(sort_item.sort_key, context) if", "context) self.process(node.value_list, context) return None def visit_function_call(self, node, context): for argument in node.arguments:", "node, context): return self.visit_expression(node, context) def visit_cast(self, node, context): return self.visit_expression(node, context) def", "return self.visit_expression(node, context) def visit_comparison_expression(self, node, context): return self.visit_expression(node, context) def visit_literal(self, node,", "node, context): return self.visit_expression(node, context) def visit_double_literal(self, node, context): return self.visit_literal(node, context) def", "self.visit_node(node, context) def visit_with_query(self, node, context): return self.visit_node(node, context) def visit_select(self, node, context):", "context) def visit_frame_bound(self, node, context): return self.visit_node(node, context) def visit_call_argument(self, node, context): return", "def visit_commit(self, node, context): return self.visit_statement(node, context) def visit_rollback(self, node, context): return self.visit_statement(node,", "context): return self.process(node.relation, context) def visit_sampled_relation(self, node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if", "def visit_sampled_relation(self, node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present(): for expression in", "context) def visit_show_columns(self, node, context): return self.visit_statement(node, context) def visit_show_partitions(self, node, context): return", "return self.visit_relation(node, context) def visit_query_specification(self, node, context): return self.visit_query_body(node, context) def visit_set_operation(self, node,", 
"context): return self.visit_statement(node, context) def visit_drop_table(self, node, context): return self.visit_statement(node, context) def visit_rename_table(self,", "return self.visit_node(node, context) def visit_with(self, node, context): return self.visit_node(node, context) def visit_approximate(self, node,", "self.visit_expression(node, context) def visit_double_literal(self, node, context): return self.visit_literal(node, context) def visit_statement(self, node, context):", "visit_arithmetic_unary(self, node, context): return self.process(node.value, context) def visit_not_expression(self, node, context): return self.process(node.value, context)", "None def visit_comparison_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_query(self,", "operand in node.operands: self.process(operand, context) return None def visit_at_time_zone(self, node, context): self.process(node.value, context)", "def visit_is_None_predicate(self, node, context): return self.process(node.value, context) def visit_logical_binary_expression(self, node, context): self.process(node.left, context)", "def visit_coalesce_expression(self, node, context): for operand in node.operands: self.process(operand, context) return None def", "def visit_except(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_values(self, node,", "node.arguments: self.process(argument, context) if node.window: self.process(node.window, context) return None def visit_dereference_expression(self, node, context):", "return self.process(node.value, context) def visit_is_None_predicate(self, node, context): return self.process(node.value, context) def visit_logical_binary_expression(self, node,", "visit_explain_option(self, node, context): return self.visit_node(node, context) def visit_with(self, node, context): return self.visit_node(node, context)", "context): return 
self.visit_literal(node, context) def visit_binary_literal(self, node, context): return self.visit_literal(node, context) def visit_boolean_literal(self,", "visit_input_reference(self, node, context): return self.visit_expression(node, context) def visit_window(self, node, context): return self.visit_node(node, context)", "context) def visit_rename_column(self, node, context): return self.visit_statement(node, context) def visit_add_column(self, node, context): return", "visit_extract(self, node, context): return self.process(node.expression, context) def visit_cast(self, node, context): return self.process(node.expression, context)", "context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get(): self.process(expression, context)", "context): return self.visit_query_body(node, context) def visit_row(self, node, context): return self.visit_node(node, context) def visit_table_subquery(self,", "self.process(expression, context) for sort_item in node.order_by: self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame, context) return", "self.process(node.first, context) self.process(node.second, context) return None def visit_if_expression(self, node, context): self.process(node.condition, context) self.process(node.true_value,", "node.group_by.sets for grouping_element in grouping_elements: self.process(grouping_element, context) if node.having: self.process(node.having, context) for sort_item", "node, context): return self.visit_expression(node, context) def visit_string_literal(self, node, context): return self.visit_literal(node, context) def", "return None def visit_comparison_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def", "context) def visit_create_table_as_select(self, node, context): return self.visit_statement(node, 
context) def visit_drop_table(self, node, context): return", "self.process(node.frame, context) return None def visit_window_frame(self, node, context) self.process(node.start, context) if node.end: self.process(node.end,", "visit_isolation_level(self, node, context): return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node, context): return self.visit_transaction_mode(node, context)", "visit_call_argument(self, node, context): return self.visit_node(node, context) def visit_table_element(self, node, context): return self.visit_node(node, context)", "node, context): return self.visit_node(node, context) def visit_relation(self, node, context): return self.visit_node(node, context) def", "self.visit_transaction_mode(node, context) def visit_commit(self, node, context): return self.visit_statement(node, context) def visit_rollback(self, node, context):", "context) for sort_item in node.order_by: self.process(sort_item, context) return None def visit_with(self, node, context):", "def visit_extract(self, node, context): return self.process(node.expression, context) def visit_cast(self, node, context): return self.process(node.expression,", "None def visit_frame_bound(self, node, context) if node.value: self.process(node.value, context) return None \"\"\" def", "self.visit_expression(node, context) def visit_is_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_array_constructor(self, node, context):", "def visit_window_frame(self, node, context) self.process(node.start, context) if node.end: self.process(node.end, context) return None def", "self.visit_query_body(node, context) def visit_unnest(self, node, context): return self.visit_relation(node, context) def visit_values(self, node, context):", "self.process(sort_item, context) return None def visit_union(self, node, context): for relation in node.relations: self.process(relation,", "visit_show_partitions(self, node, context): return 
self.visit_statement(node, context) def visit_show_functions(self, node, context): return self.visit_statement(node, context)", "context) return None def visit_try_expression(self, node, context): self.process(node.inner_expression, context) return None def visit_arithmetic_unary(self,", "grouping_elements: self.process(grouping_element, context) if node.having: self.process(node.having, context) for sort_item in node.order_by: self.process(sort_item, context)", "self.process(argument, context) if node.window: self.process(node.window, context) return None def visit_dereference_expression(self, node, context): self.process(node.base,", "self.process(node.having, context) for sort_item in node.order_by: self.process(sort_item, context) return None def visit_union(self, node,", "visit_qualified_name_reference(self, node, context): return self.visit_expression(node, context) def visit_dereference_expression(self, node, context): return self.visit_expression(node, context)", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "self.visit_relation(node, context) def visit_join(self, node, context): return self.visit_relation(node, context) def visit_exists(self, node, context):", "def visit_in_predicate(self, node, context): self.process(node.value, context) self.process(node.value_list, context) return None def visit_function_call(self, node,", "context): return self.visit_statement(node, context) def visit_show_schemas(self, node, context): return self.visit_statement(node, context) def visit_show_catalogs(self,", "context): return self.visit_expression(node, context) def visit_extract(self, node, context): return self.visit_expression(node, context) def visit_arithmetic_binary(self,", "context): return self.visit_node(node, context) def visit_with_query(self, node, context): return self.visit_node(node, context) def visit_select(self,", "visit_is_null_predicate(self, node, context): return self.visit_expression(node, context) def 
visit_array_constructor(self, node, context): return self.visit_expression(node, context)", "def visit_current_time(self, node, context): return self.visit_expression(node, context) def visit_extract(self, node, context): return self.visit_expression(node,", "node, context): return self.visit_node(node, context) def visit_call(self, node, context): return self.visit_node(node, context) def", "self.visit_statement(node, context) def visit_start_transaction(self, node, context): return self.visit_statement(node, context) def visit_grant(self, node, context):", "context): return self.process(node.expression, context) def visit_arithmetic_binary(self, node, context): self.process(node.left, context) self.process(node.right, context) return", "visit_sort_item(self, node, context): return self.process(node.sort_key, context) def visit_query_specification(self, node, context): self.process(node.select, context) if", "self.process(node.left, context) self.process(node.right, context) return None def visit_between_predicate(self, node, context): self.process(node.value, context) self.process(node.min,", "node, context): for argument in node.arguments: self.process(argument, context) if node.window: self.process(node.window, context) return", "context): return self.visit_literal(node, context) def visit_arithmetic_unary(self, node, context): return self.visit_expression(node, context) def visit_not_expression(self,", "return self.visit_set_operation(node, context) def visit_except(self, node, context): return self.visit_set_operation(node, context) def visit_timestamp_literal(self, node,", "return None def visit_intersect(self, node, context): for relation in node.relations: self.process(relation, context) return", "self.visit_node(node, context) def visit_approximate(self, node, context): return self.visit_node(node, context) def visit_with_query(self, node, context):", "See the License for the specific language governing permissions and # limitations under", "context): 
return self.visit_node(node, context) def visit_with(self, node, context): return self.visit_node(node, context) def visit_approximate(self,", "return self.visit_statement(node, context) def visit_add_column(self, node, context): return self.visit_statement(node, context) def visit_create_view(self, node,", "return self.visit_statement(node, context) def visit_insert(self, node, context): return self.visit_node(node, context) def visit_call(self, node,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "node, context): return self.visit_expression(node, context) def visit_qualified_name_reference(self, node, context): return self.visit_expression(node, context) def", "visit_try_expression(self, node, context): self.process(node.inner_expression, context) return None def visit_arithmetic_unary(self, node, context): return self.process(node.value,", "visit_rename_table(self, node, context): return self.visit_statement(node, context) def visit_rename_column(self, node, context): return self.visit_statement(node, context)", "visit_drop_table(self, node, context): return self.visit_statement(node, context) def visit_rename_table(self, node, context): return self.visit_statement(node, context)", "return self.visit_node(node, context) def visit_create_table(self, node, context): return self.visit_statement(node, context) def visit_create_table_as_select(self, node,", "context): return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node, context): return self.visit_transaction_mode(node, context) def visit_commit(self,", "context) if node.group_by: grouping_elements = [] if isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns elif", "context): self.process(node.select, context) if node.from_: self.process(node.from_, context) if node.where: self.process(node.where, context) if node.group_by:", "context) def visit_values(self, node, context): return 
self.visit_query_body(node, context) def visit_row(self, node, context): return", "def visit_statement(self, node, context): return self.visit_node(node, context) def visit_query(self, node, context): return self.visit_statement(node,", "node, context): for value in node.values: self.process(value, context) return None def visit_None_if_expression(self, node,", "visit_if_expression(self, node, context): self.process(node.condition, context) self.process(node.true_value, context) if node.false_value: self.process(node.false_value, context) return None", "return self.visit_literal(node, context) def visit_boolean_literal(self, node, context): return self.visit_literal(node, context) def visit_in_list_expression(self, node,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "None \"\"\" def visit_simple_case_expression(self, node, context): self.process(node.operand, context) for clause in node.when_clauses: self.process(clause,", "self.visit_select_item(node, context) def visit_all_columns(self, node, context): return self.visit_select_item(node, context) def visit_searched_case_expression(self, node, context):", "def visit_dereference_expression(self, node, context): return self.visit_expression(node, context) def visit_null_if_expression(self, node, context): return self.visit_expression(node,", "expression in node.items: self.process(expression, context) return None def visit_table_subquery(self, node, context): return self.process(node.query,", "self.process(node.sort_key, context) def visit_query_specification(self, node, context): self.process(node.select, context) if node.from_: self.process(node.from_, context) if", "context) if node.false_value: self.process(node.false_value, context) return None def visit_try_expression(self, node, context): self.process(node.inner_expression, context)", "self.process(node.left, context) self.process(node.right, context) return None def visit_query(self, node, context): 
self.process(node.query_body, context) for", "in node.order_by: self.process(sort_item, context) return None def visit_union(self, node, context): for relation in", "self.visit_statement(node, context) def visit_use(self, node, context): return self.visit_statement(node, context) def visit_show_session(self, node, context):", "visit_extract(self, node, context): return self.visit_expression(node, context) def visit_arithmetic_binary(self, node, context): return self.visit_expression(node, context)", "self.visit_expression(node, context) def visit_qualified_name_reference(self, node, context): return self.visit_expression(node, context) def visit_dereference_expression(self, node, context):", "context) def visit_null_literal(self, node, context): return self.visit_literal(node, context) def visit_arithmetic_unary(self, node, context): return", "self.process(expression, context) return None def visit_table_subquery(self, node, context): return self.process(node.query, context) def visit_aliased_relation(self,", "context): return self.visit_expression(node, context) def visit_input_reference(self, node, context): return self.visit_expression(node, context) def visit_window(self,", "JoinUsing from .grouping import SimpleGroupBy, GroupingSets class AstVisitor(object): def process(self, node, context=None): return", "context): for value in node.values: self.process(value, context) return None def visit_None_if_expression(self, node, context):", "context) return None def visit_window_frame(self, node, context) self.process(node.start, context) if node.end: self.process(node.end, context)", "node, context): return self.visit_node(node, context) def visit_frame_bound(self, node, context): return self.visit_node(node, context) def", "self.visit_statement(node, context) def visit_drop_table(self, node, context): return self.visit_statement(node, context) def visit_rename_table(self, node, context):", "node, context): return self.visit_statement(node, context) def 
visit_transaction_mode(self, node, context): return self.visit_node(node, context) def", "context) return None def visit_in_predicate(self, node, context): self.process(node.value, context) self.process(node.value_list, context) return None", "def visit_interval_literal(self, node, context): return self.visit_literal(node, context) def visit_in_predicate(self, node, context): return self.visit_expression(node,", "context) return None def visit_at_time_zone(self, node, context): self.process(node.value, context) self.process(node.time_zone, context) return None", "def visit_window(self, node, context) for expression in node.partition: self.process(expression, context) for sort_item in", "context): return self.visit_literal(node, context) def visit_logical_binary_expression(self, node, context): return self.visit_expression(node, context) def visit_subquery_expression(self,", "self.process(node.query_body, context) for sort_item in node.order_by: self.process(sort_item, context) return None def visit_with(self, node,", "SimpleGroupBy): grouping_elements = node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets for grouping_element in", "def visit_window_frame(self, node, context): return self.visit_node(node, context) def visit_frame_bound(self, node, context): return self.visit_node(node,", "KIND, either express or implied. 
# See the License for the specific language", "context): return self.visit_select_item(node, context) def visit_all_columns(self, node, context): return self.visit_select_item(node, context) def visit_searched_case_expression(self,", "visit_when_clause(self, node, context): return self.visit_expression(node, context) def visit_interval_literal(self, node, context): return self.visit_literal(node, context)", "return self.process(node.sort_key, context) def visit_query_specification(self, node, context): self.process(node.select, context) if node.from_: self.process(node.from_, context)", "def visit_if_expression(self, node, context): return self.visit_expression(node, context) def visit_null_literal(self, node, context): return self.visit_literal(node,", "return self.visit_statement(node, context) def visit_show_catalogs(self, node, context): return self.visit_statement(node, context) def visit_show_columns(self, node,", "return self.visit_statement(node, context) def visit_rename_table(self, node, context): return self.visit_statement(node, context) def visit_rename_column(self, node,", "context) def visit_unnest(self, node, context): return self.visit_relation(node, context) def visit_values(self, node, context): return", "visit_transaction_access_mode(self, node, context): return self.visit_transaction_mode(node, context) def visit_commit(self, node, context): return self.visit_statement(node, context)", "for argument in node.arguments: self.process(argument, context) if node.window: self.process(node.window, context) return None def", "return self.visit_statement(node, context) def visit_rollback(self, node, context): return self.visit_statement(node, context) def visit_at_time_zone(self, node,", "DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context): return self.process(node.expression, context) def visit_cast(self, node, context): return", "visit_query(self, node, context): self.process(node.query_body, context) for sort_item in 
node.order_by: self.process(sort_item, context) return None", "self.visit_node(node, context) def visit_query(self, node, context): return self.visit_statement(node, context) def visit_explain(self, node, context):", "context): self.process(node.expression, context) return None def visit_when_clause(self, node, context): self.process(node.operand, context) self.process(node.result, context)", "node, context): return self.visit_expression(node, context) def visit_array_constructor(self, node, context): return self.visit_expression(node, context) def", "context): return self.visit_literal(node, context) def visit_in_predicate(self, node, context): return self.visit_expression(node, context) def visit_function_call(self,", "argument in node.arguments: self.process(argument, context) if node.window: self.process(node.window, context) return None def visit_dereference_expression(self,", "query in node.queries: self.process(query, context) return None def visit_with_query(self, node, context): return self.process(node.query,", "visit_comparison_expression(self, node, context): return self.visit_expression(node, context) def visit_literal(self, node, context): return self.visit_expression(node, context)", "ANY KIND, either express or implied. 
# See the License for the specific", "def visit_logical_binary_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_subquery_expression(self, node,", "def visit_frame_bound(self, node, context): return self.visit_node(node, context) def visit_call_argument(self, node, context): return self.visit_node(node,", "context): return self.process(node.value, context) def visit_is_None_predicate(self, node, context): return self.process(node.value, context) def visit_logical_binary_expression(self,", "visit_transaction_mode(self, node, context): return self.visit_node(node, context) def visit_isolation_level(self, node, context): return self.visit_transaction_mode(node, context)", "node, context): return self.visit_expression(node, context) def visit_literal(self, node, context): return self.visit_expression(node, context) def", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "node, context): return self.visit_statement(node, context) def visit_show_catalogs(self, node, context): return self.visit_statement(node, context) def", "def visit_with_query(self, node, context): return self.process(node.query, context) def visit_select(self, node, context): for item", "context) return None def visit_intersect(self, node, context): for relation in node.relations: self.process(relation, context)", "context) def visit_current_time(self, node, context): return self.visit_expression(node, context) def visit_extract(self, node, context): return", "return None def visit_values(self, node, context): for row in node.rows: self.process(row, context) return", "None def visit_at_time_zone(self, node, context): self.process(node.value, context) self.process(node.time_zone, context) return None def visit_array_constructor(self,", "node, context): for expression in node.items: self.process(expression, context) return None def visit_table_subquery(self, node,", "visit_in_predicate(self, node, 
context): return self.visit_expression(node, context) def visit_function_call(self, node, context): return self.visit_expression(node, context)", "visit_if_expression(self, node, context): return self.visit_expression(node, context) def visit_null_literal(self, node, context): return self.visit_literal(node, context)", "context): for item in node.select_items: self.process(item, context) return None def visit_single_column(self, node, context):", "context) def visit_is_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_array_constructor(self, node, context): return", "visit_create_view(self, node, context): return self.visit_statement(node, context) def visit_drop_view(self, node, context): return self.visit_statement(node, context)", "context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context): return self.process(node.expression, context) def visit_cast(self, node,", "context) def visit_lambda_expression(self, node, context): return self.visit_expression(node, context) def visit_simple_case_expression(self, node, context): return", "visit_values(self, node, context): for row in node.rows: self.process(row, context) return None def visit_row(self,", "self.visit_statement(node, context) def visit_set_session(self, node, context): return self.visit_statement(node, context) def visit_generic_literal(self, node, context):", "visit_select_item(self, node, context): return self.visit_node(node, context) def visit_single_column(self, node, context): return self.visit_select_item(node, context)", "self.visit_expression(node, context) def visit_input_reference(self, node, context): return self.visit_expression(node, context) def visit_window(self, node, context):", "visit_row(self, node, context): for expression in node.items: self.process(expression, context) return None def visit_table_subquery(self,", "node, context): return self.visit_statement(node, context) def visit_generic_literal(self, node, 
context): return self.visit_literal(node, context) def", "context) return None def visit_single_column(self, node, context): self.process(node.expression, context) return None def visit_when_clause(self,", "self.visit_expression(node, context) def visit_between_predicate(self, node, context): return self.visit_expression(node, context) def visit_coalesce_expression(self, node, context):", "def visit_when_clause(self, node, context): return self.visit_expression(node, context) def visit_interval_literal(self, node, context): return self.visit_literal(node,", "context) def visit_with_query(self, node, context): return self.visit_node(node, context) def visit_select(self, node, context): return", "for relation in node.relations: self.process(relation, context) return None def visit_intersect(self, node, context): for", "context) def visit_except(self, node, context): return self.visit_set_operation(node, context) def visit_timestamp_literal(self, node, context): return", "return self.visit_select_item(node, context) def visit_searched_case_expression(self, node, context): return self.visit_expression(node, context) def visit_like_predicate(self, node,", "return self.visit_query_body(node, context) def visit_row(self, node, context): return self.visit_node(node, context) def visit_table_subquery(self, node,", "visit_in_list_expression(self, node, context): for value in node.values: self.process(value, context) return None def visit_None_if_expression(self,", "visit_current_time(self, node, context): return self.visit_expression(node, context) def visit_extract(self, node, context): return self.visit_expression(node, context)", "context) return None \"\"\" def visit_window(self, node, context) for expression in node.partition: self.process(expression,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "context) def visit_if_expression(self, node, context): return self.visit_expression(node, context) def visit_null_literal(self, node, 
context): return", "def visit_sort_item(self, node, context): return self.visit_node(node, context) def visit_table(self, node, context): return self.visit_query_body(node,", "context): return self.visit_expression(node, context) def visit_double_literal(self, node, context): return self.visit_literal(node, context) def visit_statement(self,", "visit_table_element(self, node, context): return self.visit_node(node, context) def visit_create_table(self, node, context): return self.visit_statement(node, context)", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "context): return self.visit_statement(node, context) def visit_rollback(self, node, context): return self.visit_statement(node, context) def visit_at_time_zone(self,", "context) return None def visit_if_expression(self, node, context): self.process(node.condition, context) self.process(node.true_value, context) if node.false_value:", "visit_table_subquery(self, node, context): return self.visit_query_body(node, context) def visit_aliased_relation(self, node, context): return self.visit_relation(node, context)", "context): return self.visit_statement(node, context) def visit_use(self, node, context): return self.visit_statement(node, context) def visit_show_session(self,", "applicable law or agreed to in writing, software # distributed under the License", "node.relations: self.process(relation, context) return None def visit_except(self, node, context): self.process(node.left, context) self.process(node.right, context)", "node, context): return self.visit_relation(node, context) def visit_values(self, node, context): return self.visit_query_body(node, context) def", "context) def visit_string_literal(self, node, context): return self.visit_literal(node, context) def visit_binary_literal(self, node, context): return", "None def visit_coalesce_expression(self, node, context): for operand in node.operands: self.process(operand, context) return None", "CONDITIONS OF ANY KIND, 
either express or implied. # See the License for", "self.visit_expression(node, context) def visit_subscript_expression(self, node, context): return self.visit_expression(node, context) def visit_long_literal(self, node, context):", "node, context): return self.visit_expression(node, context) def visit_extract(self, node, context): return self.visit_expression(node, context) def", "context): self.process(node.first, context) self.process(node.second, context) return None def visit_if_expression(self, node, context): self.process(node.condition, context)", "def visit_lambda_expression(self, node, context): return self.visit_expression(node, context) def visit_simple_case_expression(self, node, context): return self.visit_expression(node,", "context): self.process(node.condition, context) self.process(node.true_value, context) if node.false_value: self.process(node.false_value, context) return None def visit_try_expression(self,", "self.visit_literal(node, context) def visit_when_clause(self, node, context): return self.visit_expression(node, context) def visit_interval_literal(self, node, context):", "node, context): return self.visit_statement(node, context) def visit_add_column(self, node, context): return self.visit_statement(node, context) def", "writing, software # distributed under the License is distributed on an \"AS IS\"", "self.visit_expression(node, context) def visit_window(self, node, context): return self.visit_node(node, context) def visit_window_frame(self, node, context):", "visit_is_not_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_null_predicate(self, node, context): return self.visit_expression(node, context)", "-*- # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "sort_item in node.order_by: self.process(sort_item, context) return None def visit_with(self, node, context): for query", "visit_select(self, node, context): return self.visit_node(node, context) def 
visit_relation(self, node, context): return self.visit_node(node, context)", "context) def visit_time_literal(self, node, context): return self.visit_literal(node, context) def visit_explain_option(self, node, context): return", "return None def visit_in_list_expression(self, node, context): for value in node.values: self.process(value, context) return", "def visit_simple_case_expression(self, node, context): self.process(node.operand, context) for clause in node.when_clauses: self.process(clause, context) if", "context) def visit_not_expression(self, node, context): return self.process(node.value, context) def visit_searched_case_expression(self, node, context): for", "compliance with the License. # You may obtain a copy of the License", "return None def visit_subquery_expression(self, node, context): return self.process(node.query, context) def visit_sort_item(self, node, context):", "if node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get(): self.process(expression, context) return None def visit_join(self, node,", "in node.arguments: self.process(argument, context) if node.window: self.process(node.window, context) return None def visit_dereference_expression(self, node,", "self.process(value, context) return None def visit_None_if_expression(self, node, context): self.process(node.first, context) self.process(node.second, context) return", "context): return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context): return self.process(node.expression, context)", "context) if node.default_value: self.process(node.default_valuee, context) return None def visit_in_list_expression(self, node, context): for value", "self.visit_query_body(node, context) def visit_aliased_relation(self, node, context): return self.visit_relation(node, context) def visit_sampled_relation(self, node, context):", "def visit_use(self, node, context): return 
self.visit_statement(node, context) def visit_show_session(self, node, context): return self.visit_statement(node,", "context) def visit_between_predicate(self, node, context): return self.visit_expression(node, context) def visit_coalesce_expression(self, node, context): return", "visit_window(self, node, context) for expression in node.partition: self.process(expression, context) for sort_item in node.order_by:", "None def visit_values(self, node, context): for row in node.rows: self.process(row, context) return None", "def visit_not_expression(self, node, context): return self.visit_expression(node, context) def visit_select_item(self, node, context): return self.visit_node(node,", "self.visit_expression(node, context) def visit_function_call(self, node, context): return self.visit_expression(node, context) def visit_lambda_expression(self, node, context):", "return self.visit_expression(node, context) def visit_arithmetic_binary(self, node, context): return self.visit_expression(node, context) def visit_between_predicate(self, node,", "node, context): return self.visit_expression(node, context) def visit_is_not_null_predicate(self, node, context): return self.visit_expression(node, context) def", "context): return self.visit_expression(node, context) def visit_qualified_name_reference(self, node, context): return self.visit_expression(node, context) def visit_dereference_expression(self,", "context): return self.visit_expression(node, context) def visit_arithmetic_binary(self, node, context): return self.visit_expression(node, context) def visit_between_predicate(self,", "self.visit_node(node, context) def visit_table_element(self, node, context): return self.visit_node(node, context) def visit_create_table(self, node, context):", "visit_show_columns(self, node, context): return self.visit_statement(node, context) def visit_show_partitions(self, node, context): return self.visit_statement(node, context)", "visit_show_functions(self, node, context): return 
self.visit_statement(node, context) def visit_use(self, node, context): return self.visit_statement(node, context)", "self.process(node.expression, context) def visit_arithmetic_binary(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def", "context): return self.visit_set_operation(node, context) def visit_intersect(self, node, context): return self.visit_set_operation(node, context) def visit_except(self,", "context): return self.visit_expression(node, context) def visit_subscript_expression(self, node, context): return self.visit_expression(node, context) def visit_long_literal(self,", "None: self.process(node.escape, context) return None def visit_is_not_None_predicate(self, node, context): return self.process(node.value, context) def", "node, context): self.process(node.value, context) self.process(node.value_list, context) return None def visit_function_call(self, node, context): for", "value in node.values: self.process(value, context) return None def visit_None_if_expression(self, node, context): self.process(node.first, context)", "if node.from_: self.process(node.from_, context) if node.where: self.process(node.where, context) if node.group_by: grouping_elements = []", "context): return self.visit_node(node, context) def visit_isolation_level(self, node, context): return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self,", "context) def visit_sort_item(self, node, context): return self.visit_node(node, context) def visit_table(self, node, context): return", "if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor):", "node, context): return self.visit_expression(node, context) def visit_interval_literal(self, node, context): return self.visit_literal(node, context) def", "context): return 
self.visit_node(node, context) def visit_reset_session(self, node, context): return self.visit_statement(node, context) def visit_current_time(self,", "context) def visit_show_functions(self, node, context): return self.visit_statement(node, context) def visit_use(self, node, context): return", "context) return None def visit_dereference_expression(self, node, context): self.process(node.base, context) return None \"\"\" def", "(the \"License\"); # you may not use this file except in compliance with", "visit_aliased_relation(self, node, context): return self.visit_relation(node, context) def visit_sampled_relation(self, node, context): return self.visit_relation(node, context)", "self.visit_statement(node, context) def visit_at_time_zone(self, node, context): return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self,", "def visit_single_column(self, node, context): self.process(node.expression, context) return None def visit_when_clause(self, node, context): self.process(node.operand,", "context): return self.visit_node(node, context) def visit_single_column(self, node, context): return self.visit_select_item(node, context) def visit_all_columns(self,", "in node.operands: self.process(operand, context) return None def visit_at_time_zone(self, node, context): self.process(node.value, context) self.process(node.time_zone,", "# Unless required by applicable law or agreed to in writing, software #", "self.visit_expression(node, context) def visit_arithmetic_binary(self, node, context): return self.visit_expression(node, context) def visit_between_predicate(self, node, context):", "by applicable law or agreed to in writing, software # distributed under the", "def visit_comparison_expression(self, node, context): return self.visit_expression(node, context) def visit_literal(self, node, context): return self.visit_expression(node,", "def visit_insert(self, node, context): return self.visit_node(node, context) def 
visit_call(self, node, context): return self.visit_node(node,", "context): self.process(node.value, context) self.process(node.time_zone, context) return None def visit_array_constructor(self, node, context): for expression", "context): for expression in node.items: self.process(expression, context) return None def visit_table_subquery(self, node, context):", "context) def visit_use(self, node, context): return self.visit_statement(node, context) def visit_show_session(self, node, context): return", "context): return self.process(node.value, context) def visit_logical_binary_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return", "def visit_query(self, node, context): self.process(node.query_body, context) for sort_item in node.order_by: self.process(sort_item, context) return", "context): self.process(node.left, context) self.process(node.right, context) if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing):", "def visit_values(self, node, context): return self.visit_query_body(node, context) def visit_row(self, node, context): return self.visit_node(node,", "file except in compliance with the License. 
# You may obtain a copy", "grouping_elements = [] if isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements", "self.process(clause, context) if node.default_value: self.process(node.default_value, context) return None def visit_like_predicate(self, node, context): self.process(node.value,", "visit_generic_literal(self, node, context): return self.visit_literal(node, context) def visit_time_literal(self, node, context): return self.visit_literal(node, context)", "self.visit_statement(node, context) def visit_show_schemas(self, node, context): return self.visit_statement(node, context) def visit_show_catalogs(self, node, context):", "visit_logical_binary_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_subquery_expression(self, node, context):", "def visit_select(self, node, context): for item in node.select_items: self.process(item, context) return None def", "context): return self.visit_expression(node, context) def visit_not_expression(self, node, context): return self.visit_expression(node, context) def visit_select_item(self,", "node, context): return self.visit_literal(node, context) def visit_in_list_expression(self, node, context): return self.visit_expression(node, context) def", "self.visit_expression(node, context) def visit_select_item(self, node, context): return self.visit_node(node, context) def visit_single_column(self, node, context):", "context) def visit_show_session(self, node, context): return self.visit_statement(node, context) def visit_set_session(self, node, context): return", "context) def visit_create_view(self, node, context): return self.visit_statement(node, context) def visit_drop_view(self, node, context): return", "self.visit_node(node, context) def visit_reset_session(self, node, context): return self.visit_statement(node, context) def visit_current_time(self, node, 
context):", "self.visit_query_body(node, context) def visit_set_operation(self, node, context): return self.visit_query_body(node, context) def visit_union(self, node, context):", "def visit_when_clause(self, node, context): self.process(node.operand, context) self.process(node.result, context) return None def visit_in_predicate(self, node,", "context) def visit_coalesce_expression(self, node, context): return self.visit_expression(node, context) def visit_comparison_expression(self, node, context): return", "context) self.process(node.right, context) return None def visit_subquery_expression(self, node, context): return self.process(node.query, context) def", "context): return self.visit_statement(node, context) def visit_rename_table(self, node, context): return self.visit_statement(node, context) def visit_rename_column(self,", "context): return self.visit_expression(node, context) def visit_is_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_array_constructor(self,", "self.visit_statement(node, context) def visit_explain(self, node, context): return self.visit_statement(node, context) def visit_show_tables(self, node, context):", "context): self.process(node.value, context) self.process(node.min, context) self.process(node.max, context) return None def visit_coalesce_expression(self, node, context):", "node.get_columns_to_stratify_on().get(): self.process(expression, context) return None def visit_join(self, node, context): self.process(node.left, context) self.process(node.right, context)", "context) return None def visit_arithmetic_unary(self, node, context): return self.process(node.value, context) def visit_not_expression(self, node,", "context) def visit_double_literal(self, node, context): return self.visit_literal(node, context) def visit_statement(self, node, context): return", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "node.select_items: self.process(item, context) return None def visit_single_column(self, node, context): self.process(node.expression, context) return None", "context) def visit_window(self, node, context): return self.visit_node(node, context) def visit_window_frame(self, node, context): return", "self.visit_expression(node, context) def visit_interval_literal(self, node, context): return self.visit_literal(node, context) def visit_in_predicate(self, node, context):", "def visit_in_list_expression(self, node, context): for value in node.values: self.process(value, context) return None def", "context): return self.visit_node(node, context) def visit_approximate(self, node, context): return self.visit_node(node, context) def visit_with_query(self,", "return None def visit_array_constructor(self, node, context): for expression in node.values: self.process(expression, context) return", "visit_join(self, node, context): self.process(node.left, context) self.process(node.right, context) if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context) elif", "in grouping_elements: self.process(grouping_element, context) if node.having: self.process(node.having, context) for sort_item in node.order_by: self.process(sort_item,", "node, context): return self.visit_literal(node, context) def visit_statement(self, node, context): return self.visit_node(node, context) def", "return self.visit_statement(node, context) def visit_drop_view(self, node, context): return self.visit_statement(node, context) def visit_insert(self, node,", "\"\"\" def visit_window(self, node, context) for expression in node.partition: self.process(expression, context) for sort_item", "self.process(node.condition, context) self.process(node.true_value, context) if node.false_value: self.process(node.false_value, context) return None def visit_try_expression(self, node,", 
"permissions and # limitations under the License. from .join_criteria import JoinOn, JoinUsing from", "context) def visit_show_tables(self, node, context): return self.visit_statement(node, context) def visit_show_schemas(self, node, context): return", "visit_timestamp_literal(self, node, context): return self.visit_literal(node, context) def visit_when_clause(self, node, context): return self.visit_expression(node, context)", "return self.visit_statement(node, context) def visit_drop_table(self, node, context): return self.visit_statement(node, context) def visit_rename_table(self, node,", "context) def visit_binary_literal(self, node, context): return self.visit_literal(node, context) def visit_boolean_literal(self, node, context): return", "return None def visit_function_call(self, node, context): for argument in node.arguments: self.process(argument, context) if", "def visit_coalesce_expression(self, node, context): return self.visit_expression(node, context) def visit_comparison_expression(self, node, context): return self.visit_expression(node,", "context): self.process(node.left, context) self.process(node.right, context) return None def visit_between_predicate(self, node, context): self.process(node.value, context)", "context) def visit_in_list_expression(self, node, context): return self.visit_expression(node, context) def visit_qualified_name_reference(self, node, context): return", "self.process(relation, context) return None def visit_except(self, node, context): self.process(node.left, context) self.process(node.right, context) return", "self.visit_literal(node, context) def visit_in_predicate(self, node, context): return self.visit_expression(node, context) def visit_function_call(self, node, context):", "context): return self.visit_node(node, context) def visit_relation(self, node, context): return self.visit_node(node, context) def visit_query_body(self,", "node.escape is not None: self.process(node.escape, context) return None def 
visit_is_not_None_predicate(self, node, context): return", "None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor, self).__init__(line, pos) def visit_subquery_expression(self, node,", "visit_except(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_values(self, node, context):", "def visit_table_subquery(self, node, context): return self.visit_query_body(node, context) def visit_aliased_relation(self, node, context): return self.visit_relation(node,", "def visit_null_if_expression(self, node, context): return self.visit_expression(node, context) def visit_if_expression(self, node, context): return self.visit_expression(node,", "visit_like_predicate(self, node, context): self.process(node.value, context) self.process(node.pattern, context) if node.escape is not None: self.process(node.escape,", "visit_frame_bound(self, node, context) if node.value: self.process(node.value, context) return None \"\"\" def visit_simple_case_expression(self, node,", "context) def visit_relation(self, node, context): return self.visit_node(node, context) def visit_query_body(self, node, context): return", "node, context): self.process(node.value, context) self.process(node.pattern, context) if node.escape is not None: self.process(node.escape, context)", "visit_start_transaction(self, node, context): return self.visit_statement(node, context) def visit_grant(self, node, context): return self.visit_statement(node, context)", "return self.visit_statement(node, context) def visit_show_schemas(self, node, context): return self.visit_statement(node, context) def visit_show_catalogs(self, node,", "JoinOn): self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self,", 
"context) def visit_try_expression(self, node, context): return self.visit_expression(node, context) def visit_cast(self, node, context): return", "context): for row in node.rows: self.process(row, context) return None def visit_row(self, node, context):", "return self.visit_statement(node, context) def visit_current_time(self, node, context): return self.visit_expression(node, context) def visit_extract(self, node,", "return self.visit_literal(node, context) def visit_statement(self, node, context): return self.visit_node(node, context) def visit_query(self, node,", "self.visit_statement(node, context) def visit_show_functions(self, node, context): return self.visit_statement(node, context) def visit_use(self, node, context):", "self.process(node.value, context) self.process(node.value_list, context) return None def visit_function_call(self, node, context): for argument in", "self.visit_statement(node, context) def visit_create_view(self, node, context): return self.visit_statement(node, context) def visit_drop_view(self, node, context):", "return self.visit_statement(node, context) def visit_explain(self, node, context): return self.visit_statement(node, context) def visit_show_tables(self, node,", "def visit_union(self, node, context): return self.visit_set_operation(node, context) def visit_intersect(self, node, context): return self.visit_set_operation(node,", "context) return None def visit_query(self, node, context): self.process(node.query_body, context) for sort_item in node.order_by:", "def visit_aliased_relation(self, node, context): return self.visit_relation(node, context) def visit_sampled_relation(self, node, context): return self.visit_relation(node,", "visit_set_operation(self, node, context): return self.visit_query_body(node, context) def visit_union(self, node, context): return self.visit_set_operation(node, context)", "return self.visit_statement(node, context) def visit_generic_literal(self, node, context): return self.visit_literal(node, 
context) def visit_time_literal(self, node,", "node, context): return self.visit_literal(node, context) def visit_when_clause(self, node, context): return self.visit_expression(node, context) def", "context): return self.visit_node(node, context) def visit_create_table(self, node, context): return self.visit_statement(node, context) def visit_create_table_as_select(self,", "def visit_comparison_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_query(self, node,", "context) def visit_not_expression(self, node, context): return self.visit_expression(node, context) def visit_select_item(self, node, context): return", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "visit_show_catalogs(self, node, context): return self.visit_statement(node, context) def visit_show_columns(self, node, context): return self.visit_statement(node, context)", "context): return self.visit_expression(node, context) def visit_function_call(self, node, context): return self.visit_expression(node, context) def visit_lambda_expression(self,", "relation in node.relations: self.process(relation, context) return None def visit_intersect(self, node, context): for relation", "node, context): return self.visit_literal(node, context) def visit_time_literal(self, node, context): return self.visit_literal(node, context) def", "def visit_sort_item(self, node, context): return self.process(node.sort_key, context) def visit_query_specification(self, node, context): self.process(node.select, context)", "visit_aliased_relation(self, node, context): return self.process(node.relation, context) def visit_sampled_relation(self, node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(),", "context) def visit_long_literal(self, node, context): return self.visit_literal(node, context) def visit_logical_binary_expression(self, node, context): return", "None def 
visit_if_expression(self, node, context): self.process(node.condition, context) self.process(node.true_value, context) if node.false_value: self.process(node.false_value, context)", "return None \"\"\" def visit_window(self, node, context) for expression in node.partition: self.process(expression, context)", "node.operands: self.process(operand, context) return None def visit_at_time_zone(self, node, context): self.process(node.value, context) self.process(node.time_zone, context)", "visit_show_schemas(self, node, context): return self.visit_statement(node, context) def visit_show_catalogs(self, node, context): return self.visit_statement(node, context)", "self.visit_node(node, context) def visit_frame_bound(self, node, context): return self.visit_node(node, context) def visit_call_argument(self, node, context):", "node, context): self.process(node.first, context) self.process(node.second, context) return None def visit_if_expression(self, node, context): self.process(node.condition,", "visit_union(self, node, context): return self.visit_set_operation(node, context) def visit_intersect(self, node, context): return self.visit_set_operation(node, context)", "self.visit_set_operation(node, context) def visit_except(self, node, context): return self.visit_set_operation(node, context) def visit_timestamp_literal(self, node, context):", "return None def visit_union(self, node, context): for relation in node.relations: self.process(relation, context) return", "context): return self.visit_expression(node, context) def visit_coalesce_expression(self, node, context): return self.visit_expression(node, context) def visit_comparison_expression(self,", "= node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets for grouping_element in grouping_elements: self.process(grouping_element,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "context) def visit_comparison_expression(self, node, context): 
return self.visit_expression(node, context) def visit_literal(self, node, context): return", "context): return self.visit_relation(node, context) def visit_values(self, node, context): return self.visit_query_body(node, context) def visit_row(self,", "context): for query in node.queries: self.process(query, context) return None def visit_with_query(self, node, context):", "node, context): return self.visit_node(node, context) def visit_call_argument(self, node, context): return self.visit_node(node, context) def", "context): return self.visit_expression(node, context) def visit_array_constructor(self, node, context): return self.visit_expression(node, context) def visit_subscript_expression(self,", "def visit_boolean_literal(self, node, context): return self.visit_literal(node, context) def visit_in_list_expression(self, node, context): return self.visit_expression(node,", "def visit_subscript_expression(self, node, context): self.process(node.base, context) self.process(node.index, context) return None def visit_comparison_expression(self, node,", "class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context): return self.process(node.expression, context) def visit_cast(self, node, context):", "in node.partition: self.process(expression, context) for sort_item in node.order_by: self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame,", "return None def visit_table_subquery(self, node, context): return self.process(node.query, context) def visit_aliased_relation(self, node, context):", "context): return self.visit_node(node, context) def visit_delete(self, node, context): return self.visit_statement(node, context) def visit_start_transaction(self,", "node, context): return self.visit_statement(node, context) def visit_start_transaction(self, node, context): return self.visit_statement(node, context) def", "def visit_show_functions(self, node, context): return self.visit_statement(node, context) def visit_use(self, node, 
context): return self.visit_statement(node,", "node, context): return self.visit_query_body(node, context) def visit_unnest(self, node, context): return self.visit_relation(node, context) def", "context) def visit_literal(self, node, context): return self.visit_expression(node, context) def visit_double_literal(self, node, context): return", "context): return self.visit_relation(node, context) def visit_exists(self, node, context): return self.visit_expression(node, context) def visit_try_expression(self,", "None def visit_None_if_expression(self, node, context): self.process(node.first, context) self.process(node.second, context) return None def visit_if_expression(self,", "self.process(node.default_value, context) return None def visit_like_predicate(self, node, context): self.process(node.value, context) self.process(node.pattern, context) if", "def visit_try_expression(self, node, context): return self.visit_expression(node, context) def visit_cast(self, node, context): return self.visit_expression(node,", "return self.process(node.query, context) def visit_select(self, node, context): for item in node.select_items: self.process(item, context)", "node.queries: self.process(query, context) return None def visit_with_query(self, node, context): return self.process(node.query, context) def", "JoinUsing): self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor, self).__init__(line, pos)", "node, context): self.process(node.condition, context) self.process(node.true_value, context) if node.false_value: self.process(node.false_value, context) return None def", "context) def visit_isolation_level(self, node, context): return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node, context): return", "context): return self.visit_expression(node, context) def visit_literal(self, node, context): return 
self.visit_expression(node, context) def visit_double_literal(self,", "node, context): return self.visit_literal(node, context) def visit_boolean_literal(self, node, context): return self.visit_literal(node, context) def", "return self.process(node.value, context) def visit_logical_binary_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None", "context) def visit_set_operation(self, node, context): return self.visit_query_body(node, context) def visit_union(self, node, context): return", "def visit_with_query(self, node, context): return self.visit_node(node, context) def visit_select(self, node, context): return self.visit_node(node,", "context): return self.visit_node(node, context) def visit_table_element(self, node, context): return self.visit_node(node, context) def visit_create_table(self,", "context) def visit_insert(self, node, context): return self.visit_node(node, context) def visit_call(self, node, context): return", "the License for the specific language governing permissions and # limitations under the", "context) self.process(node.right, context) return None def visit_between_predicate(self, node, context): self.process(node.value, context) self.process(node.min, context)", "None def visit_is_not_None_predicate(self, node, context): return self.process(node.value, context) def visit_is_None_predicate(self, node, context): return", "context) def visit_simple_case_expression(self, node, context): return self.visit_expression(node, context) def visit_string_literal(self, node, context): return", "context): return self.visit_statement(node, context) def visit_current_time(self, node, context): return self.visit_expression(node, context) def visit_extract(self,", "visit_unnest(self, node, context): return self.visit_relation(node, context) def visit_values(self, node, context): return self.visit_query_body(node, context)", "node, context) self.process(node.start, context) if node.end: 
self.process(node.end, context) return None def visit_frame_bound(self, node,", "class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor, self).__init__(line, pos) def visit_subquery_expression(self, node, context):", "node, context): return self.visit_expression(node, context) def visit_try_expression(self, node, context): return self.visit_expression(node, context) def", "language governing permissions and # limitations under the License. from .join_criteria import JoinOn,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "node, context=None): return node.accept(self, context) def visit_node(self, node, context): pass def visit_expression(self, node,", "node, context): self.process(node.expression, context) return None def visit_when_clause(self, node, context): self.process(node.operand, context) self.process(node.result,", "def visit_create_table_as_select(self, node, context): return self.visit_statement(node, context) def visit_drop_table(self, node, context): return self.visit_statement(node,", "return self.visit_statement(node, context) def visit_start_transaction(self, node, context): return self.visit_statement(node, context) def visit_grant(self, node,", "visit_simple_case_expression(self, node, context): return self.visit_expression(node, context) def visit_string_literal(self, node, context): return self.visit_literal(node, context)", "return self.visit_select_item(node, context) def visit_all_columns(self, node, context): return self.visit_select_item(node, context) def visit_searched_case_expression(self, node,", "def visit_qualified_name_reference(self, node, context): return self.visit_expression(node, context) def visit_dereference_expression(self, node, context): return self.visit_expression(node,", "self.process(node.start, context) if node.end: self.process(node.end, context) return None def visit_frame_bound(self, node, 
context) if", "def visit_searched_case_expression(self, node, context): for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_value,", "self.process(node.base, context) self.process(node.index, context) return None def visit_comparison_expression(self, node, context): self.process(node.left, context) self.process(node.right,", "node, context): return self.visit_node(node, context) def visit_table(self, node, context): return self.visit_query_body(node, context) def", "node, context): return self.visit_transaction_mode(node, context) def visit_commit(self, node, context): return self.visit_statement(node, context) def", "def visit_show_tables(self, node, context): return self.visit_statement(node, context) def visit_show_schemas(self, node, context): return self.visit_statement(node,", "visit_at_time_zone(self, node, context): return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context): return", "context) self.process(node.true_value, context) if node.false_value: self.process(node.false_value, context) return None def visit_try_expression(self, node, context):", "self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get(): self.process(expression, context) return None def", "from .grouping import SimpleGroupBy, GroupingSets class AstVisitor(object): def process(self, node, context=None): return node.accept(self,", "context) def visit_node(self, node, context): pass def visit_expression(self, node, context): return self.visit_node(node, context)", "in node.relations: self.process(relation, context) return None def visit_intersect(self, node, context): for relation in", "context): return self.visit_expression(node, context) def visit_like_predicate(self, node, context): return self.visit_expression(node, context) def 
visit_is_not_null_predicate(self,", "None def visit_arithmetic_unary(self, node, context): return self.process(node.value, context) def visit_not_expression(self, node, context): return", "self.process(sort_item, context) return None def visit_with(self, node, context): for query in node.queries: self.process(query,", "context): return self.visit_expression(node, context) def visit_long_literal(self, node, context): return self.visit_literal(node, context) def visit_logical_binary_expression(self,", "visit_row(self, node, context): return self.visit_node(node, context) def visit_table_subquery(self, node, context): return self.visit_query_body(node, context)", "return self.visit_expression(node, context) def visit_if_expression(self, node, context): return self.visit_expression(node, context) def visit_null_literal(self, node,", "context) def visit_query_specification(self, node, context): self.process(node.select, context) if node.from_: self.process(node.from_, context) if node.where:", "grouping_elements = node.group_by.sets for grouping_element in grouping_elements: self.process(grouping_element, context) if node.having: self.process(node.having, context)", "Version 2.0 (the \"License\"); # you may not use this file except in", "context): return self.visit_literal(node, context) def visit_time_literal(self, node, context): return self.visit_literal(node, context) def visit_explain_option(self,", "context) def visit_table_element(self, node, context): return self.visit_node(node, context) def visit_create_table(self, node, context): return", "visit_logical_binary_expression(self, node, context): return self.visit_expression(node, context) def visit_subquery_expression(self, node, context): return self.visit_expression(node, context)", "node.where: self.process(node.where, context) if node.group_by: grouping_elements = [] if isinstance(node.group_by, SimpleGroupBy): grouping_elements =", "context) def visit_row(self, node, context): return self.visit_node(node, 
context) def visit_table_subquery(self, node, context): return", "self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context): return self.process(node.expression, context) def visit_cast(self,", "self.process(node.value, context) return None \"\"\" def visit_simple_case_expression(self, node, context): self.process(node.operand, context) for clause", "context): return self.visit_expression(node, context) def visit_lambda_expression(self, node, context): return self.visit_expression(node, context) def visit_simple_case_expression(self,", "node, context): return self.visit_node(node, context) def visit_query_body(self, node, context): return self.visit_relation(node, context) def", "self.process(node.max, context) return None def visit_coalesce_expression(self, node, context): for operand in node.operands: self.process(operand,", "node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_subquery_expression(self, node, context): return", "def visit_rename_table(self, node, context): return self.visit_statement(node, context) def visit_rename_column(self, node, context): return self.visit_statement(node,", "return None def visit_dereference_expression(self, node, context): self.process(node.base, context) return None \"\"\" def visit_window(self,", "visit_set_session(self, node, context): return self.visit_statement(node, context) def visit_generic_literal(self, node, context): return self.visit_literal(node, context)", "def visit_with(self, node, context): return self.visit_node(node, context) def visit_approximate(self, node, context): return self.visit_node(node,", "visit_arithmetic_unary(self, node, context): return self.visit_expression(node, context) def visit_not_expression(self, node, context): return self.visit_expression(node, context)", "context) def visit_rename_table(self, node, context): return self.visit_statement(node, context) def 
visit_rename_column(self, node, context): return", "return self.visit_node(node, context) def visit_call(self, node, context): return self.visit_node(node, context) def visit_delete(self, node,", "visit_query_body(self, node, context): return self.visit_relation(node, context) def visit_query_specification(self, node, context): return self.visit_query_body(node, context)", "def visit_call_argument(self, node, context): return self.visit_node(node, context) def visit_table_element(self, node, context): return self.visit_node(node,", "return node.accept(self, context) def visit_node(self, node, context): pass def visit_expression(self, node, context): return", "= node.group_by.sets for grouping_element in grouping_elements: self.process(grouping_element, context) if node.having: self.process(node.having, context) for", "return self.visit_expression(node, context) def visit_function_call(self, node, context): return self.visit_expression(node, context) def visit_lambda_expression(self, node,", "visit_time_literal(self, node, context): return self.visit_literal(node, context) def visit_explain_option(self, node, context): return self.visit_node(node, context)", "def visit_string_literal(self, node, context): return self.visit_literal(node, context) def visit_binary_literal(self, node, context): return self.visit_literal(node,", "node.group_by: grouping_elements = [] if isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns elif isinstance(node.group_by, GroupingSets):", "context) self.process(node.pattern, context) if node.escape is not None: self.process(node.escape, context) return None def", "return self.visit_relation(node, context) def visit_values(self, node, context): return self.visit_query_body(node, context) def visit_row(self, node,", "return self.visit_expression(node, context) def visit_between_predicate(self, node, context): return self.visit_expression(node, context) def visit_coalesce_expression(self, node,", "context): 
self.process(node.base, context) self.process(node.index, context) return None def visit_comparison_expression(self, node, context): self.process(node.left, context)", "return self.visit_expression(node, context) def visit_interval_literal(self, node, context): return self.visit_literal(node, context) def visit_in_predicate(self, node,", "def visit_values(self, node, context): for row in node.rows: self.process(row, context) return None def", "None def visit_when_clause(self, node, context): self.process(node.operand, context) self.process(node.result, context) return None def visit_in_predicate(self,", "node, context): return self.visit_statement(node, context) def visit_show_schemas(self, node, context): return self.visit_statement(node, context) def", "visit_call(self, node, context): return self.visit_node(node, context) def visit_delete(self, node, context): return self.visit_statement(node, context)", "def visit_unnest(self, node, context): return self.visit_relation(node, context) def visit_values(self, node, context): return self.visit_query_body(node,", "self.visit_relation(node, context) def visit_query_specification(self, node, context): return self.visit_query_body(node, context) def visit_set_operation(self, node, context):", "def visit_query_specification(self, node, context): self.process(node.select, context) if node.from_: self.process(node.from_, context) if node.where: self.process(node.where,", "context): return self.visit_expression(node, context) def visit_cast(self, node, context): return self.visit_expression(node, context) def visit_input_reference(self,", "def visit_show_session(self, node, context): return self.visit_statement(node, context) def visit_set_session(self, node, context): return self.visit_statement(node,", "context): for argument in node.arguments: self.process(argument, context) if node.window: self.process(node.window, context) return None", "context) return None def visit_array_constructor(self, node, context): for 
expression in node.values: self.process(expression, context)", "node, context): return self.visit_set_operation(node, context) def visit_intersect(self, node, context): return self.visit_set_operation(node, context) def", "return self.visit_literal(node, context) def visit_explain_option(self, node, context): return self.visit_node(node, context) def visit_with(self, node,", "node, context): return self.visit_expression(node, context) def visit_not_expression(self, node, context): return self.visit_expression(node, context) def", "return self.visit_node(node, context) def visit_call_argument(self, node, context): return self.visit_node(node, context) def visit_table_element(self, node,", "context) def visit_dereference_expression(self, node, context): return self.visit_expression(node, context) def visit_null_if_expression(self, node, context): return", "context) def visit_statement(self, node, context): return self.visit_node(node, context) def visit_query(self, node, context): return", "return None def visit_except(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def", "node, context): return self.process(node.query, context) def visit_select(self, node, context): for item in node.select_items:", "context): self.process(node.inner_expression, context) return None def visit_arithmetic_unary(self, node, context): return self.process(node.value, context) def", "node, context): return self.visit_set_operation(node, context) def visit_timestamp_literal(self, node, context): return self.visit_literal(node, context) def", "visit_approximate(self, node, context): return self.visit_node(node, context) def visit_with_query(self, node, context): return self.visit_node(node, context)", "self.process(node.right, context) if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None", "context) if node.from_: 
self.process(node.from_, context) if node.where: self.process(node.where, context) if node.group_by: grouping_elements =", "return self.visit_statement(node, context) def visit_show_session(self, node, context): return self.visit_statement(node, context) def visit_set_session(self, node,", "return self.process(node.value, context) def visit_searched_case_expression(self, node, context): for clause in node.when_clauses: self.process(clause, context)", "clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_value, context) return None def visit_like_predicate(self,", "context) def visit_all_columns(self, node, context): return self.visit_select_item(node, context) def visit_searched_case_expression(self, node, context): return", "visit_arithmetic_binary(self, node, context): return self.visit_expression(node, context) def visit_between_predicate(self, node, context): return self.visit_expression(node, context)", "visit_with_query(self, node, context): return self.visit_node(node, context) def visit_select(self, node, context): return self.visit_node(node, context)", "visit_in_predicate(self, node, context): self.process(node.value, context) self.process(node.value_list, context) return None def visit_function_call(self, node, context):", "context) if node.default_value: self.process(node.default_value, context) return None def visit_like_predicate(self, node, context): self.process(node.value, context)", "node, context): return self.visit_statement(node, context) def visit_rename_column(self, node, context): return self.visit_statement(node, context) def", "node, context): self.process(node.inner_expression, context) return None def visit_arithmetic_unary(self, node, context): return self.process(node.value, context)", "self.visit_statement(node, context) def visit_show_partitions(self, node, context): return self.visit_statement(node, context) def visit_show_functions(self, node, context):", 
"self.visit_expression(node, context) def visit_subquery_expression(self, node, context): return self.visit_expression(node, context) def visit_sort_item(self, node, context):", "context) def visit_intersect(self, node, context): return self.visit_set_operation(node, context) def visit_except(self, node, context): return", "visit_window_frame(self, node, context) self.process(node.start, context) if node.end: self.process(node.end, context) return None def visit_frame_bound(self,", "is not None: self.process(node.escape, context) return None def visit_is_not_None_predicate(self, node, context): return self.process(node.value,", "None def visit_with(self, node, context): for query in node.queries: self.process(query, context) return None", "visit_lambda_expression(self, node, context): return self.visit_expression(node, context) def visit_simple_case_expression(self, node, context): return self.visit_expression(node, context)", "context): return self.visit_node(node, context) def visit_call_argument(self, node, context): return self.visit_node(node, context) def visit_table_element(self,", "return self.visit_node(node, context) def visit_with_query(self, node, context): return self.visit_node(node, context) def visit_select(self, node,", "self.visit_statement(node, context) def visit_insert(self, node, context): return self.visit_node(node, context) def visit_call(self, node, context):", "OF ANY KIND, either express or implied. 
# See the License for the", "node, context): return self.visit_literal(node, context) def visit_arithmetic_unary(self, node, context): return self.visit_expression(node, context) def", "context) if node.having: self.process(node.having, context) for sort_item in node.order_by: self.process(sort_item, context) return None", "node.accept(self, context) def visit_node(self, node, context): pass def visit_expression(self, node, context): return self.visit_node(node,", "visit_function_call(self, node, context): for argument in node.arguments: self.process(argument, context) if node.window: self.process(node.window, context)", "node, context): return self.visit_node(node, context) def visit_with_query(self, node, context): return self.visit_node(node, context) def", "context) return None def visit_like_predicate(self, node, context): self.process(node.value, context) self.process(node.pattern, context) if node.escape", "context) def visit_with(self, node, context): return self.visit_node(node, context) def visit_approximate(self, node, context): return", "node, context): return self.visit_expression(node, context) def visit_dereference_expression(self, node, context): return self.visit_expression(node, context) def", "visit_cast(self, node, context): return self.process(node.expression, context) def visit_arithmetic_binary(self, node, context): self.process(node.left, context) self.process(node.right,", "node, context): return self.visit_literal(node, context) def visit_logical_binary_expression(self, node, context): return self.visit_expression(node, context) def", "self.visit_expression(node, context) def visit_null_if_expression(self, node, context): return self.visit_expression(node, context) def visit_if_expression(self, node, context):", "self.visit_relation(node, context) def visit_sampled_relation(self, node, context): return self.visit_relation(node, context) def visit_join(self, node, context):", "def visit_show_columns(self, node, context): return 
self.visit_statement(node, context) def visit_show_partitions(self, node, context): return self.visit_statement(node,", "for expression in node.items: self.process(expression, context) return None def visit_table_subquery(self, node, context): return", "node, context): pass def visit_expression(self, node, context): return self.visit_node(node, context) def visit_reset_session(self, node,", "node, context): return self.visit_expression(node, context) def visit_between_predicate(self, node, context): return self.visit_expression(node, context) def", "context): return self.visit_literal(node, context) def visit_explain_option(self, node, context): return self.visit_node(node, context) def visit_with(self,", "for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_value, context) return None def", "node, context): return self.visit_expression(node, context) def visit_simple_case_expression(self, node, context): return self.visit_expression(node, context) def", "self.visit_node(node, context) def visit_create_table(self, node, context): return self.visit_statement(node, context) def visit_create_table_as_select(self, node, context):", "def visit_at_time_zone(self, node, context): self.process(node.value, context) self.process(node.time_zone, context) return None def visit_array_constructor(self, node,", "if node.frame: self.process(node.frame, context) return None def visit_window_frame(self, node, context) self.process(node.start, context) if", "def visit_timestamp_literal(self, node, context): return self.visit_literal(node, context) def visit_when_clause(self, node, context): return self.visit_expression(node,", "context) return None def visit_when_clause(self, node, context): self.process(node.operand, context) self.process(node.result, context) return None", "node, context): for relation in node.relations: self.process(relation, context) return None def visit_except(self, node,", "elif 
isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor,", "return self.visit_node(node, context) def visit_table_subquery(self, node, context): return self.visit_query_body(node, context) def visit_aliased_relation(self, node,", "def visit_table_subquery(self, node, context): return self.process(node.query, context) def visit_aliased_relation(self, node, context): return self.process(node.relation,", "self.visit_expression(node, context) def visit_sort_item(self, node, context): return self.visit_node(node, context) def visit_table(self, node, context):", "or agreed to in writing, software # distributed under the License is distributed", "context) return None def visit_subquery_expression(self, node, context): return self.process(node.query, context) def visit_sort_item(self, node,", "context) return None def visit_with_query(self, node, context): return self.process(node.query, context) def visit_select(self, node,", "def visit_double_literal(self, node, context): return self.visit_literal(node, context) def visit_statement(self, node, context): return self.visit_node(node,", "return self.visit_query_body(node, context) def visit_aliased_relation(self, node, context): return self.visit_relation(node, context) def visit_sampled_relation(self, node,", "visit_single_column(self, node, context): return self.visit_select_item(node, context) def visit_all_columns(self, node, context): return self.visit_select_item(node, context)", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "node, context): return self.visit_expression(node, context) def visit_function_call(self, node, context): return self.visit_expression(node, context) def", "visit_function_call(self, node, context): return self.visit_expression(node, context) def visit_lambda_expression(self, node, context): 
return self.visit_expression(node, context)", "self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame, context) return None def visit_window_frame(self, node, context) self.process(node.start,", "License. # You may obtain a copy of the License at # #", "context): return self.visit_node(node, context) def visit_query(self, node, context): return self.visit_statement(node, context) def visit_explain(self,", "context): return self.visit_statement(node, context) def visit_show_tables(self, node, context): return self.visit_statement(node, context) def visit_show_schemas(self,", "visit_query_specification(self, node, context): self.process(node.select, context) if node.from_: self.process(node.from_, context) if node.where: self.process(node.where, context)", "return self.visit_expression(node, context) def visit_double_literal(self, node, context): return self.visit_literal(node, context) def visit_statement(self, node,", "self.visit_set_operation(node, context) def visit_timestamp_literal(self, node, context): return self.visit_literal(node, context) def visit_when_clause(self, node, context):", "return self.process(node.query, context) def visit_aliased_relation(self, node, context): return self.process(node.relation, context) def visit_sampled_relation(self, node,", "def visit_like_predicate(self, node, context): self.process(node.value, context) self.process(node.pattern, context) if node.escape is not None:", "self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor, self).__init__(line, pos) def", "context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None):", "# limitations under the License. 
from .join_criteria import JoinOn, JoinUsing from .grouping import", "context): return self.visit_statement(node, context) def visit_create_table_as_select(self, node, context): return self.visit_statement(node, context) def visit_drop_table(self,", "def visit_table_element(self, node, context): return self.visit_node(node, context) def visit_create_table(self, node, context): return self.visit_statement(node,", "def visit_set_session(self, node, context): return self.visit_statement(node, context) def visit_generic_literal(self, node, context): return self.visit_literal(node,", "context): return self.visit_expression(node, context) def visit_try_expression(self, node, context): return self.visit_expression(node, context) def visit_cast(self,", "context): return self.visit_literal(node, context) def visit_statement(self, node, context): return self.visit_node(node, context) def visit_query(self,", "None def visit_in_list_expression(self, node, context): for value in node.values: self.process(value, context) return None", "context): return self.visit_statement(node, context) def visit_show_functions(self, node, context): return self.visit_statement(node, context) def visit_use(self,", "if node.having: self.process(node.having, context) for sort_item in node.order_by: self.process(sort_item, context) return None def", "grouping_elements = node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets for grouping_element in grouping_elements:", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "in node.order_by: self.process(sort_item, context) return None def visit_with(self, node, context): for query in", "the License. 
from .join_criteria import JoinOn, JoinUsing from .grouping import SimpleGroupBy, GroupingSets class", ".grouping import SimpleGroupBy, GroupingSets class AstVisitor(object): def process(self, node, context=None): return node.accept(self, context)", "context) def visit_query(self, node, context): return self.visit_statement(node, context) def visit_explain(self, node, context): return", "return self.visit_expression(node, context) def visit_literal(self, node, context): return self.visit_expression(node, context) def visit_double_literal(self, node,", "context) def visit_subscript_expression(self, node, context): return self.visit_expression(node, context) def visit_long_literal(self, node, context): return", "def visit_subquery_expression(self, node, context): return self.visit_expression(node, context) def visit_sort_item(self, node, context): return self.visit_node(node,", "self.process(node.result, context) return None def visit_in_predicate(self, node, context): self.process(node.value, context) self.process(node.value_list, context) return", "context): return self.process(node.query, context) def visit_sort_item(self, node, context): return self.process(node.sort_key, context) def visit_query_specification(self,", "if node.where: self.process(node.where, context) if node.group_by: grouping_elements = [] if isinstance(node.group_by, SimpleGroupBy): grouping_elements", "return self.visit_node(node, context) def visit_frame_bound(self, node, context): return self.visit_node(node, context) def visit_call_argument(self, node,", "node, context): return self.visit_statement(node, context) def visit_insert(self, node, context): return self.visit_node(node, context) def", "def visit_extract(self, node, context): return self.visit_expression(node, context) def visit_arithmetic_binary(self, node, context): return self.visit_expression(node,", "License, Version 2.0 (the \"License\"); # you may not use this file except", "context) def visit_searched_case_expression(self, 
node, context): return self.visit_expression(node, context) def visit_like_predicate(self, node, context): return", "visit_sampled_relation(self, node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get():", "context) def visit_table(self, node, context): return self.visit_query_body(node, context) def visit_unnest(self, node, context): return", "node, context) for expression in node.partition: self.process(expression, context) for sort_item in node.order_by: self.process(sort_item.sort_key,", "node, context): return self.visit_expression(node, context) def visit_null_literal(self, node, context): return self.visit_literal(node, context) def", "node, context): return self.visit_node(node, context) def visit_table_subquery(self, node, context): return self.visit_query_body(node, context) def", "self.visit_node(node, context) def visit_query_body(self, node, context): return self.visit_relation(node, context) def visit_query_specification(self, node, context):", "self.visit_literal(node, context) def visit_boolean_literal(self, node, context): return self.visit_literal(node, context) def visit_in_list_expression(self, node, context):", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "context) def visit_transaction_access_mode(self, node, context): return self.visit_transaction_mode(node, context) def visit_commit(self, node, context): return", "node.value: self.process(node.value, context) return None \"\"\" def visit_simple_case_expression(self, node, context): self.process(node.operand, context) for", "context): return self.visit_expression(node, context) def visit_string_literal(self, node, context): return self.visit_literal(node, context) def visit_binary_literal(self,", "context) self.process(node.get_sample_percentage(), context) if 
node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get(): self.process(expression, context) return None", "License. from .join_criteria import JoinOn, JoinUsing from .grouping import SimpleGroupBy, GroupingSets class AstVisitor(object):", "visit_create_table_as_select(self, node, context): return self.visit_statement(node, context) def visit_drop_table(self, node, context): return self.visit_statement(node, context)", "node, context): for row in node.rows: self.process(row, context) return None def visit_row(self, node,", "def visit_cast(self, node, context): return self.visit_expression(node, context) def visit_input_reference(self, node, context): return self.visit_expression(node,", "context) return None def visit_coalesce_expression(self, node, context): for operand in node.operands: self.process(operand, context)", "context) def visit_exists(self, node, context): return self.visit_expression(node, context) def visit_try_expression(self, node, context): return", "return self.process(node.relation, context) def visit_sampled_relation(self, node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present():", "context) for sort_item in node.order_by: self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame, context) return None", "in node.get_columns_to_stratify_on().get(): self.process(expression, context) return None def visit_join(self, node, context): self.process(node.left, context) self.process(node.right,", "context) def visit_window_frame(self, node, context): return self.visit_node(node, context) def visit_frame_bound(self, node, context): return", "self.process(node.operand, context) for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_valuee, context) return", "def visit_transaction_access_mode(self, node, context): return 
self.visit_transaction_mode(node, context) def visit_commit(self, node, context): return self.visit_statement(node,", "self.process(node.left, context) self.process(node.right, context) return None def visit_subquery_expression(self, node, context): return self.process(node.query, context)", "context): for relation in node.relations: self.process(relation, context) return None def visit_except(self, node, context):", "context): self.process(node.left, context) self.process(node.right, context) return None def visit_subquery_expression(self, node, context): return self.process(node.query,", "None def visit_with_query(self, node, context): return self.process(node.query, context) def visit_select(self, node, context): for", "node, context): self.process(node.operand, context) for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_valuee,", "self.process(operand, context) return None def visit_at_time_zone(self, node, context): self.process(node.value, context) self.process(node.time_zone, context) return", "return self.visit_expression(node, context) def visit_extract(self, node, context): return self.visit_expression(node, context) def visit_arithmetic_binary(self, node,", "or implied. 
# See the License for the specific language governing permissions and", "return self.visit_expression(node, context) def visit_qualified_name_reference(self, node, context): return self.visit_expression(node, context) def visit_dereference_expression(self, node,", "context) def visit_select(self, node, context): return self.visit_node(node, context) def visit_relation(self, node, context): return", "context) def visit_query_body(self, node, context): return self.visit_relation(node, context) def visit_query_specification(self, node, context): return", "def visit_single_column(self, node, context): return self.visit_select_item(node, context) def visit_all_columns(self, node, context): return self.visit_select_item(node,", "node, context): return self.visit_literal(node, context) def visit_explain_option(self, node, context): return self.visit_node(node, context) def", "context) def visit_subquery_expression(self, node, context): return self.visit_expression(node, context) def visit_sort_item(self, node, context): return", "for item in node.select_items: self.process(item, context) return None def visit_single_column(self, node, context): self.process(node.expression,", "context) def visit_aliased_relation(self, node, context): return self.process(node.relation, context) def visit_sampled_relation(self, node, context): self.process(node.relation,", "self.visit_expression(node, context) def visit_dereference_expression(self, node, context): return self.visit_expression(node, context) def visit_null_if_expression(self, node, context):", "visit_like_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_not_null_predicate(self, node, context): return self.visit_expression(node, context)", "self.visit_statement(node, context) def visit_rollback(self, node, context): return self.visit_statement(node, context) def visit_at_time_zone(self, node, context):", "node, context): return self.process(node.query, context) def visit_sort_item(self, 
node, context): return self.process(node.sort_key, context) def", "visit_create_table(self, node, context): return self.visit_statement(node, context) def visit_create_table_as_select(self, node, context): return self.visit_statement(node, context)", "def visit_array_constructor(self, node, context): for expression in node.values: self.process(expression, context) return None def", "self.process(node.value, context) def visit_searched_case_expression(self, node, context): for clause in node.when_clauses: self.process(clause, context) if", "return self.visit_transaction_mode(node, context) def visit_commit(self, node, context): return self.visit_statement(node, context) def visit_rollback(self, node,", "return self.visit_node(node, context) def visit_relation(self, node, context): return self.visit_node(node, context) def visit_query_body(self, node,", "self.visit_literal(node, context) def visit_statement(self, node, context): return self.visit_node(node, context) def visit_query(self, node, context):", "return self.visit_expression(node, context) def visit_cast(self, node, context): return self.visit_expression(node, context) def visit_input_reference(self, node,", "self.visit_literal(node, context) def visit_time_literal(self, node, context): return self.visit_literal(node, context) def visit_explain_option(self, node, context):", "return self.visit_expression(node, context) def visit_window(self, node, context): return self.visit_node(node, context) def visit_window_frame(self, node,", "visit_dereference_expression(self, node, context): return self.visit_expression(node, context) def visit_null_if_expression(self, node, context): return self.visit_expression(node, context)", "return None def visit_arithmetic_unary(self, node, context): return self.process(node.value, context) def visit_not_expression(self, node, context):", "def visit_show_partitions(self, node, context): return self.visit_statement(node, context) def visit_show_functions(self, node, context): 
return self.visit_statement(node,", "visit_node(self, node, context): pass def visit_expression(self, node, context): return self.visit_node(node, context) def visit_reset_session(self,", "node, context): return self.process(node.sort_key, context) def visit_query_specification(self, node, context): self.process(node.select, context) if node.from_:", "def visit_long_literal(self, node, context): return self.visit_literal(node, context) def visit_logical_binary_expression(self, node, context): return self.visit_expression(node,", "self.visit_node(node, context) def visit_call_argument(self, node, context): return self.visit_node(node, context) def visit_table_element(self, node, context):", "context) def visit_input_reference(self, node, context): return self.visit_expression(node, context) def visit_window(self, node, context): return", "use this file except in compliance with the License. # You may obtain", "context): return self.visit_expression(node, context) def visit_dereference_expression(self, node, context): return self.visit_expression(node, context) def visit_null_if_expression(self,", "visit_searched_case_expression(self, node, context): return self.visit_expression(node, context) def visit_like_predicate(self, node, context): return self.visit_expression(node, context)", "context) def visit_at_time_zone(self, node, context): return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node,", "visit_explain(self, node, context): return self.visit_statement(node, context) def visit_show_tables(self, node, context): return self.visit_statement(node, context)", "node, context): return self.visit_node(node, context) def visit_delete(self, node, context): return self.visit_statement(node, context) def", "node, context): for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_value, context) return", "return self.visit_literal(node, context) def 
visit_logical_binary_expression(self, node, context): return self.visit_expression(node, context) def visit_subquery_expression(self, node,", "self.process(node.inner_expression, context) return None def visit_arithmetic_unary(self, node, context): return self.process(node.value, context) def visit_not_expression(self,", "visit_query_specification(self, node, context): return self.visit_query_body(node, context) def visit_set_operation(self, node, context): return self.visit_query_body(node, context)", "visit_not_expression(self, node, context): return self.process(node.value, context) def visit_searched_case_expression(self, node, context): for clause in", "sort_item in node.order_by: self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame, context) return None def visit_window_frame(self,", "def visit_arithmetic_unary(self, node, context): return self.visit_expression(node, context) def visit_not_expression(self, node, context): return self.visit_expression(node,", "return None def visit_coalesce_expression(self, node, context): for operand in node.operands: self.process(operand, context) return", "return self.visit_statement(node, context) def visit_create_table_as_select(self, node, context): return self.visit_statement(node, context) def visit_drop_table(self, node,", "def visit_between_predicate(self, node, context): return self.visit_expression(node, context) def visit_coalesce_expression(self, node, context): return self.visit_expression(node,", "context): return self.visit_set_operation(node, context) def visit_except(self, node, context): return self.visit_set_operation(node, context) def visit_timestamp_literal(self,", "context) def visit_call_argument(self, node, context): return self.visit_node(node, context) def visit_table_element(self, node, context): return", "def visit_explain(self, node, context): return self.visit_statement(node, context) def visit_show_tables(self, node, context): return self.visit_statement(node,", 
"node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_query(self, node, context): self.process(node.query_body,", "return None def visit_None_if_expression(self, node, context): self.process(node.first, context) self.process(node.second, context) return None def", "context): return self.visit_select_item(node, context) def visit_searched_case_expression(self, node, context): return self.visit_expression(node, context) def visit_like_predicate(self,", "row in node.rows: self.process(row, context) return None def visit_row(self, node, context): for expression", "context) if node.window: self.process(node.window, context) return None def visit_dereference_expression(self, node, context): self.process(node.base, context)", "visit_null_if_expression(self, node, context): return self.visit_expression(node, context) def visit_if_expression(self, node, context): return self.visit_expression(node, context)", "self.visit_query_body(node, context) def visit_union(self, node, context): return self.visit_set_operation(node, context) def visit_intersect(self, node, context):", "context): return self.visit_expression(node, context) def visit_window(self, node, context): return self.visit_node(node, context) def visit_window_frame(self,", "self.process(node.operand, context) self.process(node.result, context) return None def visit_in_predicate(self, node, context): self.process(node.value, context) self.process(node.value_list,", "visit_reset_session(self, node, context): return self.visit_statement(node, context) def visit_current_time(self, node, context): return self.visit_expression(node, context)", "def visit_subscript_expression(self, node, context): return self.visit_expression(node, context) def visit_long_literal(self, node, context): return self.visit_literal(node,", "self.visit_expression(node, context) def visit_cast(self, node, context): return self.visit_expression(node, context) def visit_input_reference(self, 
node, context):", "context): return self.visit_node(node, context) def visit_select(self, node, context): return self.visit_node(node, context) def visit_relation(self,", "<reponame>provingground-moe/lacquer # -*- coding: utf-8 -*- # # Licensed under the Apache License,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "context) def visit_union(self, node, context): return self.visit_set_operation(node, context) def visit_intersect(self, node, context): return", "context) def visit_explain(self, node, context): return self.visit_statement(node, context) def visit_show_tables(self, node, context): return", "node, context): return self.visit_statement(node, context) def visit_drop_table(self, node, context): return self.visit_statement(node, context) def", "process(self, node, context=None): return node.accept(self, context) def visit_node(self, node, context): pass def visit_expression(self,", "context): return self.visit_expression(node, context) def visit_between_predicate(self, node, context): return self.visit_expression(node, context) def visit_coalesce_expression(self,", "visit_literal(self, node, context): return self.visit_expression(node, context) def visit_double_literal(self, node, context): return self.visit_literal(node, context)", "visit_table(self, node, context): return self.visit_query_body(node, context) def visit_unnest(self, node, context): return self.visit_relation(node, context)", "node.default_value: self.process(node.default_valuee, context) return None def visit_in_list_expression(self, node, context): for value in node.values:", "context) for sort_item in node.order_by: self.process(sort_item, context) return None def visit_union(self, node, context):", "node, context): return self.visit_expression(node, context) def visit_coalesce_expression(self, node, context): return self.visit_expression(node, context) def", "self.visit_statement(node, context) def visit_show_tables(self, node, context): return 
self.visit_statement(node, context) def visit_show_schemas(self, node, context):", "node, context): return self.visit_expression(node, context) def visit_sort_item(self, node, context): return self.visit_node(node, context) def", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "context) def visit_like_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_not_null_predicate(self, node, context): return", "node, context): return self.visit_select_item(node, context) def visit_searched_case_expression(self, node, context): return self.visit_expression(node, context) def", "context) def visit_delete(self, node, context): return self.visit_statement(node, context) def visit_start_transaction(self, node, context): return", "context) def visit_start_transaction(self, node, context): return self.visit_statement(node, context) def visit_grant(self, node, context): return", "return self.visit_literal(node, context) def visit_in_predicate(self, node, context): return self.visit_expression(node, context) def visit_function_call(self, node,", "context=None): return node.accept(self, context) def visit_node(self, node, context): pass def visit_expression(self, node, context):", "self.visit_literal(node, context) def visit_in_list_expression(self, node, context): return self.visit_expression(node, context) def visit_qualified_name_reference(self, node, context):", "return self.process(node.expression, context) def visit_arithmetic_binary(self, node, context): self.process(node.left, context) self.process(node.right, context) return None", "context) return None def visit_is_not_None_predicate(self, node, context): return self.process(node.value, context) def visit_is_None_predicate(self, node,", "context): return self.visit_statement(node, context) def visit_drop_view(self, node, context): return self.visit_statement(node, context) def visit_insert(self,", "context): return self.visit_literal(node, context) def 
visit_when_clause(self, node, context): return self.visit_expression(node, context) def visit_interval_literal(self,", "self.process(node.value, context) def visit_not_expression(self, node, context): return self.process(node.value, context) def visit_searched_case_expression(self, node, context):", "node, context): return self.visit_statement(node, context) def visit_rename_table(self, node, context): return self.visit_statement(node, context) def", "context) def visit_sort_item(self, node, context): return self.process(node.sort_key, context) def visit_query_specification(self, node, context): self.process(node.select,", "context): self.process(node.operand, context) for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_valuee, context)", "relation in node.relations: self.process(relation, context) return None def visit_except(self, node, context): self.process(node.left, context)", "with the License. # You may obtain a copy of the License at", "node, context): return self.visit_statement(node, context) def visit_create_table_as_select(self, node, context): return self.visit_statement(node, context) def", "return self.visit_statement(node, context) def visit_show_partitions(self, node, context): return self.visit_statement(node, context) def visit_show_functions(self, node,", "def visit_start_transaction(self, node, context): return self.visit_statement(node, context) def visit_grant(self, node, context): return self.visit_statement(node,", "context): return self.visit_statement(node, context) def visit_show_columns(self, node, context): return self.visit_statement(node, context) def visit_show_partitions(self,", "node, context): return self.visit_expression(node, context) def visit_window(self, node, context): return self.visit_node(node, context) def", "return self.visit_statement(node, context) def visit_show_tables(self, node, context): return self.visit_statement(node, context) def 
visit_show_schemas(self, node,", "self.process(node.time_zone, context) return None def visit_array_constructor(self, node, context): for expression in node.values: self.process(expression,", "context) return None def visit_except(self, node, context): self.process(node.left, context) self.process(node.right, context) return None", "law or agreed to in writing, software # distributed under the License is", "return self.visit_literal(node, context) def visit_time_literal(self, node, context): return self.visit_literal(node, context) def visit_explain_option(self, node,", "the specific language governing permissions and # limitations under the License. from .join_criteria", "context) def visit_explain_option(self, node, context): return self.visit_node(node, context) def visit_with(self, node, context): return", "context) self.process(node.min, context) self.process(node.max, context) return None def visit_coalesce_expression(self, node, context): for operand", "self.process(node.value, context) def visit_is_None_predicate(self, node, context): return self.process(node.value, context) def visit_logical_binary_expression(self, node, context):", "node, context): return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context): return self.process(node.expression,", "node, context): return self.visit_statement(node, context) def visit_set_session(self, node, context): return self.visit_statement(node, context) def", "self.process(node.left, context) self.process(node.right, context) if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns)", "context): pass def visit_expression(self, node, context): return self.visit_node(node, context) def visit_reset_session(self, node, context):", "context): return self.visit_statement(node, context) def visit_start_transaction(self, node, context): return 
self.visit_statement(node, context) def visit_grant(self,", "for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_valuee, context) return None def", "context) def visit_is_None_predicate(self, node, context): return self.process(node.value, context) def visit_logical_binary_expression(self, node, context): self.process(node.left,", "in compliance with the License. # You may obtain a copy of the", "self.process(node.index, context) return None def visit_comparison_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "visit_union(self, node, context): for relation in node.relations: self.process(relation, context) return None def visit_intersect(self,", "return None def visit_frame_bound(self, node, context) if node.value: self.process(node.value, context) return None \"\"\"", "context): self.process(node.left, context) self.process(node.right, context) return None def visit_query(self, node, context): self.process(node.query_body, context)", "context): return self.visit_node(node, context) def visit_table_subquery(self, node, context): return self.visit_query_body(node, context) def visit_aliased_relation(self,", "def visit_table(self, node, context): return self.visit_query_body(node, context) def visit_unnest(self, node, context): return self.visit_relation(node,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "None \"\"\" def visit_window(self, node, context) for expression in node.partition: self.process(expression, context) for", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "return self.visit_expression(node, context) def visit_input_reference(self, node, context): return self.visit_expression(node, context) def visit_window(self, node,", "node, context): 
self.process(node.base, context) self.process(node.index, context) return None def visit_comparison_expression(self, node, context): self.process(node.left,", "visit_coalesce_expression(self, node, context): return self.visit_expression(node, context) def visit_comparison_expression(self, node, context): return self.visit_expression(node, context)", "context) self.process(node.second, context) return None def visit_if_expression(self, node, context): self.process(node.condition, context) self.process(node.true_value, context)", "node, context): return self.visit_statement(node, context) def visit_current_time(self, node, context): return self.visit_expression(node, context) def", "def visit_sampled_relation(self, node, context): return self.visit_relation(node, context) def visit_join(self, node, context): return self.visit_relation(node,", "node, context): return self.process(node.expression, context) def visit_cast(self, node, context): return self.process(node.expression, context) def", "context): return self.visit_node(node, context) def visit_table(self, node, context): return self.visit_query_body(node, context) def visit_unnest(self,", "isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets for grouping_element in grouping_elements: self.process(grouping_element, context) if node.having:", "return self.visit_statement(node, context) def visit_set_session(self, node, context): return self.visit_statement(node, context) def visit_generic_literal(self, node,", "context) return None def visit_in_list_expression(self, node, context): for value in node.values: self.process(value, context)", "self.visit_expression(node, context) def visit_coalesce_expression(self, node, context): return self.visit_expression(node, context) def visit_comparison_expression(self, node, context):", "def visit_rollback(self, node, context): return self.visit_statement(node, context) def visit_at_time_zone(self, node, context): return 
self.visit_expression(node,", "return self.visit_statement(node, context) def visit_at_time_zone(self, node, context): return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def", "for the specific language governing permissions and # limitations under the License. from", "node, context): return self.process(node.expression, context) def visit_arithmetic_binary(self, node, context): self.process(node.left, context) self.process(node.right, context)", "context) def visit_show_schemas(self, node, context): return self.visit_statement(node, context) def visit_show_catalogs(self, node, context): return", "return self.visit_literal(node, context) def visit_when_clause(self, node, context): return self.visit_expression(node, context) def visit_interval_literal(self, node,", "context) def visit_create_table(self, node, context): return self.visit_statement(node, context) def visit_create_table_as_select(self, node, context): return", "node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_between_predicate(self, node, context): self.process(node.value,", "self.visit_statement(node, context) def visit_grant(self, node, context): return self.visit_statement(node, context) def visit_transaction_mode(self, node, context):", "visit_between_predicate(self, node, context): return self.visit_expression(node, context) def visit_coalesce_expression(self, node, context): return self.visit_expression(node, context)", "return None \"\"\" def visit_simple_case_expression(self, node, context): self.process(node.operand, context) for clause in node.when_clauses:", "node, context): return self.visit_expression(node, context) def visit_input_reference(self, node, context): return self.visit_expression(node, context) def", "in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_value, context) return None def visit_like_predicate(self, node,", "def 
visit_reset_session(self, node, context): return self.visit_statement(node, context) def visit_current_time(self, node, context): return self.visit_expression(node,", "context) def visit_commit(self, node, context): return self.visit_statement(node, context) def visit_rollback(self, node, context): return", "expression in node.partition: self.process(expression, context) for sort_item in node.order_by: self.process(sort_item.sort_key, context) if node.frame:", "node, context): return self.process(node.query, context) def visit_aliased_relation(self, node, context): return self.process(node.relation, context) def", "return self.visit_literal(node, context) def visit_in_list_expression(self, node, context): return self.visit_expression(node, context) def visit_qualified_name_reference(self, node,", "def visit_with(self, node, context): for query in node.queries: self.process(query, context) return None def", "self.process(node.value_list, context) return None def visit_function_call(self, node, context): for argument in node.arguments: self.process(argument,", "node, context): return self.visit_statement(node, context) def visit_use(self, node, context): return self.visit_statement(node, context) def", "def visit_transaction_mode(self, node, context): return self.visit_node(node, context) def visit_isolation_level(self, node, context): return self.visit_transaction_mode(node,", "None def visit_single_column(self, node, context): self.process(node.expression, context) return None def visit_when_clause(self, node, context):", "context) def visit_show_catalogs(self, node, context): return self.visit_statement(node, context) def visit_show_columns(self, node, context): return", "def visit_not_expression(self, node, context): return self.process(node.value, context) def visit_searched_case_expression(self, node, context): for clause", "context) if node.value: self.process(node.value, context) return None \"\"\" def visit_simple_case_expression(self, node, context): 
self.process(node.operand,", "return self.visit_literal(node, context) def visit_arithmetic_unary(self, node, context): return self.visit_expression(node, context) def visit_not_expression(self, node,", "context) def visit_select_item(self, node, context): return self.visit_node(node, context) def visit_single_column(self, node, context): return", "node, context): return self.visit_node(node, context) def visit_reset_session(self, node, context): return self.visit_statement(node, context) def", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "node, context): return self.visit_relation(node, context) def visit_sampled_relation(self, node, context): return self.visit_relation(node, context) def", "# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version", "visit_table_subquery(self, node, context): return self.process(node.query, context) def visit_aliased_relation(self, node, context): return self.process(node.relation, context)", "context) def visit_cast(self, node, context): return self.visit_expression(node, context) def visit_input_reference(self, node, context): return", "self.process(node.expression, context) return None def visit_when_clause(self, node, context): self.process(node.operand, context) self.process(node.result, context) return", "def visit_explain_option(self, node, context): return self.visit_node(node, context) def visit_with(self, node, context): return self.visit_node(node,", "return self.visit_expression(node, context) def visit_sort_item(self, node, context): return self.visit_node(node, context) def visit_table(self, node,", "def visit_window(self, node, context): return self.visit_node(node, context) def visit_window_frame(self, node, context): return self.visit_node(node,", "context) def visit_approximate(self, node, context): return self.visit_node(node, context) def visit_with_query(self, node, context): return", "def visit_aliased_relation(self, node, context): return 
self.process(node.relation, context) def visit_sampled_relation(self, node, context): self.process(node.relation, context)", "context): return self.visit_expression(node, context) def visit_simple_case_expression(self, node, context): return self.visit_expression(node, context) def visit_string_literal(self,", "context): for operand in node.operands: self.process(operand, context) return None def visit_at_time_zone(self, node, context):", "visit_array_constructor(self, node, context): for expression in node.values: self.process(expression, context) return None def visit_subscript_expression(self,", "context) self.process(node.time_zone, context) return None def visit_array_constructor(self, node, context): for expression in node.values:", "this file except in compliance with the License. # You may obtain a", "context): return self.visit_node(node, context) def visit_window_frame(self, node, context): return self.visit_node(node, context) def visit_frame_bound(self,", "for sort_item in node.order_by: self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame, context) return None def", "self.process(node.escape, context) return None def visit_is_not_None_predicate(self, node, context): return self.process(node.value, context) def visit_is_None_predicate(self,", "def visit_cast(self, node, context): return self.process(node.expression, context) def visit_arithmetic_binary(self, node, context): self.process(node.left, context)", "node, context): return self.visit_literal(node, context) def visit_in_predicate(self, node, context): return self.visit_expression(node, context) def", "return self.visit_expression(node, context) def visit_like_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_not_null_predicate(self, node,", "visit_not_expression(self, node, context): return self.visit_expression(node, context) def visit_select_item(self, node, context): return self.visit_node(node, context)", "import 
SimpleGroupBy, GroupingSets class AstVisitor(object): def process(self, node, context=None): return node.accept(self, context) def", "visit_is_not_None_predicate(self, node, context): return self.process(node.value, context) def visit_is_None_predicate(self, node, context): return self.process(node.value, context)", "context) return None def visit_subscript_expression(self, node, context): self.process(node.base, context) self.process(node.index, context) return None", "context) if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None class", "return self.visit_node(node, context) def visit_query(self, node, context): return self.visit_statement(node, context) def visit_explain(self, node,", "self.visit_node(node, context) def visit_call(self, node, context): return self.visit_node(node, context) def visit_delete(self, node, context):", "context) self.process(node.index, context) return None def visit_comparison_expression(self, node, context): self.process(node.left, context) self.process(node.right, context)", "context) return None def visit_with(self, node, context): for query in node.queries: self.process(query, context)", "self.process(node.min, context) self.process(node.max, context) return None def visit_coalesce_expression(self, node, context): for operand in", "return self.visit_query_body(node, context) def visit_union(self, node, context): return self.visit_set_operation(node, context) def visit_intersect(self, node,", "visit_select(self, node, context): for item in node.select_items: self.process(item, context) return None def visit_single_column(self,", "node, context): return self.visit_set_operation(node, context) def visit_except(self, node, context): return self.visit_set_operation(node, context) def", "node, context): self.process(node.left, context) self.process(node.right, context) if isinstance(node.criteria, JoinOn): 
self.process(node.criteria.expression, context) elif isinstance(node.criteria,", "if node.group_by: grouping_elements = [] if isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns elif isinstance(node.group_by,", "context) def visit_select(self, node, context): for item in node.select_items: self.process(item, context) return None", "return self.visit_node(node, context) def visit_table_element(self, node, context): return self.visit_node(node, context) def visit_create_table(self, node,", "return self.visit_statement(node, context) def visit_rename_column(self, node, context): return self.visit_statement(node, context) def visit_add_column(self, node,", "self.visit_expression(node, context) def visit_if_expression(self, node, context): return self.visit_expression(node, context) def visit_null_literal(self, node, context):", "node, context): return self.visit_relation(node, context) def visit_exists(self, node, context): return self.visit_expression(node, context) def", "visit_show_tables(self, node, context): return self.visit_statement(node, context) def visit_show_schemas(self, node, context): return self.visit_statement(node, context)", "self.visit_expression(node, context) def visit_array_constructor(self, node, context): return self.visit_expression(node, context) def visit_subscript_expression(self, node, context):", "context) def visit_sampled_relation(self, node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present(): for expression", "context) def visit_sampled_relation(self, node, context): return self.visit_relation(node, context) def visit_join(self, node, context): return", "node, context): for query in node.queries: self.process(query, context) return None def visit_with_query(self, node,", "None def visit_dereference_expression(self, node, context): self.process(node.base, context) return None \"\"\" def visit_window(self, 
node,", "if node.false_value: self.process(node.false_value, context) return None def visit_try_expression(self, node, context): self.process(node.inner_expression, context) return", "return self.visit_expression(node, context) def visit_subquery_expression(self, node, context): return self.visit_expression(node, context) def visit_sort_item(self, node,", "self.visit_statement(node, context) def visit_show_catalogs(self, node, context): return self.visit_statement(node, context) def visit_show_columns(self, node, context):", "def visit_function_call(self, node, context): for argument in node.arguments: self.process(argument, context) if node.window: self.process(node.window,", "def visit_null_literal(self, node, context): return self.visit_literal(node, context) def visit_arithmetic_unary(self, node, context): return self.visit_expression(node,", "context) return None \"\"\" def visit_simple_case_expression(self, node, context): self.process(node.operand, context) for clause in", "visit_single_column(self, node, context): self.process(node.expression, context) return None def visit_when_clause(self, node, context): self.process(node.operand, context)", "node.window: self.process(node.window, context) return None def visit_dereference_expression(self, node, context): self.process(node.base, context) return None", "node.values: self.process(expression, context) return None def visit_subscript_expression(self, node, context): self.process(node.base, context) self.process(node.index, context)", "visit_exists(self, node, context): return self.visit_expression(node, context) def visit_try_expression(self, node, context): return self.visit_expression(node, context)", "visit_rollback(self, node, context): return self.visit_statement(node, context) def visit_at_time_zone(self, node, context): return self.visit_expression(node, context)", "def visit_join(self, node, context): return self.visit_relation(node, context) def visit_exists(self, node, context): return 
self.visit_expression(node,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "context) for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_valuee, context) return None", "for expression in node.get_columns_to_stratify_on().get(): self.process(expression, context) return None def visit_join(self, node, context): self.process(node.left,", "context) def visit_table_subquery(self, node, context): return self.visit_query_body(node, context) def visit_aliased_relation(self, node, context): return", "self.visit_select_item(node, context) def visit_searched_case_expression(self, node, context): return self.visit_expression(node, context) def visit_like_predicate(self, node, context):", "self.visit_node(node, context) def visit_window_frame(self, node, context): return self.visit_node(node, context) def visit_frame_bound(self, node, context):", "def visit_if_expression(self, node, context): self.process(node.condition, context) self.process(node.true_value, context) if node.false_value: self.process(node.false_value, context) return", "visit_sampled_relation(self, node, context): return self.visit_relation(node, context) def visit_join(self, node, context): return self.visit_relation(node, context)", "visit_try_expression(self, node, context): return self.visit_expression(node, context) def visit_cast(self, node, context): return self.visit_expression(node, context)", "required by applicable law or agreed to in writing, software # distributed under", "item in node.select_items: self.process(item, context) return None def visit_single_column(self, node, context): self.process(node.expression, context)", "context): return self.visit_node(node, context) def visit_query_body(self, node, context): return self.visit_relation(node, context) def visit_query_specification(self,", "None def visit_subscript_expression(self, node, context): self.process(node.base, context) 
self.process(node.index, context) return None def visit_comparison_expression(self,", "context): self.process(node.operand, context) self.process(node.result, context) return None def visit_in_predicate(self, node, context): self.process(node.value, context)", "return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context): return self.process(node.expression, context) def", "def visit_searched_case_expression(self, node, context): return self.visit_expression(node, context) def visit_like_predicate(self, node, context): return self.visit_expression(node,", "visit_null_literal(self, node, context): return self.visit_literal(node, context) def visit_arithmetic_unary(self, node, context): return self.visit_expression(node, context)", "visit_window_frame(self, node, context): return self.visit_node(node, context) def visit_frame_bound(self, node, context): return self.visit_node(node, context)", "isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor, self).__init__(line,", "GroupingSets class AstVisitor(object): def process(self, node, context=None): return node.accept(self, context) def visit_node(self, node,", "visit_use(self, node, context): return self.visit_statement(node, context) def visit_show_session(self, node, context): return self.visit_statement(node, context)", "return None def visit_is_not_None_predicate(self, node, context): return self.process(node.value, context) def visit_is_None_predicate(self, node, context):", "self.process(node.right, context) return None def visit_between_predicate(self, node, context): self.process(node.value, context) self.process(node.min, context) self.process(node.max,", "return self.visit_expression(node, context) def visit_coalesce_expression(self, node, context): return 
self.visit_expression(node, context) def visit_comparison_expression(self, node,", "def visit_node(self, node, context): pass def visit_expression(self, node, context): return self.visit_node(node, context) def", "return None def visit_try_expression(self, node, context): self.process(node.inner_expression, context) return None def visit_arithmetic_unary(self, node,", "context): return self.visit_statement(node, context) def visit_rename_column(self, node, context): return self.visit_statement(node, context) def visit_add_column(self,", "def visit_in_list_expression(self, node, context): return self.visit_expression(node, context) def visit_qualified_name_reference(self, node, context): return self.visit_expression(node,", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "self.visit_statement(node, context) def visit_generic_literal(self, node, context): return self.visit_literal(node, context) def visit_time_literal(self, node, context):", "node, context): return self.visit_node(node, context) def visit_create_table(self, node, context): return self.visit_statement(node, context) def", "node, context): return self.visit_statement(node, context) def visit_show_partitions(self, node, context): return self.visit_statement(node, context) def", "context): return self.visit_expression(node, context) def visit_if_expression(self, node, context): return self.visit_expression(node, context) def visit_null_literal(self,", "in node.select_items: self.process(item, context) return None def visit_single_column(self, node, context): self.process(node.expression, context) return", "return self.visit_expression(node, context) def visit_array_constructor(self, node, context): return self.visit_expression(node, context) def visit_subscript_expression(self, node,", "def visit_except(self, node, context): return self.visit_set_operation(node, context) def visit_timestamp_literal(self, node, context): return self.visit_literal(node,", 
"self.process(node.select, context) if node.from_: self.process(node.from_, context) if node.where: self.process(node.where, context) if node.group_by: grouping_elements", "return self.visit_expression(node, context) def visit_lambda_expression(self, node, context): return self.visit_expression(node, context) def visit_simple_case_expression(self, node,", "context) if node.end: self.process(node.end, context) return None def visit_frame_bound(self, node, context) if node.value:", "return self.visit_set_operation(node, context) def visit_timestamp_literal(self, node, context): return self.visit_literal(node, context) def visit_when_clause(self, node,", "node, context): return self.process(node.relation, context) def visit_sampled_relation(self, node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context)", "def process(self, node, context=None): return node.accept(self, context) def visit_node(self, node, context): pass def", "def visit_is_not_None_predicate(self, node, context): return self.process(node.value, context) def visit_is_None_predicate(self, node, context): return self.process(node.value,", "for query in node.queries: self.process(query, context) return None def visit_with_query(self, node, context): return", "context): return self.visit_statement(node, context) def visit_generic_literal(self, node, context): return self.visit_literal(node, context) def visit_time_literal(self,", "None def visit_try_expression(self, node, context): self.process(node.inner_expression, context) return None def visit_arithmetic_unary(self, node, context):", "context): return self.process(node.value, context) def visit_searched_case_expression(self, node, context): for clause in node.when_clauses: self.process(clause,", "node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present(): for expression in node.get_columns_to_stratify_on().get(): 
self.process(expression,", "visit_subscript_expression(self, node, context): return self.visit_expression(node, context) def visit_long_literal(self, node, context): return self.visit_literal(node, context)", "context): return self.process(node.expression, context) def visit_cast(self, node, context): return self.process(node.expression, context) def visit_arithmetic_binary(self,", "return self.visit_expression(node, context) def visit_null_literal(self, node, context): return self.visit_literal(node, context) def visit_arithmetic_unary(self, node,", "node.relations: self.process(relation, context) return None def visit_intersect(self, node, context): for relation in node.relations:", "context) def visit_transaction_mode(self, node, context): return self.visit_node(node, context) def visit_isolation_level(self, node, context): return", "node.order_by: self.process(sort_item.sort_key, context) if node.frame: self.process(node.frame, context) return None def visit_window_frame(self, node, context)", "node, context): return self.visit_node(node, context) def visit_single_column(self, node, context): return self.visit_select_item(node, context) def", "return self.visit_expression(node, context) def visit_not_expression(self, node, context): return self.visit_expression(node, context) def visit_select_item(self, node,", ".join_criteria import JoinOn, JoinUsing from .grouping import SimpleGroupBy, GroupingSets class AstVisitor(object): def process(self,", "def visit_rename_column(self, node, context): return self.visit_statement(node, context) def visit_add_column(self, node, context): return self.visit_statement(node,", "context): return self.process(node.query, context) def visit_aliased_relation(self, node, context): return self.process(node.relation, context) def visit_sampled_relation(self,", "self.visit_statement(node, context) def visit_show_columns(self, node, context): return self.visit_statement(node, context) def visit_show_partitions(self, node, context):", 
"visit_cast(self, node, context): return self.visit_expression(node, context) def visit_input_reference(self, node, context): return self.visit_expression(node, context)", "context): self.process(node.value, context) self.process(node.value_list, context) return None def visit_function_call(self, node, context): for argument", "context): return self.visit_query_body(node, context) def visit_unnest(self, node, context): return self.visit_relation(node, context) def visit_values(self,", "context) def visit_searched_case_expression(self, node, context): for clause in node.when_clauses: self.process(clause, context) if node.default_value:", "node, context): return self.visit_query_body(node, context) def visit_union(self, node, context): return self.visit_set_operation(node, context) def", "def visit_delete(self, node, context): return self.visit_statement(node, context) def visit_start_transaction(self, node, context): return self.visit_statement(node,", "node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_valuee, context) return None def visit_in_list_expression(self, node, context):", "not None: self.process(node.escape, context) return None def visit_is_not_None_predicate(self, node, context): return self.process(node.value, context)", "# you may not use this file except in compliance with the License.", "self.process(node.value, context) def visit_logical_binary_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def", "visit_subquery_expression(self, node, context): return self.visit_expression(node, context) def visit_sort_item(self, node, context): return self.visit_node(node, context)", "None def visit_between_predicate(self, node, context): self.process(node.value, context) self.process(node.min, context) self.process(node.max, context) return None", "self.process(node.true_value, context) if node.false_value: self.process(node.false_value, context) 
return None def visit_try_expression(self, node, context): self.process(node.inner_expression,", "self.process(node.pattern, context) if node.escape is not None: self.process(node.escape, context) return None def visit_is_not_None_predicate(self,", "context): return self.visit_expression(node, context) def visit_interval_literal(self, node, context): return self.visit_literal(node, context) def visit_in_predicate(self,", "node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets for grouping_element in grouping_elements: self.process(grouping_element, context)", "context) def visit_drop_table(self, node, context): return self.visit_statement(node, context) def visit_rename_table(self, node, context): return", "context) def visit_is_not_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_null_predicate(self, node, context): return", "context): for clause in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_value, context) return None", "context) def visit_show_partitions(self, node, context): return self.visit_statement(node, context) def visit_show_functions(self, node, context): return", "return self.visit_expression(node, context) def visit_is_not_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_null_predicate(self, node,", "context) def visit_arithmetic_unary(self, node, context): return self.visit_expression(node, context) def visit_not_expression(self, node, context): return", "context) def visit_single_column(self, node, context): return self.visit_select_item(node, context) def visit_all_columns(self, node, context): return", "context): return self.visit_statement(node, context) def visit_at_time_zone(self, node, context): return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor):", "None def visit_in_predicate(self, node, context): 
self.process(node.value, context) self.process(node.value_list, context) return None def visit_function_call(self,", "self.process(node.right, context) return None def visit_subquery_expression(self, node, context): return self.process(node.query, context) def visit_sort_item(self,", "return self.visit_statement(node, context) def visit_show_functions(self, node, context): return self.visit_statement(node, context) def visit_use(self, node,", "node, context): self.process(node.operand, context) self.process(node.result, context) return None def visit_in_predicate(self, node, context): self.process(node.value,", "for row in node.rows: self.process(row, context) return None def visit_row(self, node, context): for", "self.process(node.left, context) self.process(node.right, context) return None def visit_values(self, node, context): for row in", "License for the specific language governing permissions and # limitations under the License.", "self.visit_expression(node, context) def visit_is_not_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_null_predicate(self, node, context):", "return self.visit_node(node, context) def visit_reset_session(self, node, context): return self.visit_statement(node, context) def visit_current_time(self, node,", "def visit_function_call(self, node, context): return self.visit_expression(node, context) def visit_lambda_expression(self, node, context): return self.visit_expression(node,", "def visit_query(self, node, context): return self.visit_statement(node, context) def visit_explain(self, node, context): return self.visit_statement(node,", "visit_comparison_expression(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_query(self, node, context):", "context) def visit_when_clause(self, node, context): return self.visit_expression(node, context) def visit_interval_literal(self, node, context): return", "return 
self.visit_expression(node, context) def visit_try_expression(self, node, context): return self.visit_expression(node, context) def visit_cast(self, node,", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "and # limitations under the License. from .join_criteria import JoinOn, JoinUsing from .grouping", "node.from_: self.process(node.from_, context) if node.where: self.process(node.where, context) if node.group_by: grouping_elements = [] if", "visit_when_clause(self, node, context): self.process(node.operand, context) self.process(node.result, context) return None def visit_in_predicate(self, node, context):", "visit_except(self, node, context): return self.visit_set_operation(node, context) def visit_timestamp_literal(self, node, context): return self.visit_literal(node, context)", "return self.visit_node(node, context) def visit_single_column(self, node, context): return self.visit_select_item(node, context) def visit_all_columns(self, node,", "self.process(grouping_element, context) if node.having: self.process(node.having, context) for sort_item in node.order_by: self.process(sort_item, context) return", "return None def visit_with_query(self, node, context): return self.process(node.query, context) def visit_select(self, node, context):", "visit_at_time_zone(self, node, context): self.process(node.value, context) self.process(node.time_zone, context) return None def visit_array_constructor(self, node, context):", "visit_grant(self, node, context): return self.visit_statement(node, context) def visit_transaction_mode(self, node, context): return self.visit_node(node, context)", "node, context): return self.visit_expression(node, context) def visit_is_null_predicate(self, node, context): return self.visit_expression(node, context) def", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", 
"in writing, software # distributed under the License is distributed on an \"AS", "self.process(node.default_valuee, context) return None def visit_in_list_expression(self, node, context): for value in node.values: self.process(value,", "def visit_at_time_zone(self, node, context): return self.visit_expression(node, context) class DefaultTraversalVisitor(AstVisitor): def visit_extract(self, node, context):", "from .join_criteria import JoinOn, JoinUsing from .grouping import SimpleGroupBy, GroupingSets class AstVisitor(object): def", "in node.values: self.process(expression, context) return None def visit_subscript_expression(self, node, context): self.process(node.base, context) self.process(node.index,", "self.visit_node(node, context) def visit_relation(self, node, context): return self.visit_node(node, context) def visit_query_body(self, node, context):", "\"\"\" def visit_simple_case_expression(self, node, context): self.process(node.operand, context) for clause in node.when_clauses: self.process(clause, context)", "context) def visit_qualified_name_reference(self, node, context): return self.visit_expression(node, context) def visit_dereference_expression(self, node, context): return", "return self.visit_expression(node, context) def visit_select_item(self, node, context): return self.visit_node(node, context) def visit_single_column(self, node,", "self.visit_set_operation(node, context) def visit_intersect(self, node, context): return self.visit_set_operation(node, context) def visit_except(self, node, context):", "def visit_between_predicate(self, node, context): self.process(node.value, context) self.process(node.min, context) self.process(node.max, context) return None def", "node, context): return self.visit_expression(node, context) def visit_like_predicate(self, node, context): return self.visit_expression(node, context) def", "def visit_literal(self, node, context): return self.visit_expression(node, context) def visit_double_literal(self, node, 
context): return self.visit_literal(node,", "def visit_generic_literal(self, node, context): return self.visit_literal(node, context) def visit_time_literal(self, node, context): return self.visit_literal(node,", "def visit_create_table(self, node, context): return self.visit_statement(node, context) def visit_create_table_as_select(self, node, context): return self.visit_statement(node,", "self.visit_statement(node, context) def visit_drop_view(self, node, context): return self.visit_statement(node, context) def visit_insert(self, node, context):", "return None def visit_with(self, node, context): for query in node.queries: self.process(query, context) return", "if node.end: self.process(node.end, context) return None def visit_frame_bound(self, node, context) if node.value: self.process(node.value,", "self.visit_expression(node, context) def visit_string_literal(self, node, context): return self.visit_literal(node, context) def visit_binary_literal(self, node, context):", "node, context) if node.value: self.process(node.value, context) return None \"\"\" def visit_simple_case_expression(self, node, context):", "self.visit_node(node, context) def visit_table_subquery(self, node, context): return self.visit_query_body(node, context) def visit_aliased_relation(self, node, context):", "DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None, pos=None): super(DefaultExpressionTraversalVisitor, self).__init__(line, pos) def visit_subquery_expression(self, node, context): return", "return self.visit_expression(node, context) def visit_subscript_expression(self, node, context): return self.visit_expression(node, context) def visit_long_literal(self, node,", "context) self.process(node.right, context) return None def visit_query(self, node, context): self.process(node.query_body, context) for sort_item", "node, context): return self.visit_expression(node, context) def visit_long_literal(self, node, context): return self.visit_literal(node, 
context) def", "self.visit_relation(node, context) def visit_exists(self, node, context): return self.visit_expression(node, context) def visit_try_expression(self, node, context):", "return None def visit_subscript_expression(self, node, context): self.process(node.base, context) self.process(node.index, context) return None def", "class AstVisitor(object): def process(self, node, context=None): return node.accept(self, context) def visit_node(self, node, context):", "isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets for grouping_element", "self.visit_statement(node, context) def visit_create_table_as_select(self, node, context): return self.visit_statement(node, context) def visit_drop_table(self, node, context):", "context): return self.visit_expression(node, context) def visit_null_if_expression(self, node, context): return self.visit_expression(node, context) def visit_if_expression(self,", "import JoinOn, JoinUsing from .grouping import SimpleGroupBy, GroupingSets class AstVisitor(object): def process(self, node,", "SimpleGroupBy, GroupingSets class AstVisitor(object): def process(self, node, context=None): return node.accept(self, context) def visit_node(self,", "node, context): return self.visit_node(node, context) def visit_query(self, node, context): return self.visit_statement(node, context) def", "self.visit_literal(node, context) def visit_explain_option(self, node, context): return self.visit_node(node, context) def visit_with(self, node, context):", "self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return None class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor): def __init__(self, line=None,", "context) def visit_null_if_expression(self, node, context): return self.visit_expression(node, context) def visit_if_expression(self, node, context): 
return", "visit_statement(self, node, context): return self.visit_node(node, context) def visit_query(self, node, context): return self.visit_statement(node, context)", "def visit_approximate(self, node, context): return self.visit_node(node, context) def visit_with_query(self, node, context): return self.visit_node(node,", "context): return self.visit_statement(node, context) def visit_insert(self, node, context): return self.visit_node(node, context) def visit_call(self,", "self.visit_statement(node, context) def visit_show_session(self, node, context): return self.visit_statement(node, context) def visit_set_session(self, node, context):", "2.0 (the \"License\"); # you may not use this file except in compliance", "return None def visit_like_predicate(self, node, context): self.process(node.value, context) self.process(node.pattern, context) if node.escape is", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "node, context): return self.visit_expression(node, context) def visit_comparison_expression(self, node, context): return self.visit_expression(node, context) def", "self.process(row, context) return None def visit_row(self, node, context): for expression in node.items: self.process(expression,", "in node.queries: self.process(query, context) return None def visit_with_query(self, node, context): return self.process(node.query, context)", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. 
# See the License for the specific language governing permissions", "context): return self.visit_literal(node, context) def visit_in_list_expression(self, node, context): return self.visit_expression(node, context) def visit_qualified_name_reference(self,", "context): return self.visit_expression(node, context) def visit_select_item(self, node, context): return self.visit_node(node, context) def visit_single_column(self,", "def visit_add_column(self, node, context): return self.visit_statement(node, context) def visit_create_view(self, node, context): return self.visit_statement(node,", "def visit_time_literal(self, node, context): return self.visit_literal(node, context) def visit_explain_option(self, node, context): return self.visit_node(node,", "either express or implied. # See the License for the specific language governing", "return self.visit_literal(node, context) def visit_binary_literal(self, node, context): return self.visit_literal(node, context) def visit_boolean_literal(self, node,", "def visit_relation(self, node, context): return self.visit_node(node, context) def visit_query_body(self, node, context): return self.visit_relation(node,", "context) def visit_logical_binary_expression(self, node, context): return self.visit_expression(node, context) def visit_subquery_expression(self, node, context): return", "return self.visit_expression(node, context) def visit_null_if_expression(self, node, context): return self.visit_expression(node, context) def visit_if_expression(self, node,", "return self.visit_statement(node, context) def visit_show_columns(self, node, context): return self.visit_statement(node, context) def visit_show_partitions(self, node,", "context) def visit_drop_view(self, node, context): return self.visit_statement(node, context) def visit_insert(self, node, context): return", "context): return self.process(node.value, context) def visit_not_expression(self, node, context): return self.process(node.value, context) def 
visit_searched_case_expression(self,", "node.rows: self.process(row, context) return None def visit_row(self, node, context): for expression in node.items:", "def visit_simple_case_expression(self, node, context): return self.visit_expression(node, context) def visit_string_literal(self, node, context): return self.visit_literal(node,", "visit_subscript_expression(self, node, context): self.process(node.base, context) self.process(node.index, context) return None def visit_comparison_expression(self, node, context):", "visit_intersect(self, node, context): return self.visit_set_operation(node, context) def visit_except(self, node, context): return self.visit_set_operation(node, context)", "visit_None_if_expression(self, node, context): self.process(node.first, context) self.process(node.second, context) return None def visit_if_expression(self, node, context):", "None def visit_array_constructor(self, node, context): for expression in node.values: self.process(expression, context) return None", "context) self.process(node.right, context) return None def visit_values(self, node, context): for row in node.rows:", "visit_join(self, node, context): return self.visit_relation(node, context) def visit_exists(self, node, context): return self.visit_expression(node, context)", "self.process(node.relation, context) def visit_sampled_relation(self, node, context): self.process(node.relation, context) self.process(node.get_sample_percentage(), context) if node.get_columns_to_stratify_on().is_present(): for", "def visit_expression(self, node, context): return self.visit_node(node, context) def visit_reset_session(self, node, context): return self.visit_statement(node,", "the License. 
# You may obtain a copy of the License at #", "node, context): for expression in node.values: self.process(expression, context) return None def visit_subscript_expression(self, node,", "None def visit_join(self, node, context): self.process(node.left, context) self.process(node.right, context) if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression,", "limitations under the License. from .join_criteria import JoinOn, JoinUsing from .grouping import SimpleGroupBy,", "visit_is_None_predicate(self, node, context): return self.process(node.value, context) def visit_logical_binary_expression(self, node, context): self.process(node.left, context) self.process(node.right,", "node, context): return self.visit_query_body(node, context) def visit_set_operation(self, node, context): return self.visit_query_body(node, context) def", "self.visit_expression(node, context) def visit_comparison_expression(self, node, context): return self.visit_expression(node, context) def visit_literal(self, node, context):", "visit_commit(self, node, context): return self.visit_statement(node, context) def visit_rollback(self, node, context): return self.visit_statement(node, context)", "self.visit_relation(node, context) def visit_values(self, node, context): return self.visit_query_body(node, context) def visit_row(self, node, context):", "visit_query(self, node, context): return self.visit_statement(node, context) def visit_explain(self, node, context): return self.visit_statement(node, context)", "node, context): return self.visit_query_body(node, context) def visit_row(self, node, context): return self.visit_node(node, context) def", "context): return self.visit_expression(node, context) def visit_is_not_null_predicate(self, node, context): return self.visit_expression(node, context) def visit_is_null_predicate(self,", "return self.visit_node(node, context) def visit_delete(self, node, context): return self.visit_statement(node, context) def 
visit_start_transaction(self, node,", "context) self.process(node.result, context) return None def visit_in_predicate(self, node, context): self.process(node.value, context) self.process(node.value_list, context)", "node, context): return self.visit_statement(node, context) def visit_show_columns(self, node, context): return self.visit_statement(node, context) def", "return None def visit_window_frame(self, node, context) self.process(node.start, context) if node.end: self.process(node.end, context) return", "in node.relations: self.process(relation, context) return None def visit_except(self, node, context): self.process(node.left, context) self.process(node.right,", "def visit_set_operation(self, node, context): return self.visit_query_body(node, context) def visit_union(self, node, context): return self.visit_set_operation(node,", "return self.process(node.query, context) def visit_sort_item(self, node, context): return self.process(node.sort_key, context) def visit_query_specification(self, node,", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "return None def visit_between_predicate(self, node, context): self.process(node.value, context) self.process(node.min, context) self.process(node.max, context) return", "return self.process(node.value, context) def visit_not_expression(self, node, context): return self.process(node.value, context) def visit_searched_case_expression(self, node,", "context) def visit_grant(self, node, context): return self.visit_statement(node, context) def visit_transaction_mode(self, node, context): return", "return self.visit_expression(node, context) def visit_string_literal(self, node, context): return self.visit_literal(node, context) def visit_binary_literal(self, node,", "context) def visit_add_column(self, node, context): return self.visit_statement(node, context) def visit_create_view(self, node, context): return", "def visit_frame_bound(self, node, context) if node.value: self.process(node.value, 
context) return None \"\"\" def visit_simple_case_expression(self,", "context): return self.process(node.sort_key, context) def visit_query_specification(self, node, context): self.process(node.select, context) if node.from_: self.process(node.from_,", "context) return None def visit_None_if_expression(self, node, context): self.process(node.first, context) self.process(node.second, context) return None", "visit_sort_item(self, node, context): return self.visit_node(node, context) def visit_table(self, node, context): return self.visit_query_body(node, context)", "in node.rows: self.process(row, context) return None def visit_row(self, node, context): for expression in", "def visit_grant(self, node, context): return self.visit_statement(node, context) def visit_transaction_mode(self, node, context): return self.visit_node(node,", "node, context): return self.visit_node(node, context) def visit_with(self, node, context): return self.visit_node(node, context) def", "def visit_drop_view(self, node, context): return self.visit_statement(node, context) def visit_insert(self, node, context): return self.visit_node(node,", "def visit_arithmetic_binary(self, node, context): self.process(node.left, context) self.process(node.right, context) return None def visit_between_predicate(self, node,", "visit_intersect(self, node, context): for relation in node.relations: self.process(relation, context) return None def visit_except(self,", "node, context): return self.visit_statement(node, context) def visit_grant(self, node, context): return self.visit_statement(node, context) def", "context) if node.frame: self.process(node.frame, context) return None def visit_window_frame(self, node, context) self.process(node.start, context)", "context) self.process(node.right, context) if isinstance(node.criteria, JoinOn): self.process(node.criteria.expression, context) elif isinstance(node.criteria, JoinUsing): self.process(node.criteria.columns) return", "self.visit_node(node, context) def 
visit_select(self, node, context): return self.visit_node(node, context) def visit_relation(self, node, context):", "node, context): return self.process(node.value, context) def visit_searched_case_expression(self, node, context): for clause in node.when_clauses:", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "node, context): return self.visit_expression(node, context) def visit_select_item(self, node, context): return self.visit_node(node, context) def", "context): for relation in node.relations: self.process(relation, context) return None def visit_intersect(self, node, context):", "node, context): return self.visit_statement(node, context) def visit_explain(self, node, context): return self.visit_statement(node, context) def", "def visit_intersect(self, node, context): for relation in node.relations: self.process(relation, context) return None def", "node, context): return self.visit_literal(node, context) def visit_binary_literal(self, node, context): return self.visit_literal(node, context) def", "expression in node.get_columns_to_stratify_on().get(): self.process(expression, context) return None def visit_join(self, node, context): self.process(node.left, context)", "return self.visit_node(node, context) def visit_isolation_level(self, node, context): return self.visit_transaction_mode(node, context) def visit_transaction_access_mode(self, node,", "node.items: self.process(expression, context) return None def visit_table_subquery(self, node, context): return self.process(node.query, context) def", "return self.visit_statement(node, context) def visit_grant(self, node, context): return self.visit_statement(node, context) def visit_transaction_mode(self, node,", "self.process(expression, context) return None def visit_subscript_expression(self, node, context): self.process(node.base, context) self.process(node.index, context) return", "node, context): self.process(node.value, context) 
self.process(node.min, context) self.process(node.max, context) return None def visit_coalesce_expression(self, node,", "coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the", "def visit_select(self, node, context): return self.visit_node(node, context) def visit_relation(self, node, context): return self.visit_node(node,", "sort_item in node.order_by: self.process(sort_item, context) return None def visit_union(self, node, context): for relation", "in node.when_clauses: self.process(clause, context) if node.default_value: self.process(node.default_valuee, context) return None def visit_in_list_expression(self, node,", "visit_with_query(self, node, context): return self.process(node.query, context) def visit_select(self, node, context): for item in", "self.visit_expression(node, context) def visit_long_literal(self, node, context): return self.visit_literal(node, context) def visit_logical_binary_expression(self, node, context):", "None def visit_union(self, node, context): for relation in node.relations: self.process(relation, context) return None", "None def visit_like_predicate(self, node, context): self.process(node.value, context) self.process(node.pattern, context) if node.escape is not", "visit_drop_view(self, node, context): return self.visit_statement(node, context) def visit_insert(self, node, context): return self.visit_node(node, context)", "None def visit_function_call(self, node, context): for argument in node.arguments: self.process(argument, context) if node.window:", "self.process(node.where, context) if node.group_by: grouping_elements = [] if isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns", "except in compliance with the License. 
# You may obtain a copy of", "node, context): return self.visit_statement(node, context) def visit_show_session(self, node, context): return self.visit_statement(node, context) def", "context) return None def visit_row(self, node, context): for expression in node.items: self.process(expression, context)", "return None def visit_row(self, node, context): for expression in node.items: self.process(expression, context) return", "self.visit_statement(node, context) def visit_current_time(self, node, context): return self.visit_expression(node, context) def visit_extract(self, node, context):", "return None def visit_at_time_zone(self, node, context): self.process(node.value, context) self.process(node.time_zone, context) return None def", "context): return self.visit_query_body(node, context) def visit_aliased_relation(self, node, context): return self.visit_relation(node, context) def visit_sampled_relation(self,", "self.process(expression, context) return None def visit_join(self, node, context): self.process(node.left, context) self.process(node.right, context) if", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "visit_delete(self, node, context): return self.visit_statement(node, context) def visit_start_transaction(self, node, context): return self.visit_statement(node, context)", "context) for expression in node.partition: self.process(expression, context) for sort_item in node.order_by: self.process(sort_item.sort_key, context)", "pass def visit_expression(self, node, context): return self.visit_node(node, context) def visit_reset_session(self, node, context): return", "return None def visit_if_expression(self, node, context): self.process(node.condition, context) self.process(node.true_value, context) if node.false_value: self.process(node.false_value,", "context): return self.visit_relation(node, context) def visit_query_specification(self, node, context): return self.visit_query_body(node, context) def visit_set_operation(self,", "context) def visit_set_session(self, node, context): return self.visit_statement(node, context) def visit_generic_literal(self, node, context): return", "if node.window: self.process(node.window, context) return None def visit_dereference_expression(self, node, context): self.process(node.base, context) return", "context) def visit_function_call(self, node, context): return self.visit_expression(node, context) def visit_lambda_expression(self, node, context): return", "context): for expression in node.values: self.process(expression, context) return None def visit_subscript_expression(self, node, context):", "self.process(query, context) return None def visit_with_query(self, node, context): return self.process(node.query, context) def visit_select(self,", "context): self.process(node.left, context) self.process(node.right, context) return None def visit_values(self, node, context): for row", "def visit_in_predicate(self, node, context): return self.visit_expression(node, context) def visit_function_call(self, node, context): return 
self.visit_expression(node,", "self.visit_literal(node, context) def visit_arithmetic_unary(self, node, context): return self.visit_expression(node, context) def visit_not_expression(self, node, context):", "context) if node.where: self.process(node.where, context) if node.group_by: grouping_elements = [] if isinstance(node.group_by, SimpleGroupBy):", "for expression in node.values: self.process(expression, context) return None def visit_subscript_expression(self, node, context): self.process(node.base,", "under the License. from .join_criteria import JoinOn, JoinUsing from .grouping import SimpleGroupBy, GroupingSets", "return None def visit_join(self, node, context): self.process(node.left, context) self.process(node.right, context) if isinstance(node.criteria, JoinOn):", "visit_binary_literal(self, node, context): return self.visit_literal(node, context) def visit_boolean_literal(self, node, context): return self.visit_literal(node, context)", "if isinstance(node.group_by, SimpleGroupBy): grouping_elements = node.group_by.columns elif isinstance(node.group_by, GroupingSets): grouping_elements = node.group_by.sets for", "self.process(node.end, context) return None def visit_frame_bound(self, node, context) if node.value: self.process(node.value, context) return", "def visit_None_if_expression(self, node, context): self.process(node.first, context) self.process(node.second, context) return None def visit_if_expression(self, node,", "node, context): return self.visit_expression(node, context) def visit_null_if_expression(self, node, context): return self.visit_expression(node, context) def", "self.process(node.value, context) self.process(node.min, context) self.process(node.max, context) return None def visit_coalesce_expression(self, node, context): for", "visit_add_column(self, node, context): return self.visit_statement(node, context) def visit_create_view(self, node, context): return self.visit_statement(node, context)", "node.end: self.process(node.end, 
context) return None def visit_frame_bound(self, node, context) if node.value: self.process(node.value, context)", "node, context): return self.visit_node(node, context) def visit_select(self, node, context): return self.visit_node(node, context) def" ]
[ "app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict', methods=['POST']) def predict(): if request.method == 'POST': if", "return jsonify({'error': 'no file'}), 400 # Image info img_file = request.files.get('file') img_name =", "import Flask from flask import jsonify from flask import request, render_template from skinapp", "render_template('index.html', samples=samples) @app.route('/predict', methods=['POST']) def predict(): if request.method == 'POST': if 'file' not", "jsonify from flask import request, render_template from skinapp import app from model.utils import", "if request.method == 'POST': if 'file' not in request.files: return jsonify({'error': 'no file'}),", "glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict', methods=['POST']) def predict(): if request.method ==", "from skinapp import app from model.utils import * from model.skinmodel import * valid_mimetypes", "image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction", "Flask from flask import jsonify from flask import request, render_template from skinapp import", "request.files.get('file') img_name = img_file.filename mimetype = img_file.content_type # Return an error if not", "'bad-type'}) # Write image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name))", "mimetype = img_file.content_type # Return an error if not a valid mimetype if", "import request, render_template from skinapp import app from model.utils import * from model.skinmodel", "= get_predictions(img) # Delete image when done with analysis os.remove(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) return jsonify(res)", "@app.route('/') def index(): samples = glob.glob(\"%s/*\" % 
app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict', methods=['POST'])", "if mimetype not in valid_mimetypes: return jsonify({'error': 'bad-type'}) # Write image to static", "predict(): if request.method == 'POST': if 'file' not in request.files: return jsonify({'error': 'no", "from model.utils import * from model.skinmodel import * valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/')", "glob from flask import Flask from flask import jsonify from flask import request,", "request.files: return jsonify({'error': 'no file'}), 400 # Image info img_file = request.files.get('file') img_name", "index(): samples = glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict', methods=['POST']) def predict():", "== 'POST': if 'file' not in request.files: return jsonify({'error': 'no file'}), 400 #", "os import glob from flask import Flask from flask import jsonify from flask", "request, render_template from skinapp import app from model.utils import * from model.skinmodel import", "from flask import jsonify from flask import request, render_template from skinapp import app", "static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction on the", "Run Prediction on the model res = get_predictions(img) # Delete image when done", "import jsonify from flask import request, render_template from skinapp import app from model.utils", "valid_mimetypes: return jsonify({'error': 'bad-type'}) # Write image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img", "* valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/') def index(): samples = glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER'])", "Prediction on the model res = get_predictions(img) # Delete image when done with", "an error if not a valid mimetype if 
mimetype not in valid_mimetypes: return", "img_file = request.files.get('file') img_name = img_file.filename mimetype = img_file.content_type # Return an error", "'image/png'] @app.route('/') def index(): samples = glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict',", "img_file.filename mimetype = img_file.content_type # Return an error if not a valid mimetype", "flask import request, render_template from skinapp import app from model.utils import * from", "# Image info img_file = request.files.get('file') img_name = img_file.filename mimetype = img_file.content_type #", "= img_file.content_type # Return an error if not a valid mimetype if mimetype", "= open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction on the model res = get_predictions(img) #", "def index(): samples = glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict', methods=['POST']) def", "from flask import Flask from flask import jsonify from flask import request, render_template", "img_name)) # Run Prediction on the model res = get_predictions(img) # Delete image", "img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction on the model res =", "'no file'}), 400 # Image info img_file = request.files.get('file') img_name = img_file.filename mimetype", "= request.files.get('file') img_name = img_file.filename mimetype = img_file.content_type # Return an error if", "Image info img_file = request.files.get('file') img_name = img_file.filename mimetype = img_file.content_type # Return", "flask import jsonify from flask import request, render_template from skinapp import app from", "import app from model.utils import * from model.skinmodel import * valid_mimetypes = ['image/jpeg',", "file'}), 400 # Image info img_file = request.files.get('file') img_name = img_file.filename mimetype =", "from flask 
import request, render_template from skinapp import app from model.utils import *", "Return an error if not a valid mimetype if mimetype not in valid_mimetypes:", "skinapp import app from model.utils import * from model.skinmodel import * valid_mimetypes =", "import * valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/') def index(): samples = glob.glob(\"%s/*\" %", "img_file.content_type # Return an error if not a valid mimetype if mimetype not", "Write image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run", "a valid mimetype if mimetype not in valid_mimetypes: return jsonify({'error': 'bad-type'}) # Write", "res = get_predictions(img) # Delete image when done with analysis os.remove(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) return", "request.method == 'POST': if 'file' not in request.files: return jsonify({'error': 'no file'}), 400", "= img_file.filename mimetype = img_file.content_type # Return an error if not a valid", "400 # Image info img_file = request.files.get('file') img_name = img_file.filename mimetype = img_file.content_type", "% app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict', methods=['POST']) def predict(): if request.method == 'POST':", "img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction on the model res = get_predictions(img)", "info img_file = request.files.get('file') img_name = img_file.filename mimetype = img_file.content_type # Return an", "samples=samples) @app.route('/predict', methods=['POST']) def predict(): if request.method == 'POST': if 'file' not in", "return jsonify({'error': 'bad-type'}) # Write image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img =", "on the model res = get_predictions(img) # Delete image when done with analysis", "to static directory 
img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction on", "jsonify({'error': 'bad-type'}) # Write image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'],", "= glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict', methods=['POST']) def predict(): if request.method", "valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/') def index(): samples = glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return", "@app.route('/predict', methods=['POST']) def predict(): if request.method == 'POST': if 'file' not in request.files:", "samples = glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples) @app.route('/predict', methods=['POST']) def predict(): if", "not in valid_mimetypes: return jsonify({'error': 'bad-type'}) # Write image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'],", "img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction on the model res", "open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction on the model res = get_predictions(img) # Delete", "not a valid mimetype if mimetype not in valid_mimetypes: return jsonify({'error': 'bad-type'}) #", "* from model.skinmodel import * valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/') def index(): samples", "mimetype if mimetype not in valid_mimetypes: return jsonify({'error': 'bad-type'}) # Write image to", "# Write image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) #", "return render_template('index.html', samples=samples) 
@app.route('/predict', methods=['POST']) def predict(): if request.method == 'POST': if 'file'", "render_template from skinapp import app from model.utils import * from model.skinmodel import *", "from model.skinmodel import * valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/') def index(): samples =", "import os import glob from flask import Flask from flask import jsonify from", "# Run Prediction on the model res = get_predictions(img) # Delete image when", "mimetype not in valid_mimetypes: return jsonify({'error': 'bad-type'}) # Write image to static directory", "app from model.utils import * from model.skinmodel import * valid_mimetypes = ['image/jpeg', 'image/png']", "'POST': if 'file' not in request.files: return jsonify({'error': 'no file'}), 400 # Image", "import * from model.skinmodel import * valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/') def index():", "jsonify({'error': 'no file'}), 400 # Image info img_file = request.files.get('file') img_name = img_file.filename", "flask import Flask from flask import jsonify from flask import request, render_template from", "model.utils import * from model.skinmodel import * valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/') def", "if 'file' not in request.files: return jsonify({'error': 'no file'}), 400 # Image info", "model.skinmodel import * valid_mimetypes = ['image/jpeg', 'image/png'] @app.route('/') def index(): samples = glob.glob(\"%s/*\"", "methods=['POST']) def predict(): if request.method == 'POST': if 'file' not in request.files: return", "'file' not in request.files: return jsonify({'error': 'no file'}), 400 # Image info img_file", "if not a valid mimetype if mimetype not in valid_mimetypes: return jsonify({'error': 'bad-type'})", "in valid_mimetypes: return jsonify({'error': 'bad-type'}) # Write image to static directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name))", "= ['image/jpeg', 'image/png'] @app.route('/') def index(): samples = 
glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return render_template('index.html',", "img_name = img_file.filename mimetype = img_file.content_type # Return an error if not a", "in request.files: return jsonify({'error': 'no file'}), 400 # Image info img_file = request.files.get('file')", "def predict(): if request.method == 'POST': if 'file' not in request.files: return jsonify({'error':", "the model res = get_predictions(img) # Delete image when done with analysis os.remove(os.path.join(app.config['UPLOAD_FOLDER'],", "model res = get_predictions(img) # Delete image when done with analysis os.remove(os.path.join(app.config['UPLOAD_FOLDER'], img_name))", "# Return an error if not a valid mimetype if mimetype not in", "error if not a valid mimetype if mimetype not in valid_mimetypes: return jsonify({'error':", "directory img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name)) # Run Prediction on the model", "valid mimetype if mimetype not in valid_mimetypes: return jsonify({'error': 'bad-type'}) # Write image", "import glob from flask import Flask from flask import jsonify from flask import", "not in request.files: return jsonify({'error': 'no file'}), 400 # Image info img_file =", "['image/jpeg', 'image/png'] @app.route('/') def index(): samples = glob.glob(\"%s/*\" % app.config['SAMPLE_FOLDER']) return render_template('index.html', samples=samples)" ]
[ "= left self.right = right class Solution: def getMinimumDifference(self, root): vals = []", "= abs(vals[1]-vals[0]) for i in range(1, len(vals)): min_dist = min(min_dist, abs(vals[i]-vals[i-1])) return min_dist", "Search Tree (BST), return the minimum absolute difference between the values of any", "left=None, right=None): self.val = val self.left = left self.right = right class Solution:", "self.right = right class Solution: def getMinimumDifference(self, root): vals = [] def in_order(node):", "min absolute difference in BST Given the root of a Binary Search Tree", "between the values of any two different nodes in the tree. \"\"\" #", "= val self.left = left self.right = right class Solution: def getMinimumDifference(self, root):", "= [] def in_order(node): if node.left: in_order(node.left) vals.append(node.val) if node.right: in_order(node.right) in_order(root) min_dist", "TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right", "vals = [] def in_order(node): if node.left: in_order(node.left) vals.append(node.val) if node.right: in_order(node.right) in_order(root)", "__init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right = right", "vals.append(node.val) if node.right: in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0]) for i in range(1, len(vals)):", "nodes in the tree. \"\"\" # Definition for a binary tree node. class", "difference in BST Given the root of a Binary Search Tree (BST), return", "a Binary Search Tree (BST), return the minimum absolute difference between the values", "Tree (BST), return the minimum absolute difference between the values of any two", "two different nodes in the tree. 
\"\"\" # Definition for a binary tree", "self.val = val self.left = left self.right = right class Solution: def getMinimumDifference(self,", "return the minimum absolute difference between the values of any two different nodes", "the minimum absolute difference between the values of any two different nodes in", "def __init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right =", "val=0, left=None, right=None): self.val = val self.left = left self.right = right class", "node. class TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val self.left =", "= right class Solution: def getMinimumDifference(self, root): vals = [] def in_order(node): if", "left self.right = right class Solution: def getMinimumDifference(self, root): vals = [] def", "absolute difference between the values of any two different nodes in the tree.", "Given the root of a Binary Search Tree (BST), return the minimum absolute", "absolute difference in BST Given the root of a Binary Search Tree (BST),", "getMinimumDifference(self, root): vals = [] def in_order(node): if node.left: in_order(node.left) vals.append(node.val) if node.right:", "the values of any two different nodes in the tree. \"\"\" # Definition", "\"\"\" # Definition for a binary tree node. class TreeNode: def __init__(self, val=0,", "any two different nodes in the tree. \"\"\" # Definition for a binary", "root of a Binary Search Tree (BST), return the minimum absolute difference between", "tree node. 
class TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val self.left", "[] def in_order(node): if node.left: in_order(node.left) vals.append(node.val) if node.right: in_order(node.right) in_order(root) min_dist =", "<filename>Q530.py \"\"\" 530 easy min absolute difference in BST Given the root of", "Solution: def getMinimumDifference(self, root): vals = [] def in_order(node): if node.left: in_order(node.left) vals.append(node.val)", "the root of a Binary Search Tree (BST), return the minimum absolute difference", "BST Given the root of a Binary Search Tree (BST), return the minimum", "self.left = left self.right = right class Solution: def getMinimumDifference(self, root): vals =", "in_order(root) min_dist = abs(vals[1]-vals[0]) for i in range(1, len(vals)): min_dist = min(min_dist, abs(vals[i]-vals[i-1]))", "root): vals = [] def in_order(node): if node.left: in_order(node.left) vals.append(node.val) if node.right: in_order(node.right)", "node.left: in_order(node.left) vals.append(node.val) if node.right: in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0]) for i in", "binary tree node. class TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val", "the tree. \"\"\" # Definition for a binary tree node. class TreeNode: def", "tree. \"\"\" # Definition for a binary tree node. class TreeNode: def __init__(self,", "node.right: in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0]) for i in range(1, len(vals)): min_dist =", "for a binary tree node. class TreeNode: def __init__(self, val=0, left=None, right=None): self.val", "min_dist = abs(vals[1]-vals[0]) for i in range(1, len(vals)): min_dist = min(min_dist, abs(vals[i]-vals[i-1])) return", "in the tree. \"\"\" # Definition for a binary tree node. class TreeNode:", "Definition for a binary tree node. 
class TreeNode: def __init__(self, val=0, left=None, right=None):", "class Solution: def getMinimumDifference(self, root): vals = [] def in_order(node): if node.left: in_order(node.left)", "if node.left: in_order(node.left) vals.append(node.val) if node.right: in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0]) for i", "if node.right: in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0]) for i in range(1, len(vals)): min_dist", "in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0]) for i in range(1, len(vals)): min_dist = min(min_dist,", "in BST Given the root of a Binary Search Tree (BST), return the", "right=None): self.val = val self.left = left self.right = right class Solution: def", "right class Solution: def getMinimumDifference(self, root): vals = [] def in_order(node): if node.left:", "def getMinimumDifference(self, root): vals = [] def in_order(node): if node.left: in_order(node.left) vals.append(node.val) if", "minimum absolute difference between the values of any two different nodes in the", "Binary Search Tree (BST), return the minimum absolute difference between the values of", "different nodes in the tree. \"\"\" # Definition for a binary tree node.", "530 easy min absolute difference in BST Given the root of a Binary", "a binary tree node. class TreeNode: def __init__(self, val=0, left=None, right=None): self.val =", "# Definition for a binary tree node. class TreeNode: def __init__(self, val=0, left=None,", "in_order(node.left) vals.append(node.val) if node.right: in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0]) for i in range(1,", "class TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val self.left = left", "easy min absolute difference in BST Given the root of a Binary Search", "values of any two different nodes in the tree. 
\"\"\" # Definition for", "in_order(node): if node.left: in_order(node.left) vals.append(node.val) if node.right: in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0]) for", "val self.left = left self.right = right class Solution: def getMinimumDifference(self, root): vals", "difference between the values of any two different nodes in the tree. \"\"\"", "of any two different nodes in the tree. \"\"\" # Definition for a", "def in_order(node): if node.left: in_order(node.left) vals.append(node.val) if node.right: in_order(node.right) in_order(root) min_dist = abs(vals[1]-vals[0])", "of a Binary Search Tree (BST), return the minimum absolute difference between the", "(BST), return the minimum absolute difference between the values of any two different", "\"\"\" 530 easy min absolute difference in BST Given the root of a" ]
[ "key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"]", "linestyles=ls, color=color ) text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size ) ax.text(", "[] others = [] finalize = [] jitget = [] DETAIL = {", "= 0.1) plt.clf() plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ): import matplotlib.pyplot as", "'SCALE': scale_l.append( dat_ ) except: print( \"Failed\", data ) scale_l = np.array( scale_l", "dat_ ) if key == \"READ_OBS\": dat_ -= dat_jit_ print( \"#### \", key,", "print( \"Failed\", data ) elif '##### TIMER' in l: data = l.split() try:", "\"Failed\", data ) elif '[Info:DA]' in l: data = l.split() try: ctimes.append( float(", ") xlab = 'Computation time (s)' ylab = 'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel(", "data[5] ) if tit_ == 'SCALE': i += 1 if tit_ == \"WRITE\":", "\"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key == \"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif", "\"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\",", "DETAIL: DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_] = dat_ except: print( \"Failed\", data )", "assimilation\", \"30-min forecast\" ] pnum_l = [ \"(a)\", \"(b)\" ] for i, ax", "'{0:} average: {1:} (N: {2:})'.format( \"cycle\", np.nanmean( ctimes ), len(ctimes) ) ) print(", "continue print( \"check\", dic2[key] ) ax1.bar( 2.0, dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1 )", "dat_ ) except: print( \"Failed\", data ) scale_l = np.array( scale_l ) key_l", "ddof=1 ) / np.power( dat.size, 1.0/3.0) #bins = int( ( xmax - xmin", "num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) #DETAIL_MODE[key] = mode_", "DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test < min_read_obs ) | ( read_obs_test 
> max_read_obs )]", "key_ = key.replace( ' ', '_' ).replace( '/', '_' ) #.replace( '(', '_'", "key in DETAIL.keys(): DETAIL[key] = np.array( DETAIL[key] ) return( ftimes, ctimes, DETAIL )", "= 'Memory copy' elif lab == 'JIT-DT': continue ax.bar( '', dic[key], bottom=acm, label=lab,", "'Computation time (s)' ylab = 'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_", ") ] num = len( dat_ ) if key == \"READ_OBS\": dat_ -=", "0.0, # \"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0, } fn_sum = '{0:}/SUM.npz'.format( data_path, )", "which='both', bottom=False, top=False, labelbottom=False ) ax_l = [ ax1, ax2 ] tit_l =", "transform=ax.transAxes, ha='right', va='top' ) tit_ = key ax.text( 0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes,", "#USE_ARCH_DAT = False quick_hist = False quick_bar = True quick_bar = False def", "bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') #### SUM = { \"SCALE\": 0.0, \"LETKF\":", "f.is_file() ] ftimes = [] ctimes = [] path_l = [] init =", "< min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan #dat_jit_test =", "4 ), lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim( 0, 151.0 ) ax2.set_xlim( 0,", "bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0 ) width1 = 0.8 #c_l", "min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan #dat_jit_test = dat_jit_test[", "va='bottom' ) # ofig = 'png/2p_d4_bar_scale.png' print( ofig ) if quick_bar: plt.show() else:", "as npz: for key in SUM.keys(): SUM[key] = npz[key] ftimes = np.load( fn_ftimes,", "1.00) ) handles, labels = ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 )", "reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12 ) ax1.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0,", "dic={}, ftimes=np.array([]), dic2={} ): import matplotlib.pyplot as plt fig, ( ax1,ax2 ) =", "= 4000 #dat_.size ls = 
'dashed' color = 'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax,", "ftimes.append( float( data[7] ) ) except: print( \"Failed\", data ) elif '[Info:DA]' in", "import sys from datetime import datetime, timedelta import numpy as np data_path =", "dic={} ): import matplotlib.pyplot as plt fig, ax = plt.subplots( 1, 1, figsize=(5,5)", "\"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif key == \"READ_GUES\" or key == \"WRITE_ANAL\":", ") def plot_bar_2p( dic={}, ftimes=np.array([]) ): import matplotlib.pyplot as plt fig, ( ax1,ax2", "ax1, ax2 ] tit_l = [ \"Data assimilation\", \"30-min forecast\" ] pnum_l =", ") #DETAIL_MODE[key] = mode_ DETAIL_MODE[key] = mean_ else: print( 'Not plot ', key)", "dic.keys() ): lab = key if lab == 'OBS': lab = 'Obs pre-\\nprocessing'", "\"Failed\", data ) return( ftimes, ctimes, DETAIL ) def d4_computation_time( top='', ctmax=600 ):", "in lines: if '##### TIMER' in l: data = l.split() try: tit_ =", "0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l = [ 'dodgerblue',", "DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test < min_read_obs ) | ( read_obs_test", "= [] read_gues = [] gues_mean = [] write_restartg = [] das_letkf =", "continue ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] #", "\"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\", ] # prepare nan array iarray", "labelbottom=False ) ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed', color='gray',", "alpha=0.5 ) width2 = 0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\", width=width2, color='dodgerblue' )", "print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data = l.split() try: tit_", "else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]),", "mode_, mean_ = plot_hist( 
key=key, dat=dat_ ) DETAIL_MODE_test[key] = mean_ else: print( 'Not", "data[5] ) if tit_ == \"WRITE\": dat_ = float( data[6] ) if tit4_", "] tit_l = [ \"Data assimilation\", \"30-min forecast\" ] pnum_l = [ \"(a)\",", "DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes", "os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file() ] path_l.append( os.path.join( top,", "'DATA TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue print( \"check\",", "= data[4] dat_ = float( data[5] ) if tit_ == 'SCALE': i +=", "np.std( ftimes, ddof=1 ), len( ftimes ) ) ax2.tick_params( axis='x', which='both', bottom=False, top=False,", "= -1 for path in path_l: if not os.path.isfile( path ): break with", "TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"] +=", "\"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\", ] # prepare nan array iarray =", "np.power( dat.size, 1.0/3.0) #bins = int( ( xmax - xmin ) / h", "right=0.95, top=0.92, ) rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6", "transform=ax.transAxes, ha='center', va='bottom' ) ax.set_xlim( xmin, xmax ) ax.set_ylim( ymin, ymax ) xlab", "key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM[\"JIT-DT\"]", "DETAIL[key] ) dat = DETAIL[key] dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit )", "fn_sum = '{0:}/SUM.npz'.format( data_path, ) fn_ftimes = '{0:}/ftimes.npz'.format( data_path, ) if not USE_ARCH_DAT:", "0.1) plt.clf() plt.close('all') #### SUM = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0,", "hspace=0.05) ax1.set_xlim( 0, 2.0 ) width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue',", "os import sys from datetime import datetime, timedelta import numpy as np data_path", "dat=dat_ ) DETAIL_MODE_test[key] = mean_ else: print( 'Not plot ', key) for key", 
"\"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\":", "dat_ ) else: DETAIL[\"OTHERS\"].append( dat_ ) except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:'", "): break with open( path ) as f: lines = f.readlines() for l", "= [] finalize = [] jitget = [] DETAIL = { \"SCALE\": scale,", "tit_ == 'SCALE': scale_l.append( dat_ ) except: print( \"Failed\", data ) scale_l =", "deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\": jitget, } # Prepare", "[] das_letkf = [] anal_mean = [] write_anal = [] deallocate = []", "\"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" if tit_ in", "plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar_2p_scale( dic={},", "yticks ) ax1.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax1.hlines( xmin=0, xmax=2, y=np.arange(", "= d4_computation_time_nparray( top=top, ) ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5],", "ctimes, DETAIL ) def d4_computation_time( top='', ctmax=600 ): dirs = [ f.name for", "2, figsize=(6,4) ) # fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06,", "dic2={} ): import matplotlib.pyplot as plt fig, ( ax1,ax2 ) = plt.subplots( 1,", "print( '{0:} average: {1:} (N: {2:})'.format( \"cycle\", np.nanmean( ctimes ), len(ctimes) ) )", ") fig, ax = plt.subplots( 1, 1, figsize=(6,4) ) fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95,", "#dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ] dat_ = dat[ ~np.isnan(dat)", "/ h ) # Square-root choice bins = int( np.sqrt( dat.size ) )", "= data[4] dat_ = float( data[5] ) if tit_ 
== \"WRITE\": dat_ =", "\"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate,", "= '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000 ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, ) ftimes_test,", "3.5 * np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0) #bins = int(", "if key == \"READ_OBS\": dat_ -= dat_jit_ print( \"#### \", key, time_, num,", "iarray ) # Get computation time for all i = -1 for path", "), lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim( 0, 151.0 ) ax2.set_xlim( 0, 2.0", "'y', 'k' ] acm = 0.0 for i, key in enumerate( dic.keys() ):", "das_letkf = [] anal_mean = [] write_anal = [] deallocate = [] write_restarta", "ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01,", "mean ) def plot_bar_2p( dic={}, ftimes=np.array([]) ): import matplotlib.pyplot as plt fig, (", "dat_ = float( data[5] ) if tit_ == 'SCALE': scale_l.append( dat_ ) except:", "-1 for path in path_l: if not os.path.isfile( path ): break with open(", "RESTART/GRADS(GUES)\" if tit_ in DETAIL: DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append( dat_ ) except:", "ctimes, DETAIL = d4_computation_time( top=top, ) ctimes = np.array( ctimes ) print( '{0:}", "# Get computation time for SCALE for path in path_l: if not os.path.isfile(", "exist_ok=True ) USE_ARCH_DAT = True #USE_ARCH_DAT = False quick_hist = False quick_bar =", "pnum_l = [ \"(a)\", \"(b)\" ] for i, ax in enumerate( ax_l ):", "np.nanmean( DETAIL[key] ) dat = DETAIL[key] dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit", "elif '##### TIMER' in l: data = l.split() try: tit_ = data[3] tit4_", "\"#### \", key, time_, 
num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) ) if", "stats xmin = 0 xmax = 60 # Scott's choise #h = 3.5", "\"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\":", "lab == 'JIT-DT': continue ax.bar( '', dic[key], bottom=acm, label=lab, color=c_l[i] ) acm +=", "ha='left', va='bottom' ) ofig = 'pdf/Fig06.pdf' print( ofig ) if quick_bar: plt.show() else:", "key ) if key == \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif key == \"READ_OBS\":", "DETAIL: DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append( dat_ ) except: print( \"Failed\", data )", "elif lab == 'JIT-DT': continue ax.bar( '', dic[key], bottom=acm, label=lab, color=c_l[i] ) acm", "ftimes ) ) ax2.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax_l = [", "0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.set_xlim( xmin, xmax ) ax.set_ylim(", "or key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\":", "fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold'", "\"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg,", ") #sys.exit() #plot_bar( dic=SUM ) plot_bar_2p( dic=SUM, ftimes=ftimes ) #plot_bar_2p_scale( dic=SUM, dic2=SUM_test, ftimes=ftimes", "( xmax - xmin ) / h ) # Square-root choice bins =", "] #c_l = [ 'cyan', 'magenta', 'y', 'k' ] acm = 0.0 for", "mean_ else: print( 'Not plot ', key) for key in DETAIL_MODE.keys(): print( key", "= False quick_hist = False quick_bar = True quick_bar = False def d4_computation_time_nparray(", "ftimes, ddof=1 ), len( ftimes ) ) ax2.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False", "} for key 
in DETAIL_MODE_test.keys(): if key == \"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif", "fn_sum, allow_pickle=True ) as npz: for key in SUM.keys(): SUM[key] = npz[key] ftimes", "SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"]", "'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]", "axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4", "2 ) ax1.set_ylim( 0, 20.0 ) ax1.set_yticks( yticks ) ax1.tick_params( axis='x', which='both', bottom=False,", "write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\": jitget, } # Prepare file path list", ") scale_l = [] # Get computation time for SCALE for path in", "ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12 ) ax1.set_ylabel( 'Computation time (s)', fontsize=12", "acm = 0.0 for i, key in enumerate( dic.keys() ): lab = key", "dirs: path_l.append( os.path.join( top, dir_, ) ) scale_l = [] # Get computation", "import matplotlib.pyplot as plt fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4)", "== \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] # elif key == \"READ_GUES\" or key ==", "import stats xmin = 0 xmax = 60 # Scott's choise #h =", ").replace( ')') ofig = 'png/1p_d4_{0:}.png'.format( key_ ) print( ofig ) if quick_hist: plt.show()", "sys from datetime import datetime, timedelta import numpy as np data_path = \"../../dat4figs_JAMES/Fig06\"", "def d4_computation_time_nparray( top='' ): dirs = [ f.name for f in os.scandir( top", "as plt from scipy import stats xmin = 0 xmax = 60 #", "> 100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) DETAIL_MODE_test[key] = mean_ else:", "ftimes=np.array([]) ): import matplotlib.pyplot as plt fig, ( ax1,ax2 ) = plt.subplots( 1,", "ax1.set_yticks( yticks ) 
ax1.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax1.hlines( xmin=0, xmax=2,", "ctimes ) print( '{0:} average: {1:} (N: {2:})'.format( \"cycle\", np.nanmean( ctimes ), len(ctimes)", "try: tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_].append( dat_ ) except:", "= [] jitget = [] DETAIL = { \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope,", "timedelta import numpy as np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT", ") #.replace( '(', '_' ).replace( ')') ofig = 'png/1p_d4_{0:}.png'.format( key_ ) print( ofig", "'dodgerblue', 'limegreen', 'gold' ] #c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ] c_l", "print( \"check\", dic2[key] ) ax1.bar( 2.0, dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1 ) acm2", "in os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file() ] path_l.append( os.path.join(", "'_' ).replace( ')') ofig = 'png/1p_d4_{0:}.png'.format( key_ ) print( ofig ) if quick_hist:", "top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL = d4_computation_time( top=top, ) ctimes", "0.99, 0.99, text_, fontsize=12, transform=ax.transAxes, ha='right', va='top' ) tit_ = key ax.text( 0.5,", "ymax=ymax, linewidths=lw, linestyles=ls, color=color ) text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size", "Get computation time for SCALE for path in path_l: if not os.path.isfile( path", "dir_ in dirs: path_l.append( os.path.join( top, dir_, ) ) scale_l = [] #", "& ~np.isnan( dat_jit ) ] num = len( dat_ ) if key ==", "fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0 ) width1 =", "= np.nanmean( DETAIL_test[key] ) dat = DETAIL_test[key] print( key, dat ) #dat_ =", ")] = np.nan time_ = np.nanmean( DETAIL[key] ) dat = DETAIL[key] dat_ =", "rn ) mode = np.mean( rbins[imode:imode+2] ) mean = np.mean( 
dat ) #print(", "color='dodgerblue' ) print( \"std:\", np.std( ftimes, ddof=1 ), len( ftimes ) ) ax2.tick_params(", "( read_obs_ > max_read_obs )] = np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ] for", "= '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000 ftimes, ctimes, DETAIL = d4_computation_time_nparray(", "print( \"Failed\", data ) elif '[Info:DA]' in l: data = l.split() try: ctimes.append(", "if key == \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif key == \"READ_OBS\": SUM[\"OBS\"] +=", "data[4] dat_ = float( data[5] ) if tit_ == \"WRITE\": dat_ = float(", "] for key in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test < min_read_obs ) | (", "np.savez( fn_ftimes, ftimes=ftimes ) else: with np.load( fn_sum, allow_pickle=True ) as npz: for", "\"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\",", ") def d4_computation_time( top='', ctmax=600 ): dirs = [ f.name for f in", "RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\": jitget, } # Prepare file path", "# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels),", ") # Get computation time for all i = -1 for path in", "= np.zeros( scale_l.shape ) iarray[:] = np.nan DETAIL = {} for key in", "1, 2, figsize=(6,4) ) # fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15,", "ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') #### SUM = { \"SCALE\": 0.0,", ") print(\"\") DETAIL_MODE = { } DETAIL_MODE_test = { } min_read_obs = 1.0", "import 
datetime, timedelta import numpy as np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True", "0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) ofig = 'pdf/Fig06.pdf' print( ofig", "= 'Obs pre-\\nprocessing' elif lab == 'DATA TRANSFER': lab = 'Memory copy' elif", "\"LETKF\": 0.0, \"OBS\": 0.0, # \"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0, } fn_sum =", "d4_computation_time( top=top, ) ctimes = np.array( ctimes ) print( '{0:} average: {1:} (N:", "= np.nan time_ = np.nanmean( DETAIL_test[key] ) dat = DETAIL_test[key] print( key, dat", "| ( read_obs_test > max_read_obs )] = np.nan time_ = np.nanmean( DETAIL_test[key] )", "= plt.subplots( 1, 1, figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) #c_l", "[] init_others = [] init_letkf = [] scale = [] others = []", "= [] init_others = [] init_letkf = [] scale = [] others =", "= DETAIL[key] dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ] num =", ") ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) width2 =", "= [] path_l = [] init = [] init_others = [] init_letkf =", "elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum,", "= [] gues_mean = [] write_restartg = [] das_letkf = [] anal_mean =", "xmin, xmax ) ax.set_ylim( ymin, ymax ) xlab = 'Computation time (s)' ylab", "f.readlines() for l in lines: if '[Info:fcst] End forecast' in l: data =", "fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),", "= np.arange( 0, 22, 2 ) ax1.set_ylim( 0, 20.0 ) ax1.set_yticks( yticks )", "gues_mean = [] write_restartg = [] das_letkf = [] anal_mean = [] write_anal", "width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l =", "'DATA TRANSFER': lab = 'Memory copy' elif lab == 
'JIT-DT': continue ax1.bar( 1.0,", "d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL = d4_computation_time( top=top, )", "data ) elif '......jitdt_read_toshiba:jitget:' in l: data = l.split() try: tit_ = \"JIT_GET\"", "bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13", "set_grid = [] read_gues = [] gues_mean = [] write_restartg = [] das_letkf", "ls = 'dashed' color = 'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color", "TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] +=", ") fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) #c_l = [ 'firebrick', 'dodgerblue', 'limegreen',", ") ax.set_ylim( 0, 31.0 ) ax.set_yticks( yticks ) ofig = 'png/1p_d4_bar.png' print( ofig", "f: lines = f.readlines() for l in lines: if '[Info:fcst] End forecast' in", "data_path, ) if not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test =", "ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_ = key.replace( ' ', '_' ).replace(", "~np.isnan(dat_jit_test) ] for key in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test < min_read_obs ) |", "] path_l.append( os.path.join( top, dir_, fname ) ) # Get computation time for", "f.name for f in os.scandir( top ) ] #if f.is_file() ] path_l =", "= DETAIL['JIT_GET'] dat_jit[ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs", "= { } DETAIL_MODE_test = { } min_read_obs = 1.0 max_read_obs = 30.0", "RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", 
\"FINALIZE\", \"JIT_GET\", ] # prepare", "'Memory copy' elif lab == 'JIT-DT': continue print( \"check\", dic2[key] ) ax1.bar( 2.0,", "plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') #### SUM = { \"SCALE\":", "np.load( fn_ftimes, allow_pickle=True )['ftimes'] print( SUM ) #print( DETAIL_MODE ) #print( SUM_test )", "DETAIL[key] ) ) if num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_", "= np.load( fn_ftimes, allow_pickle=True )['ftimes'] print( SUM ) #print( DETAIL_MODE ) #print( SUM_test", "**SUM, ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes ) else: with np.load( fn_sum, allow_pickle=True )", "0.99, text_, fontsize=12, transform=ax.transAxes, ha='right', va='top' ) tit_ = key ax.text( 0.5, 1.01,", "acm += dic[key] acm2 = 0.0 for i, key in enumerate( dic2.keys() ):", "path ) as f: lines = f.readlines() for l in lines: if '#####", "except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data = l.split() try:", "\"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\",", ") as f: lines = f.readlines() for l in lines: if '##### TIMER'", "dat, range=(xmin, xmax), bins=bins, alpha=0.6 ) imode = np.argmax( rn ) mode =", "np.nanmin( DETAIL[key] ) ) if num > 100: mode_, mean_ = plot_hist( key=key,", "linewidths=lw, linestyles=ls, color=color ) color = 'k' ax.vlines( x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls,", "[] ctimes = [] path_l = [] init = [] init_others = []", "= f.readlines() for l in lines: if '[Info:fcst] End forecast' in l: data", ") ] dat_ = dat[ ~np.isnan(dat) ] num = len( dat_ ) #", "/ np.power( dat.size, 1.0/3.0) #bins = int( ( xmax - xmin ) /", "for f in os.scandir( top ) ] #if f.is_file() ] ftimes = []", "tit4_ = data[4] dat_ = float( data[5] ) if tit_ == \"WRITE\": dat_", "0, 1.0 ) yticks = np.arange( 0, 22, 2 ) 
ax1.set_ylim( 0, 20.0", "bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0 ) width1 = 0.8 #c_l", "DETAIL[tit_][i] = dat_ except: print( \"Failed\", data ) return( ftimes, ctimes, DETAIL )", "dat_ except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data = l.split()", ") if tit4_ == \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\":", "= 'png/1p_d4_bar.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches", "1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] # ax.legend( fontsize=12,", ") ) ] #if f.is_file() ] path_l.append( os.path.join( top, dir_, fname ) )", "dat ) #print( len(rn), len(rbins), mode ) lw = 1.0 ymin = 0.0", "= dat_ except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data =", "for key in DETAIL_MODE.keys(): print( key ) if key == \"SCALE\": SUM[\"SCALE\"] +=", "\"READ_OBS\": dat_ -= dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL[key] ),", ") DETAIL_MODE_test[key] = mean_ else: print( 'Not plot ', key) for key in", "dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ] dat_ = dat[ ~np.isnan(dat) ] num", "path_l = [] init = [] init_others = [] init_letkf = [] scale", ") imode = np.argmax( rn ) mode = np.mean( rbins[imode:imode+2] ) mean =", "', key) for key in DETAIL_MODE.keys(): print( key ) if key == \"SCALE\":", "npz[key] ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes'] print( SUM ) #print( DETAIL_MODE )", "else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar( dic={} ):", "data_path, ) fn_ftimes = '{0:}/ftimes.npz'.format( data_path, ) if not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp'", "= 'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) color 
= 'k'", "= { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0, } for key", "True #USE_ARCH_DAT = False quick_hist = False quick_bar = True quick_bar = False", "print( \"Failed\", data ) return( ftimes, ctimes, DETAIL ) def d4_computation_time( top='', ctmax=600", "y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim( 0, 151.0", ") DETAIL[tit_].append( dat_ ) except: print( \"Failed\", data ) for key in DETAIL.keys():", "print( \"Failed\", data ) scale_l = np.array( scale_l ) key_l = [ \"SCALE\",", "= l.split() try: ctimes.append( float( data[6] ) ) except: print( \"Failed\", data )", "ylab = 'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_ = key.replace( '", "deallocate = [] write_restarta = [] others = [] finalize = [] jitget", "others = [] read_obs = [] obsope = [] process_obs = [] set_grid", "else: DETAIL[\"OTHERS\"].append( dat_ ) except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l:", "ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) color = 'k' ax.vlines( x=mean, ymin=ymin, ymax=ymax,", "= [ f.name for f in os.scandir( top ) ] #if f.is_file() ]", "= [ \"Data assimilation\", \"30-min forecast\" ] pnum_l = [ \"(a)\", \"(b)\" ]", "ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf()", "l.split() try: tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_][i] = dat_", ") DETAIL[tit_][i] = dat_ except: print( \"Failed\", data ) return( ftimes, ctimes, DETAIL", "DETAIL_test[key] print( key, dat ) #dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test )", "d4_computation_time( top='', ctmax=600 ): dirs = [ f.name for f in os.scandir( top", ") key_l = [ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\",", "wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0 ) width1 = 0.8 #c_l = [ 'firebrick',", "= 
np.mean( dat ) #print( len(rn), len(rbins), mode ) lw = 1.0 ymin", "[ \"Data assimilation\", \"30-min forecast\" ] pnum_l = [ \"(a)\", \"(b)\" ] for", "ha='center', va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) ofig", "(s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 32, 2", "list for dir_ in dirs: path_l.append( os.path.join( top, dir_, ) ) scale_l =", "for f in os.scandir( top ) ] #if f.is_file() ] path_l = []", "transform=ax.transAxes, ha='center', va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' )", "xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) width2 = 0.8 ax2.bar(", "[] write_anal = [] deallocate = [] write_restarta = [] others = []", ") ) # Get computation time for path in path_l: if not os.path.isfile(", "-= dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key]", "lab = 'Memory copy' elif lab == 'JIT-DT': continue print( \"check\", dic2[key] )", "fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),", "+= DETAIL_MODE_test[key] # elif key == \"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA", "+= DETAIL_MODE[key] elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key]", "in os.scandir( top ) ] #if f.is_file() ] ftimes = [] ctimes =", "ymin, ymax ) xlab = 'Computation time (s)' ylab = 'Frequency' ax.set_xlabel( xlab,", "= float( data[5] ) if tit_ == 'SCALE': scale_l.append( dat_ ) except: print(", "scale_l.append( dat_ ) except: print( \"Failed\", data ) scale_l = np.array( scale_l )", "ax = plt.subplots( 1, 1, figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )", "= [] process_obs = [] set_grid = [] read_gues = [] gues_mean =", 
"DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL = d4_computation_time( top=top, ) ctimes = np.array( ctimes", "SUM[key] = npz[key] ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes'] print( SUM ) #print(", "top='', ctmax=600 ): dirs = [ f.name for f in os.scandir( top )", "[] read_obs = [] obsope = [] process_obs = [] set_grid = []", ") else: DETAIL[\"OTHERS\"].append( dat_ ) except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in", "bottom=False, top=False, labelbottom=False ) ax_l = [ ax1, ax2 ] tit_l = [", ")['ftimes'] print( SUM ) #print( DETAIL_MODE ) #print( SUM_test ) #print( DETAIL_MODE_test )", "lab = 'Memory copy' elif lab == 'JIT-DT': continue ax1.bar( 1.0, dic[key], bottom=acm,", "num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) ) if num > 100: mode_,", "s\\nN={2:}'.format( mean, mode, dat.size ) ax.text( 0.99, 0.99, text_, fontsize=12, transform=ax.transAxes, ha='right', va='top'", "plt.subplots( 1, 1, figsize=(6,4) ) fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, ) rn, rbins,", "- xmin ) / h ) # Square-root choice bins = int( np.sqrt(", "data = l.split() try: tit_ = data[3] tit4_ = data[4] dat_ = float(", "left=0.15, bottom=0.15, right=0.95, top=0.92, ) rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax),", "min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan time_ = np.nanmean(", "= data[3] dat_ = float( data[5] ) if tit_ == 'SCALE': scale_l.append( dat_", "\"JIT-DT\": 0.0, } fn_sum = '{0:}/SUM.npz'.format( data_path, ) fn_ftimes = '{0:}/ftimes.npz'.format( data_path, )", "scale = [] others = [] read_obs = [] obsope = [] process_obs", "= [] others = [] finalize = [] jitget = [] DETAIL =", "\"OBS\": 0.0, \"JIT-DT\": 0.0, } for key in DETAIL_MODE_test.keys(): if key == \"SCALE\":", "iarray = np.zeros( scale_l.shape ) iarray[:] = np.nan DETAIL = {} for key", "= \"WRITE RESTART/GRADS(GUES)\" i_ = i if i_ < 0: i_ = 0", "time_, num, np.nanmax( 
DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) ) if num > 100:", "l in lines: if '##### TIMER' in l: data = l.split() try: tit_", "data = l.split() try: tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_][i]", "\"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others,", "color=c_l[i], width=width1 ) acm2 += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles,", "path_l.append( os.path.join( top, dir_, ) ) scale_l = [] # Get computation time", ") ) scale_l = [] # Get computation time for SCALE for path", "try: ctimes.append( float( data[6] ) ) except: print( \"Failed\", data ) elif '#####", "= \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT = True #USE_ARCH_DAT = False quick_hist", "= { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, # \"DATA TRANSFER\": 0.0, \"JIT-DT\":", "scale_l.shape ) iarray[:] = np.nan DETAIL = {} for key in key_l: if", "data ) return( ftimes, ctimes, DETAIL ) def d4_computation_time( top='', ctmax=600 ): dirs", "dat_ ) except: print( \"Failed\", data ) for key in DETAIL.keys(): DETAIL[key] =", "left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0 ) width1 = 0.8", "choice bins = int( np.sqrt( dat.size ) ) fig, ax = plt.subplots( 1,", "} # Prepare file path list for dir_ in dirs: fname = 'job.o'", "= 0.1) plt.clf() plt.close('all') #### SUM = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\":", "Square-root choice bins = int( np.sqrt( dat.size ) ) fig, ax = plt.subplots(", "{ \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0, } for key in", "key=key, dat=dat_ ) #DETAIL_MODE[key] = mode_ DETAIL_MODE[key] = mean_ else: print( 'Not plot", "print( ofig ) if quick_hist: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1)", "0.0, \"JIT-DT\": 0.0, } fn_sum = '{0:}/SUM.npz'.format( data_path, ) fn_ftimes = 
'{0:}/ftimes.npz'.format( data_path,", "xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_ = key.replace( ' ', '_' ).replace( '/',", "ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL", "mean, mode, dat.size ) ax.text( 0.99, 0.99, text_, fontsize=12, transform=ax.transAxes, ha='right', va='top' )", ") ax.set_xlim( xmin, xmax ) ax.set_ylim( ymin, ymax ) xlab = 'Computation time", "else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes )", "in DETAIL_MODE_test.keys(): if key == \"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key == \"READ_OBS\":", "fontsize=13 ) ax.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks", "DETAIL_MODE[key] elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez(", "DETAIL = { \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\":", "\"30-min forecast\" ] pnum_l = [ \"(a)\", \"(b)\" ] for i, ax in", "\"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\":", "= [] DETAIL = { \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\":", "dic={}, ftimes=np.array([]) ): import matplotlib.pyplot as plt fig, ( ax1,ax2 ) = plt.subplots(", "ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\", width=width2, color='dodgerblue' ) print( \"std:\", np.std( ftimes, ddof=1", "gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE", "read_obs_test = 
DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test < min_read_obs ) |", "0: i_ = 0 if tit_ in DETAIL: DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_]", "right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0", "top ) ] #if f.is_file() ] path_l = [] ftimes = [] ctimes", "# Get computation time for all i = -1 for path in path_l:", "if tit4_ == \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_", "def d4_computation_time( top='', ctmax=600 ): dirs = [ f.name for f in os.scandir(", "fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) # ofig = 'png/2p_d4_bar_scale.png' print( ofig ) if", "top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0 )", "ymax = 4000 #dat_.size ls = 'dashed' color = 'b' ax.vlines( x=mode, ymin=ymin,", "ofig = 'png/2p_d4_bar_scale.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\",", "ctimes = [] # Prepare file path list for dir_ in dirs: path_l.append(", "ax.set_ylabel( ylab, fontsize=11) key_ = key.replace( ' ', '_' ).replace( '/', '_' )", "DETAIL = d4_computation_time( top=top, ) ctimes = np.array( ctimes ) print( '{0:} average:", "pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar( dic={} ): import matplotlib.pyplot as plt", "= np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ] for key in DETAIL.keys(): DETAIL[key][ (", "~np.isnan(dat) ] num = len( dat_ ) # if key == \"READ_OBS\": #", "| ( read_obs_ > max_read_obs )] = np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ]", "left=0.15, bottom=0.05, right=0.5, top=0.92, ) #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]", "np.copy( iarray ) # Get computation time for all i = -1 for", "TIMER' in l: data = l.split() try: tit_ = data[3] tit4_ = data[4]", "), np.nanmin( DETAIL_test[key] ) 
) if num > 100: mode_, mean_ = plot_hist(", "finalize, \"JIT_GET\": jitget, } # Prepare file path list for dir_ in dirs:", ") ax_l = [ ax1, ax2 ] tit_l = [ \"Data assimilation\", \"30-min", "= {} for key in key_l: if key == 'SCALE': DETAIL[key] = scale_l", "= 3.5 * np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0) #bins =", "# SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else:", "ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') return( mode, mean ) def plot_bar_2p(", "copy' elif lab == 'JIT-DT': continue ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1", "bottom=acm2, label=None, color=c_l[i], width=width1 ) acm2 += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00)", "read_obs_ > max_read_obs )] = np.nan time_ = np.nanmean( DETAIL[key] ) dat =", "= [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ] c_l = [ 'dodgerblue', 'firebrick', 'gray',", "tit_ = \"WRITE RESTART/GRADS(GUES)\" i_ = i if i_ < 0: i_ =", "path list for dir_ in dirs: path_l.append( os.path.join( top, dir_, ) ) scale_l", "os.scandir( top ) ] #if f.is_file() ] ftimes = [] ctimes = []", "1.0 ymin = 0.0 ymax = 4000 #dat_.size ls = 'dashed' color =", "dat_jit_test ) ] dat_ = dat[ ~np.isnan(dat) ] num = len( dat_ )", "\"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE", "top, dir_, fname ) ) # Get computation time for path in path_l:", "'limegreen', 'gold' ] #c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ] c_l =", "yticks ) ofig = 'png/1p_d4_bar.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig(", "dir_ ) ) ] #if f.is_file() ] path_l.append( os.path.join( top, dir_, fname )", "path ) as f: lines = f.readlines() for l in lines: if '[Info:fcst]", "data = l.split() try: tit_ = data[3] dat_ = 
float( data[5] ) if", "as f: lines = f.readlines() for l in lines: if '##### TIMER' in", "1, np.mean(ftimes), label=\"30-min forecast\", width=width2, color='dodgerblue' ) print( \"std:\", np.std( ftimes, ddof=1 ),", "dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] )", "np.nan DETAIL = {} for key in key_l: if key == 'SCALE': DETAIL[key]", "Prepare file path list for dir_ in dirs: fname = 'job.o' #[ f.name", "top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0 )", "l: data = l.split() try: tit_ = \"JIT_GET\" dat_ = float( data[1] )", "\"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_][i] = dat_ except: print( \"Failed\", data", "\"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\", ] # prepare nan array iarray = np.zeros(", "= '{0:}/SUM.npz'.format( data_path, ) fn_ftimes = '{0:}/ftimes.npz'.format( data_path, ) if not USE_ARCH_DAT: top", "in l: data = l.split() try: tit_ = data[3] tit4_ = data[4] dat_", ") if key == \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif key == \"READ_OBS\": SUM[\"OBS\"]", "dat=np.array([]) ): import matplotlib.pyplot as plt from scipy import stats xmin = 0", "right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0 ) width1 = 0.8 #c_l =", "'forestgreen', 'goldenrod' ] c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ] #c_l", "< 0: i_ = 0 if tit_ in DETAIL: DETAIL[tit_][i_] = dat_ else:", "4, 20, 4 ), lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim( 0, 151.0 )", "= np.nanmean( DETAIL[key] ) dat = DETAIL[key] dat_ = dat[ ~np.isnan(dat) & ~np.isnan(", "0 xmax = 60 # Scott's choise #h = 3.5 * np.std( dat,", "dir_, ) ) scale_l = [] # Get computation time for SCALE for", ") as f: lines = f.readlines() for l in lines: if '[Info:fcst] End", "reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel( 'Computation time (s)', 
fontsize=12 ) #ax.set_xlim(", "i_ < 0: i_ = 0 if tit_ in DETAIL: DETAIL[tit_][i_] = dat_", "= plt.subplots( 1, 1, figsize=(6,4) ) fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, ) rn,", "DETAIL_MODE[key] = mean_ else: print( 'Not plot ', key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test", "# prepare nan array iarray = np.zeros( scale_l.shape ) iarray[:] = np.nan DETAIL", "== \"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif key == \"READ_GUES\" or key ==", "[] others = [] read_obs = [] obsope = [] process_obs = []", "False def d4_computation_time_nparray( top='' ): dirs = [ f.name for f in os.scandir(", "break with open( path ) as f: lines = f.readlines() for l in", "fig, ax = plt.subplots( 1, 1, figsize=(6,4) ) fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92,", "read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan #dat_jit_test", "float( data[1] ) DETAIL[tit_][i] = dat_ except: print( \"Failed\", data ) return( ftimes,", "0.1) plt.clf() plt.close('all') return( mode, mean ) def plot_bar_2p( dic={}, ftimes=np.array([]) ): import", "data[3] tit4_ = data[4] dat_ = float( data[5] ) if tit_ == \"WRITE\":", "init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\":", "< min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan dat_jit_ =", "== 'JIT-DT': continue ax.bar( '', dic[key], bottom=acm, label=lab, color=c_l[i] ) acm += dic[key]", "labels = ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel( 'Computation time", "Get computation time for path in path_l: if not os.path.isfile( path ): break", ") if tit_ == 'SCALE': i += 1 if tit_ == \"WRITE\": dat_", "fn_ftimes, allow_pickle=True )['ftimes'] print( SUM ) #print( DETAIL_MODE ) #print( SUM_test ) #print(", "= 0.0 ymax = 4000 
#dat_.size ls = 'dashed' color = 'b' ax.vlines(", "#if f.is_file() ] path_l = [] ftimes = [] ctimes = [] #", "xlab = 'Computation time (s)' ylab = 'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab,", "try: tit_ = data[3] dat_ = float( data[5] ) if tit_ == 'SCALE':", "key == 'SCALE': DETAIL[key] = scale_l else: DETAIL[key] = np.copy( iarray ) #", "np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ] for key in DETAIL.keys(): DETAIL[key][ ( read_obs_", "ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 ) imode = np.argmax( rn ) mode", "dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1 ) acm2 += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01,", "'gray', 'goldenrod', 'k' ] #c_l = [ 'cyan', 'magenta', 'y', 'k' ] acm", "] #if f.is_file() ] path_l = [] ftimes = [] ctimes = []", "ax2.set_ylim( 0, 151.0 ) ax2.set_xlim( 0, 2.0 ) ax2.hlines( xmin=0, xmax=2, y=[60, 120],", "color='gray', alpha=0.5 ) ax2.set_ylim( 0, 151.0 ) ax2.set_xlim( 0, 2.0 ) ax2.hlines( xmin=0,", "== \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test = { \"SCALE\":", "] # prepare nan array iarray = np.zeros( scale_l.shape ) iarray[:] = np.nan", ") ax.set_ylim( ymin, ymax ) xlab = 'Computation time (s)' ylab = 'Frequency'", "prepare nan array iarray = np.zeros( scale_l.shape ) iarray[:] = np.nan DETAIL =", "== \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM[\"JIT-DT\"] +=", "xmax = 60 # Scott's choise #h = 3.5 * np.std( dat, ddof=1", "if '[Info:fcst] End forecast' in l: data = l.split() try: ftimes.append( float( data[7]", "ctimes ), len(ctimes) ) ) print( '{0:} average: {1:} (N: {2:})'.format( \"fcst \",", "data ) scale_l = np.array( scale_l ) key_l = [ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\",", ") ax1.set_yticks( yticks ) ax1.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax1.hlines( xmin=0,", "] #c_l = [ 'dodgerblue', 'firebrick', 
'forestgreen', 'goldenrod' ] c_l = [ 'dodgerblue',", "\"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf,", "> max_read_obs )] = np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ] for key in", "label=lab, color=c_l[i], width=width1 ) acm += dic[key] acm2 = 0.0 for i, key", "print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1)", "computation time for SCALE for path in path_l: if not os.path.isfile( path ):", "in path_l: if not os.path.isfile( path ): break with open( path ) as", "ctimes = [] path_l = [] init = [] init_others = [] init_letkf", "time for SCALE for path in path_l: if not os.path.isfile( path ): break", "xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim( 0,", ") if tit_ == \"WRITE\": dat_ = float( data[6] ) if tit4_ ==", "\"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data = l.split() try: tit_ =", "for path in path_l: if not os.path.isfile( path ): break with open( path", "'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ] #c_l = [ 'cyan', 'magenta', 'y', 'k'", "dat.size ) ) fig, ax = plt.subplots( 1, 1, figsize=(6,4) ) fig.subplots_adjust( left=0.15,", ")] = np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ] for key in DETAIL.keys(): DETAIL[key][", "pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) # ofig = 'png/2p_d4_bar_scale.png' print( ofig )", "xmax - xmin ) / h ) # Square-root choice bins = int(", "#c_l = [ 'cyan', 'magenta', 'y', 'k' ] acm = 0.0 for i,", "0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0, } for key in DETAIL_MODE_test.keys(): if key ==", "np.argmax( rn ) mode = np.mean( rbins[imode:imode+2] ) mean = np.mean( dat )", "as np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT = True #USE_ARCH_DAT", "for key in key_l: if key == 'SCALE': 
DETAIL[key] = scale_l else: DETAIL[key]", "= [] init = [] init_others = [] init_letkf = [] scale =", "0.0, \"OBS\": 0.0, # \"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0, } fn_sum = '{0:}/SUM.npz'.format(", "( read_obs_test > max_read_obs )] = np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ] for", "plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') return( mode, mean ) def", "\", np.mean( ftimes ), len(ftimes) ) ) print(\"\") DETAIL_MODE = { } DETAIL_MODE_test", "read_obs_ = DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[ ( read_obs_ < min_read_obs ) |", "path list for dir_ in dirs: fname = 'job.o' #[ f.name for f", "True quick_bar = False def d4_computation_time_nparray( top='' ): dirs = [ f.name for", ") ax1.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks =", "xmin = 0 xmax = 60 # Scott's choise #h = 3.5 *", "# Get computation time for path in path_l: if not os.path.isfile( path ):", "DETAIL_MODE[key] # elif key == \"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"]", "va='top' ) tit_ = key ax.text( 0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes, ha='center', va='bottom'", "\"OTHERS\", \"FINALIZE\", \"JIT_GET\", ] # prepare nan array iarray = np.zeros( scale_l.shape )", "tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" if", "key == \"READ_OBS\": dat_ -= dat_jit_ print( \"#### \", key, time_, num, np.nanmax(", "} min_read_obs = 1.0 max_read_obs = 30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET']", "ctimes, DETAIL = d4_computation_time_nparray( top=top, ) ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, )", "\"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean,", "key ax.text( 0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes, 
ha='center', va='bottom' ) ax.set_xlim( xmin, xmax", "print( 'Not plot ', key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ (", "[ f.name for f in os.scandir( top ) ] #if f.is_file() ] path_l", ") ] #if f.is_file() ] path_l = [] ftimes = [] ctimes =", "ax.set_yticks( yticks ) ofig = 'png/1p_d4_bar.png' print( ofig ) if quick_bar: plt.show() else:", "+= DETAIL_MODE[key] elif key == \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key]", "key in enumerate( dic2.keys() ): lab = key if lab == 'OBS': lab", "{2:})'.format( \"cycle\", np.nanmean( ctimes ), len(ctimes) ) ) print( '{0:} average: {1:} (N:", "else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0,", "{1:} (N: {2:})'.format( \"fcst \", np.mean( ftimes ), len(ftimes) ) ) print(\"\") DETAIL_MODE", "plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar( dic={}", "if tit_ == 'SCALE': scale_l.append( dat_ ) except: print( \"Failed\", data ) scale_l", "i, key in enumerate( dic2.keys() ): lab = key if lab == 'OBS':", "\"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" i_ = i", "\"FINALIZE\", \"JIT_GET\", ] # prepare nan array iarray = np.zeros( scale_l.shape ) iarray[:]", "DETAIL.keys(): DETAIL[key] = np.array( DETAIL[key] ) return( ftimes, ctimes, DETAIL ) def plot_hist(", "'Obs pre-\\nprocessing' elif lab == 'DATA TRANSFER': lab = 'Memory copy' elif lab", "= DETAIL_test[key] print( key, dat ) #dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test", ") as npz: for key in SUM.keys(): SUM[key] = npz[key] ftimes = np.load(", "= int( np.sqrt( dat.size ) ) fig, ax = plt.subplots( 1, 1, figsize=(6,4)", "text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size ) ax.text( 0.99, 0.99, text_,", "right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, 
bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0", "= 'png/2p_d4_bar_scale.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches", "), len(ftimes) ) ) print(\"\") DETAIL_MODE = { } DETAIL_MODE_test = { }", "30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[ ( read_obs_ < min_read_obs )", "key == \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] # elif key == \"READ_GUES\" or key", "Scott's choise #h = 3.5 * np.std( dat, ddof=1 ) / np.power( dat.size,", "xmin ) / h ) # Square-root choice bins = int( np.sqrt( dat.size", "anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\":", "fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 22, 2 )", "= np.array( scale_l ) key_l = [ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\",", "'dashed' color = 'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) color", "dirs = [ f.name for f in os.scandir( top ) ] #if f.is_file()", "ax1.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange(", "0.0 for i, key in enumerate( dic.keys() ): lab = key if lab", "SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes ) np.savez(", "\"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes )", "0, 22, 2 ) ax1.set_ylim( 0, 20.0 ) ax1.set_yticks( yticks ) ax1.tick_params( axis='x',", "TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue ax1.bar( 1.0, dic[key],", "', '_' ).replace( '/', '_' ) #.replace( '(', '_' ).replace( ')') ofig =", "data[3] dat_ = float( data[5] ) if tit_ == 'SCALE': scale_l.append( dat_ )", "= [] init_letkf 
= [] scale = [] others = [] read_obs =", "SUM[\"SCALE\"] += DETAIL_MODE[key] elif key == \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] # elif key", "1.0/3.0) #bins = int( ( xmax - xmin ) / h ) #", "ymax ) xlab = 'Computation time (s)' ylab = 'Frequency' ax.set_xlabel( xlab, fontsize=11)", "RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" i_ = i if", "0, 20.0 ) ax1.set_yticks( yticks ) ax1.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False )", "DETAIL_MODE_test[key] # elif key == \"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"]", "npz: for key in SUM.keys(): SUM[key] = npz[key] ftimes = np.load( fn_ftimes, allow_pickle=True", "'goldenrod', 'k' ] #c_l = [ 'cyan', 'magenta', 'y', 'k' ] acm =", "if num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) #DETAIL_MODE[key] =", "= [] ctimes = [] path_l = [] init = [] init_others =", ") ax1.bar( 2.0, dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1 ) acm2 += dic[key] #", "ofig = 'pdf/Fig06.pdf' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\",", "fn_sum, **SUM, ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes ) else: with np.load( fn_sum, allow_pickle=True", "= f.readlines() for l in lines: if '##### TIMER' in l: data =", "< min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan time_ =", "lab = 'Memory copy' elif lab == 'JIT-DT': continue ax.bar( '', dic[key], bottom=acm,", ") #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL = d4_computation_time( top=top, ) ctimes =", "plt.subplots( 1, 1, figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) #c_l =", "obsope = [] process_obs = [] set_grid = [] read_gues = [] gues_mean", "0.1) plt.clf() plt.close('all') def plot_bar( dic={} ): import matplotlib.pyplot as plt fig, ax", "~np.isnan( dat_jit_test ) ] dat_ = dat[ ~np.isnan(dat) ] num = len( dat_", ") 
handles, labels = ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel(", "print(\"\") DETAIL_MODE = { } DETAIL_MODE_test = { } min_read_obs = 1.0 max_read_obs", "copy' elif lab == 'JIT-DT': continue print( \"check\", dic2[key] ) ax1.bar( 2.0, dic2[key],", "): import matplotlib.pyplot as plt from scipy import stats xmin = 0 xmax", "tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_][i] = dat_ except: print(", "RESTART/GRADS(GUES)\" i_ = i if i_ < 0: i_ = 0 if tit_", "{2:})'.format( \"fcst \", np.mean( ftimes ), len(ftimes) ) ) print(\"\") DETAIL_MODE = {", "] dat_ = dat[ ~np.isnan(dat) ] num = len( dat_ ) # if", "= ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12 ) ax1.set_ylabel( 'Computation time (s)',", "np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) ) if num > 100: mode_, mean_", "rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 ) imode = np.argmax( rn", "f in os.scandir( top ) ] #if f.is_file() ] ftimes = [] ctimes", "'[Info:fcst] End forecast' in l: data = l.split() try: ftimes.append( float( data[7] )", "+= DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes ) np.savez( fn_ftimes,", "= DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test < min_read_obs ) | (", "max_read_obs )] = np.nan time_ = np.nanmean( DETAIL[key] ) dat = DETAIL[key] dat_", "DETAIL_MODE[key] elif key == \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test", "alpha=0.5 ) ax2.set_ylim( 0, 151.0 ) ax2.set_xlim( 0, 2.0 ) ax2.hlines( xmin=0, xmax=2,", "ax2.set_xlim( 0, 2.0 ) ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5", "dat[ ~np.isnan(dat) ] num = len( dat_ ) # if key == \"READ_OBS\":", "for key in DETAIL.keys(): 
DETAIL[key] = np.array( DETAIL[key] ) return( ftimes, ctimes, DETAIL", "TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue ax.bar( '', dic[key],", "os.path.join( top, dir_, fname ) ) # Get computation time for path in", "plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') #### SUM =", "else: DETAIL[\"OTHERS\"][i_] = dat_ except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l:", "in l: data = l.split() try: ftimes.append( float( data[7] ) ) except: print(", "= key.replace( ' ', '_' ).replace( '/', '_' ) #.replace( '(', '_' ).replace(", "= [ \"(a)\", \"(b)\" ] for i, ax in enumerate( ax_l ): ax.text(", "key in DETAIL_MODE_test.keys(): if key == \"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key ==", "plt.clf() plt.close('all') return( mode, mean ) def plot_bar_2p( dic={}, ftimes=np.array([]) ): import matplotlib.pyplot", "0, 3.0 ) width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold'", "\"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\": jitget, } # Prepare file path list for", "f.readlines() for l in lines: if '##### TIMER' in l: data = l.split()", "enumerate( ax_l ): ax.text( 0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.text(", "dat = DETAIL[key] dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ] num", "np.nan time_ = np.nanmean( DETAIL_test[key] ) dat = DETAIL_test[key] print( key, dat )", "= l.split() try: ftimes.append( float( data[7] ) ) except: print( \"Failed\", data )", "DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes ) else: with np.load(", "quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar_2p_scale(", "ax.text( 0.99, 0.99, text_, fontsize=12, transform=ax.transAxes, ha='right', va='top' ) tit_ = key ax.text(", "'SCALE': i += 1 if tit_ == \"WRITE\": dat_ = float( 
data[6] )", "write_restarta = [] others = [] finalize = [] jitget = [] DETAIL", "#dtime_max = 1000 ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, ) ftimes_test, ctimes_test, DETAIL_test", "\"OBS\": 0.0, # \"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0, } fn_sum = '{0:}/SUM.npz'.format( data_path,", "DETAIL['JIT_GET'] dat_jit[ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )]", "np.array( DETAIL[key] ) return( ftimes, ctimes, DETAIL ) def plot_hist( key=\"\", dat=np.array([]) ):", "'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0,", "151.0 ) ax2.set_xlim( 0, 2.0 ) ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed',", "1.01, tit_, fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.set_xlim( xmin, xmax ) ax.set_ylim( ymin,", ")] = np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ] for key in DETAIL_test.keys(): DETAIL_test[key][", "0.0, } fn_sum = '{0:}/SUM.npz'.format( data_path, ) fn_ftimes = '{0:}/ftimes.npz'.format( data_path, ) if", ") USE_ARCH_DAT = True #USE_ARCH_DAT = False quick_hist = False quick_bar = True", ") ofig = 'png/1p_d4_bar.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig,", ") fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, ) rn, rbins, rpatches = ax.hist( dat,", "label=lab, color=c_l[i] ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles,", ") #dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ] dat_ = dat[", "ax.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange(", "others, \"FINALIZE\": finalize, \"JIT_GET\": jitget, } # Prepare file path list for dir_", "60 # Scott's choise #h = 3.5 * np.std( dat, ddof=1 ) /", "elif key == \"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key]", "\"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" if tit_ in DETAIL: DETAIL[tit_].append( dat_ ) else:", "dic[key] 
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax.get_legend_handles_labels() ax.legend( reversed(handles),", "pad_inches = 0.1) plt.clf() plt.close('all') return( mode, mean ) def plot_bar_2p( dic={}, ftimes=np.array([])", "fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05)", "xmax=2, y=[60, 120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) width2 = 0.8 ax2.bar( 1,", "allow_pickle=True )['ftimes'] print( SUM ) #print( DETAIL_MODE ) #print( SUM_test ) #print( DETAIL_MODE_test", "np.sqrt( dat.size ) ) fig, ax = plt.subplots( 1, 1, figsize=(6,4) ) fig.subplots_adjust(", "acm2 = 0.0 for i, key in enumerate( dic2.keys() ): lab = key", "dir_ in dirs: fname = 'job.o' #[ f.name for f in os.scandir( os.path.join(", "key in DETAIL.keys(): DETAIL[key][ ( read_obs_ < min_read_obs ) | ( read_obs_ >", ") # ofig = 'png/2p_d4_bar_scale.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig(", "\"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\",", "bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] acm2 = 0.0 for i,", "in enumerate( dic2.keys() ): lab = key if lab == 'OBS': lab =", "top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0 ) width1 = 0.8 #c_l = [", "rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 ) imode =", "finalize = [] jitget = [] DETAIL = { \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\":", "#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ] c_l = [ 'dodgerblue', 'firebrick',", ") ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes,", "imode = np.argmax( rn ) mode = np.mean( 
rbins[imode:imode+2] ) mean = np.mean(", "f: lines = f.readlines() for l in lines: if '##### TIMER' in l:", "# \"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0, } fn_sum = '{0:}/SUM.npz'.format( data_path, ) fn_ftimes", "text_, fontsize=12, transform=ax.transAxes, ha='right', va='top' ) tit_ = key ax.text( 0.5, 1.01, tit_,", "#### SUM = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, # \"DATA TRANSFER\":", "ax.text( 0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i],", "[] ctimes = [] # Prepare file path list for dir_ in dirs:", ") elif '##### TIMER' in l: data = l.split() try: tit_ = data[3]", "yticks = np.arange( 0, 32, 2 ) ax.set_ylim( 0, 31.0 ) ax.set_yticks( yticks", "data ) elif '##### TIMER' in l: data = l.split() try: tit_ =", ") / h ) # Square-root choice bins = int( np.sqrt( dat.size )", "[] write_restartg = [] das_letkf = [] anal_mean = [] write_anal = []", "dat ) #dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ] dat_ =", "key == \"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif", "')') ofig = 'png/1p_d4_{0:}.png'.format( key_ ) print( ofig ) if quick_hist: plt.show() else:", "mode, dat.size ) ax.text( 0.99, 0.99, text_, fontsize=12, transform=ax.transAxes, ha='right', va='top' ) tit_", "data[7] ) ) except: print( \"Failed\", data ) elif '[Info:DA]' in l: data", "in key_l: if key == 'SCALE': DETAIL[key] = scale_l else: DETAIL[key] = np.copy(", "len(ctimes) ) ) print( '{0:} average: {1:} (N: {2:})'.format( \"fcst \", np.mean( ftimes", "# Square-root choice bins = int( np.sqrt( dat.size ) ) fig, ax =", "os.scandir( top ) ] #if f.is_file() ] path_l = [] ftimes = []", "if tit_ == \"WRITE\": dat_ = float( data[6] ) if tit4_ == \"RESTART/GRADS(ANAL)\":", "0, 151.0 ) ax2.set_xlim( 0, 2.0 ) ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0,", "if key == 'SCALE': DETAIL[key] = scale_l else: DETAIL[key] = np.copy( iarray )", "~np.isnan(dat_jit) ] for key in 
DETAIL.keys(): DETAIL[key][ ( read_obs_ < min_read_obs ) |", "End forecast' in l: data = l.split() try: ftimes.append( float( data[7] ) )", "print( SUM ) #print( DETAIL_MODE ) #print( SUM_test ) #print( DETAIL_MODE_test ) #sys.exit()", "top, dir_, ) ) scale_l = [] # Get computation time for SCALE", "float( data[5] ) if tit_ == \"WRITE\": dat_ = float( data[6] ) if", "time_, num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) ) if num > 100:", "tit_ = data[3] dat_ = float( data[5] ) if tit_ == 'SCALE': scale_l.append(", "len(ftimes) ) ) print(\"\") DETAIL_MODE = { } DETAIL_MODE_test = { } min_read_obs", "else: print( 'Not plot ', key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[", "\"Failed\", data ) elif '##### TIMER' in l: data = l.split() try: tit_", "acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax1.get_legend_handles_labels()", "num = len( dat_ ) if key == \"READ_OBS\": dat_ -= dat_jit_ print(", "ax.set_ylim( ymin, ymax ) xlab = 'Computation time (s)' ylab = 'Frequency' ax.set_xlabel(", "for i, ax in enumerate( ax_l ): ax.text( 0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes,", "== 'DATA TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue ax1.bar(", "'/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000 ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top,", "mean_ = plot_hist( key=key, dat=dat_ ) DETAIL_MODE_test[key] = mean_ else: print( 'Not plot", "= [] deallocate = [] write_restarta = [] others = [] finalize =", "DETAIL_MODE ) #print( SUM_test ) #print( DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM ) plot_bar_2p(", "x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) text_ = 'Mean:{0:.3f} 
s\\nMode:{1:.3f} s\\nN={2:}'.format( mean,", "\"#### \", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) ) if", ") #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l = [ 'dodgerblue',", ") elif '......jitdt_read_toshiba:jitget:' in l: data = l.split() try: tit_ = \"JIT_GET\" dat_", "i_ = 0 if tit_ in DETAIL: DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_] =", "[] scale = [] others = [] read_obs = [] obsope = []", "top=top, ) ctimes = np.array( ctimes ) print( '{0:} average: {1:} (N: {2:})'.format(", "= 'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_ = key.replace( ' ',", "float( data[1] ) DETAIL[tit_].append( dat_ ) except: print( \"Failed\", data ) for key", "bottom=acm, label=lab, color=c_l[i] ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )", "ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) # ofig = 'png/2p_d4_bar_scale.png'", ") fn_ftimes = '{0:}/ftimes.npz'.format( data_path, ) if not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top", "np.array( ctimes ) print( '{0:} average: {1:} (N: {2:})'.format( \"cycle\", np.nanmean( ctimes ),", "= np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ] for key in DETAIL_test.keys(): DETAIL_test[key][ (", "except: print( \"Failed\", data ) scale_l = np.array( scale_l ) key_l = [", "tit_ == 'SCALE': i += 1 if tit_ == \"WRITE\": dat_ = float(", "va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) # ofig", "== 'SCALE': i += 1 if tit_ == \"WRITE\": dat_ = float( data[6]", "31.0 ) ax.set_yticks( yticks ) ofig = 'png/1p_d4_bar.png' print( ofig ) if quick_bar:", "if tit_ in DETAIL: DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_] = dat_ except: print(", "for dir_ in dirs: path_l.append( os.path.join( top, dir_, ) ) scale_l = []", 
"1.00), fontsize=13 ) ax.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 )", "# Scott's choise #h = 3.5 * np.std( dat, ddof=1 ) / np.power(", "#dat_jit_test[ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] =", "np.savez( fn_sum, **SUM, ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes ) else: with np.load( fn_sum,", "= [] ftimes = [] ctimes = [] # Prepare file path list", "DETAIL.keys(): DETAIL[key][ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )]", "np.nanmean( ctimes ), len(ctimes) ) ) print( '{0:} average: {1:} (N: {2:})'.format( \"fcst", "bbox_to_anchor=(1.01, 1.00), fontsize=12 ) ax1.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0", "linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim( 0, 151.0 ) ax2.set_xlim( 0, 2.0 ) ax2.hlines(", ") except: print( \"Failed\", data ) scale_l = np.array( scale_l ) key_l =", "# if key == \"READ_OBS\": # dat_ -= dat_jit_ print( \"#### \", key,", "#print( DETAIL_MODE ) #print( SUM_test ) #print( DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM )", "SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key == \"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif key", "path_l.append( os.path.join( top, dir_, fname ) ) # Get computation time for path", "for key in SUM.keys(): SUM[key] = npz[key] ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes']", "np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT = True #USE_ARCH_DAT =", "ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01,", "dic2[key] ) ax1.bar( 2.0, dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1 ) acm2 += dic[key]", "mode, mean ) def plot_bar_2p( dic={}, ftimes=np.array([]) ): import matplotlib.pyplot as plt fig,", "= 1000 ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, ) 
ftimes_test, ctimes_test, DETAIL_test =", "ax1.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20,", "elif lab == 'JIT-DT': continue print( \"check\", dic2[key] ) ax1.bar( 2.0, dic2[key], bottom=acm2,", "ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel( 'Computation time (s)', fontsize=12 )", "scale_l else: DETAIL[key] = np.copy( iarray ) # Get computation time for all", "SUM = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, # \"DATA TRANSFER\": 0.0,", "width=width1 ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels", "= mode_ DETAIL_MODE[key] = mean_ else: print( 'Not plot ', key) read_obs_test =", "forecast\", width=width2, color='dodgerblue' ) print( \"std:\", np.std( ftimes, ddof=1 ), len( ftimes )", "DETAIL_MODE.keys(): print( key ) if key == \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif key", ") #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 22, 2 ) ax1.set_ylim(", "key_ ) print( ofig ) if quick_hist: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches", ") ) print( '{0:} average: {1:} (N: {2:})'.format( \"fcst \", np.mean( ftimes ),", "process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\":", "elif key == \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test =", "'##### TIMER' in l: data = l.split() try: tit_ = data[3] tit4_ =", "fn_ftimes = '{0:}/ftimes.npz'.format( data_path, ) if not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top =", ") acm2 += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels =", "== \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == 
\"RESTART/GRADS(GUES)\": tit_ = \"WRITE", "'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) color = 'k' ax.vlines(", "tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" i_ = i if i_ <", "[] path_l = [] init = [] init_others = [] init_letkf = []", "ax in enumerate( ax_l ): ax.text( 0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom'", "print( key, dat ) #dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ]", "key in SUM.keys(): SUM[key] = npz[key] ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes'] print(", ") / np.power( dat.size, 1.0/3.0) #bins = int( ( xmax - xmin )", "np.mean(ftimes), label=\"30-min forecast\", width=width2, color='dodgerblue' ) print( \"std:\", np.std( ftimes, ddof=1 ), len(", "in DETAIL: DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append( dat_ ) except: print( \"Failed\", data", "= 0.0 for i, key in enumerate( dic2.keys() ): lab = key if", "= '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000 ftimes, ctimes,", "+= dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax1.get_legend_handles_labels() ax1.legend(", ") ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) ofig = 'pdf/Fig06.pdf'", "\"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, # \"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0, }", ") handles, labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12 ) ax1.set_ylabel(", "= [] ctimes = [] # Prepare file path list for dir_ in", "mean_ = plot_hist( key=key, dat=dat_ ) 
#DETAIL_MODE[key] = mode_ DETAIL_MODE[key] = mean_ else:", ") # Square-root choice bins = int( np.sqrt( dat.size ) ) fig, ax", "ax1.bar( 2.0, dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1 ) acm2 += dic[key] # ax.legend(", "= [] anal_mean = [] write_anal = [] deallocate = [] write_restarta =", "= 60 # Scott's choise #h = 3.5 * np.std( dat, ddof=1 )", "f.name for f in os.scandir( top ) ] #if f.is_file() ] ftimes =", "DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_] = dat_ except: print( \"Failed\", data ) elif", "in DETAIL.keys(): DETAIL[key] = np.array( DETAIL[key] ) return( ftimes, ctimes, DETAIL ) def", "+= dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax.get_legend_handles_labels() ax.legend(", "from datetime import datetime, timedelta import numpy as np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs(", "4000 #dat_.size ls = 'dashed' color = 'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw,", "\"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\",", "for all i = -1 for path in path_l: if not os.path.isfile( path", "key=\"\", dat=np.array([]) ): import matplotlib.pyplot as plt from scipy import stats xmin =", "# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels),", "mode ) lw = 1.0 ymin = 0.0 ymax = 4000 #dat_.size ls", "SUM_test = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0, } for", "#print( DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM ) plot_bar_2p( dic=SUM, ftimes=ftimes ) #plot_bar_2p_scale( dic=SUM,", "tit4_ = data[4] dat_ = float( data[5] ) if tit_ == 'SCALE': i", "write_restartg = [] das_letkf = [] anal_mean = [] write_anal = [] deallocate", "== \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: 
SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes", "'/', '_' ) #.replace( '(', '_' ).replace( ')') ofig = 'png/1p_d4_{0:}.png'.format( key_ )", "plt.subplots( 1, 2, figsize=(6,4) ) # fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust(", "= dat_jit_test[ ~np.isnan(dat_jit_test) ] for key in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test < min_read_obs", "{1:} (N: {2:})'.format( \"cycle\", np.nanmean( ctimes ), len(ctimes) ) ) print( '{0:} average:", "bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0,", "= float( data[5] ) if tit_ == \"WRITE\": dat_ = float( data[6] )", "SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test = { \"SCALE\": 0.0, \"LETKF\":", "DETAIL_MODE_test = { } min_read_obs = 1.0 max_read_obs = 30.0 read_obs_ = DETAIL[\"READ_OBS\"]", "l in lines: if '[Info:fcst] End forecast' in l: data = l.split() try:", "max_read_obs )] = np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ] for key in DETAIL.keys():", "RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" if tit_ in DETAIL:", "data = l.split() try: ftimes.append( float( data[7] ) ) except: print( \"Failed\", data", "# dat_ -= dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL_test[key] ),", "dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01,", "#print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL = d4_computation_time( top=top, ) ctimes = np.array(", "'SCALE': DETAIL[key] = scale_l else: DETAIL[key] = np.copy( iarray ) # Get computation", "reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12 ) ax1.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim(", "linestyle='dashed', color='gray', alpha=0.5 ) 
width2 = 0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\", width=width2,", "#h = 3.5 * np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0) #bins", "'_' ) #.replace( '(', '_' ).replace( ')') ofig = 'png/1p_d4_{0:}.png'.format( key_ ) print(", "ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar( dic={} ): import matplotlib.pyplot", "plt.clf() plt.close('all') #### SUM = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, #", "= \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" if tit_", ") print( '{0:} average: {1:} (N: {2:})'.format( \"cycle\", np.nanmean( ctimes ), len(ctimes) )", "plt.clf() plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ): import matplotlib.pyplot as plt fig,", "0.0 ymax = 4000 #dat_.size ls = 'dashed' color = 'b' ax.vlines( x=mode,", "0 if tit_ in DETAIL: DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_] = dat_ except:", "ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, ) ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test,", "l.split() try: tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_].append( dat_ )", "for key in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test < min_read_obs ) | ( read_obs_test", "== 'JIT-DT': continue print( \"check\", dic2[key] ) ax1.bar( 2.0, dic2[key], bottom=acm2, label=None, color=c_l[i],", "), len(ctimes) ) ) print( '{0:} average: {1:} (N: {2:})'.format( \"fcst \", np.mean(", "lab == 'OBS': lab = 'Obs pre-\\nprocessing' elif lab == 'DATA TRANSFER': lab", ") | ( read_obs_test > max_read_obs )] = np.nan time_ = np.nanmean( DETAIL_test[key]", "'k' ] acm = 0.0 for i, key in enumerate( dic.keys() ): lab", "ax_l = [ ax1, ax2 ] tit_l = [ \"Data assimilation\", \"30-min forecast\"", "'......jitdt_read_toshiba:jitget:' in l: data = l.split() try: tit_ = \"JIT_GET\" dat_ = float(", "init_letkf = [] scale = [] others = [] read_obs = [] 
obsope", "handles, labels = ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel( 'Computation", "color=c_l[i], width=width1 ) acm += dic[key] acm2 = 0.0 for i, key in", ") if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all')", "~np.isnan(dat) & ~np.isnan( dat_jit_test ) ] dat_ = dat[ ~np.isnan(dat) ] num =", "quick_hist = False quick_bar = True quick_bar = False def d4_computation_time_nparray( top='' ):", "wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0 ) width1 = 0.8 #c_l = [ 'firebrick',", "data ) elif '[Info:DA]' in l: data = l.split() try: ctimes.append( float( data[6]", "1.00) ) handles, labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12 )", ") else: with np.load( fn_sum, allow_pickle=True ) as npz: for key in SUM.keys():", "= [ ax1, ax2 ] tit_l = [ \"Data assimilation\", \"30-min forecast\" ]", "fname = 'job.o' #[ f.name for f in os.scandir( os.path.join( top, dir_ )", "key == \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test = {", "plt.close('all') def plot_bar( dic={} ): import matplotlib.pyplot as plt fig, ax = plt.subplots(", ") if quick_hist: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all')", "np.nan time_ = np.nanmean( DETAIL[key] ) dat = DETAIL[key] dat_ = dat[ ~np.isnan(dat)", "RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta,", "): dirs = [ f.name for f in os.scandir( top ) ] #if", "< min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan time_ =", "ha='center', va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) #", "= 
DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[ ( read_obs_ < min_read_obs ) | (", "\"(b)\" ] for i, ax in enumerate( ax_l ): ax.text( 0.5, 1.01, tit_l[i],", "elif lab == 'DATA TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT':", "ctimes, DETAIL ) def plot_hist( key=\"\", dat=np.array([]) ): import matplotlib.pyplot as plt from", "1.0 ) yticks = np.arange( 0, 22, 2 ) ax1.set_ylim( 0, 20.0 )", "lab == 'JIT-DT': continue ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm", "} fn_sum = '{0:}/SUM.npz'.format( data_path, ) fn_ftimes = '{0:}/ftimes.npz'.format( data_path, ) if not", "for dir_ in dirs: fname = 'job.o' #[ f.name for f in os.scandir(", "read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan time_", ") #print( DETAIL_MODE ) #print( SUM_test ) #print( DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM", "DETAIL_test[key] ) ) if num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_", "'magenta', 'y', 'k' ] acm = 0.0 for i, key in enumerate( dic.keys()", "= [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l = [ 'dodgerblue', 'firebrick', 'forestgreen',", "'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ] c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k'", "bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00)", "= mean_ else: print( 'Not plot ', key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test =", "not os.path.isfile( path ): break with open( path ) as f: lines =", ") if num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) DETAIL_MODE_test[key]", "= np.mean( rbins[imode:imode+2] ) mean = np.mean( dat ) #print( len(rn), len(rbins), mode", "dic2.keys() ): lab = key if lab == 'OBS': lab = 'Obs pre-\\nprocessing'", "SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\":", "top=False, labelbottom=False ) ax_l = [ ax1, ax2 ] tit_l = [ \"Data", ") ax2.set_ylim( 
0, 151.0 ) ax2.set_xlim( 0, 2.0 ) ax2.hlines( xmin=0, xmax=2, y=[60,", "= '{0:}/ftimes.npz'.format( data_path, ) if not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m'", "> max_read_obs )] = np.nan time_ = np.nanmean( DETAIL[key] ) dat = DETAIL[key]", "= False quick_bar = True quick_bar = False def d4_computation_time_nparray( top='' ): dirs", "\"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif key == \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] # elif", "dat, ddof=1 ) / np.power( dat.size, 1.0/3.0) #bins = int( ( xmax -", "ftimes, ctimes, DETAIL ) def plot_hist( key=\"\", dat=np.array([]) ): import matplotlib.pyplot as plt", "tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_].append( dat_ ) except: print(", "label=None, color=c_l[i], width=width1 ) acm2 += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )", "i if i_ < 0: i_ = 0 if tit_ in DETAIL: DETAIL[tit_][i_]", "time_ = np.nanmean( DETAIL[key] ) dat = DETAIL[key] dat_ = dat[ ~np.isnan(dat) &", "len( dat_ ) # if key == \"READ_OBS\": # dat_ -= dat_jit_ print(", ") def plot_hist( key=\"\", dat=np.array([]) ): import matplotlib.pyplot as plt from scipy import", "= { } min_read_obs = 1.0 max_read_obs = 30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit", "plot_hist( key=\"\", dat=np.array([]) ): import matplotlib.pyplot as plt from scipy import stats xmin", "\"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\", ]", "plot_hist( key=key, dat=dat_ ) DETAIL_MODE_test[key] = mean_ else: print( 'Not plot ', key)", "in l: data = l.split() try: tit_ = \"JIT_GET\" dat_ = float( data[1]", "= 1.0 max_read_obs = 30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[ (", "ax.get_legend_handles_labels() ax.legend( 
reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel( 'Computation time (s)', fontsize=12", ") ax.set_yticks( yticks ) ofig = 'png/1p_d4_bar.png' print( ofig ) if quick_bar: plt.show()", "datetime import datetime, timedelta import numpy as np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path,", "top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000 ftimes, ctimes, DETAIL =", "y=[60, 120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) width2 = 0.8 ax2.bar( 1, np.mean(ftimes),", "ftimes=ftimes ) else: with np.load( fn_sum, allow_pickle=True ) as npz: for key in", ") except: print( \"Failed\", data ) elif '##### TIMER' in l: data =", "das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\":", "ftimes ), len(ftimes) ) ) print(\"\") DETAIL_MODE = { } DETAIL_MODE_test = {", ") print( '{0:} average: {1:} (N: {2:})'.format( \"fcst \", np.mean( ftimes ), len(ftimes)", "[] jitget = [] DETAIL = { \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\":", "DETAIL ) def d4_computation_time( top='', ctmax=600 ): dirs = [ f.name for f", "= [] obsope = [] process_obs = [] set_grid = [] read_gues =", "= int( ( xmax - xmin ) / h ) # Square-root choice", "TRANSFER\": 0.0, \"JIT-DT\": 0.0, } fn_sum = '{0:}/SUM.npz'.format( data_path, ) fn_ftimes = '{0:}/ftimes.npz'.format(", "= 30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[ ( read_obs_ < min_read_obs", "= 0.1) plt.clf() plt.close('all') def plot_bar( dic={} ): import matplotlib.pyplot as plt fig,", "== \"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key == \"READ_OBS\": 
SUM_test[\"OBS\"] += DETAIL_MODE_test[key] #", "continue ax.bar( '', dic[key], bottom=acm, label=lab, color=c_l[i] ) acm += dic[key] # ax.legend(", "xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim(", "write_anal = [] deallocate = [] write_restarta = [] others = [] finalize", "datetime, timedelta import numpy as np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True )", "#[ f.name for f in os.scandir( os.path.join( top, dir_ ) ) ] #if", ") ] #if f.is_file() ] ftimes = [] ctimes = [] path_l =", ") dat = DETAIL_test[key] print( key, dat ) #dat_ = dat[ ~np.isnan(dat) &", "tit_l = [ \"Data assimilation\", \"30-min forecast\" ] pnum_l = [ \"(a)\", \"(b)\"", ") #print( DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM ) plot_bar_2p( dic=SUM, ftimes=ftimes ) #plot_bar_2p_scale(", "np.arange( 0, 22, 2 ) ax1.set_ylim( 0, 20.0 ) ax1.set_yticks( yticks ) ax1.tick_params(", "( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan", "if quick_hist: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') return(", "'DATA TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue ax.bar( '',", "in lines: if '[Info:fcst] End forecast' in l: data = l.split() try: ftimes.append(", "\", key, time_, num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) ) if num", ") mean = np.mean( dat ) #print( len(rn), len(rbins), mode ) lw =", "\"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\",", "#print( len(rn), len(rbins), mode ) lw = 1.0 ymin = 0.0 ymax =", "len( ftimes ) ) ax2.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax_l =", "color='gray', alpha=0.5 ) width2 = 0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\", width=width2, 
color='dodgerblue'", "0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10,", "dat_ except: print( \"Failed\", data ) return( ftimes, ctimes, DETAIL ) def d4_computation_time(", "key if lab == 'OBS': lab = 'Obs pre-\\nprocessing' elif lab == 'DATA", "read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan time_", "for key in DETAIL.keys(): DETAIL[key][ ( read_obs_ < min_read_obs ) | ( read_obs_", "data[5] ) if tit_ == 'SCALE': scale_l.append( dat_ ) except: print( \"Failed\", data", "] pnum_l = [ \"(a)\", \"(b)\" ] for i, ax in enumerate( ax_l", "ofig ) if quick_hist: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf()", "= plot_hist( key=key, dat=dat_ ) DETAIL_MODE_test[key] = mean_ else: print( 'Not plot ',", "width2 = 0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\", width=width2, color='dodgerblue' ) print( \"std:\",", "tit_ in DETAIL: DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_] = dat_ except: print( \"Failed\",", "path_l: if not os.path.isfile( path ): break with open( path ) as f:", "= 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size ) ax.text( 0.99, 0.99, text_, fontsize=12,", "= 0.0 for i, key in enumerate( dic.keys() ): lab = key if", "forecast' in l: data = l.split() try: ftimes.append( float( data[7] ) ) except:", "acm2 += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax1.get_legend_handles_labels()", "\"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\": jitget, } # Prepare file", "+= DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes ) else: with", "= 1.0 ymin = 0.0 ymax = 4000 #dat_.size ls = 'dashed' color", "= 0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\", width=width2, color='dodgerblue' ) print( \"std:\", np.std(", "in os.scandir( top 
) ] #if f.is_file() ] path_l = [] ftimes =", "ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) color = 'k' ax.vlines( x=mean,", "dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ] for key in DETAIL.keys(): DETAIL[key][ ( read_obs_ <", "0.0 for i, key in enumerate( dic2.keys() ): lab = key if lab", "tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left',", "obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues,", ") ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed', color='gray', alpha=0.5", "plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') return( mode, mean", "yticks = np.arange( 0, 22, 2 ) ax1.set_ylim( 0, 20.0 ) ax1.set_yticks( yticks", "i_ = i if i_ < 0: i_ = 0 if tit_ in", "plt from scipy import stats xmin = 0 xmax = 60 # Scott's", "False quick_hist = False quick_bar = True quick_bar = False def d4_computation_time_nparray( top=''", "= np.copy( iarray ) # Get computation time for all i = -1", "SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum, **SUM, ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes ) else:", "20, 4 ), lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim( 0, 151.0 ) ax2.set_xlim(", "\", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) ) if num", "'{0:}/ftimes.npz'.format( data_path, ) if not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test", "with open( path ) as f: lines = f.readlines() for l in lines:", ") #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 32, 2 ) ax.set_ylim(", "key.replace( ' ', '_' 
).replace( '/', '_' ) #.replace( '(', '_' ).replace( ')')", "#if f.is_file() ] path_l.append( os.path.join( top, dir_, fname ) ) # Get computation", "max_read_obs )] = np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ] for key in DETAIL_test.keys():", "i, ax in enumerate( ax_l ): ax.text( 0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center',", "len(rn), len(rbins), mode ) lw = 1.0 ymin = 0.0 ymax = 4000", "'OBS': lab = 'Obs pre-\\nprocessing' elif lab == 'DATA TRANSFER': lab = 'Memory", "& ~np.isnan( dat_jit_test ) ] dat_ = dat[ ~np.isnan(dat) ] num = len(", "key=key, dat=dat_ ) DETAIL_MODE_test[key] = mean_ else: print( 'Not plot ', key) for", "ftimes = [] ctimes = [] # Prepare file path list for dir_", "labelbottom=False ) ax_l = [ ax1, ax2 ] tit_l = [ \"Data assimilation\",", "'k' ax.vlines( x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f}", "if key == \"READ_OBS\": # dat_ -= dat_jit_ print( \"#### \", key, time_,", "plt.close('all') #### SUM = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, # \"DATA", "float( data[6] ) ) except: print( \"Failed\", data ) elif '##### TIMER' in", "len(rbins), mode ) lw = 1.0 ymin = 0.0 ymax = 4000 #dat_.size", "path_l = [] ftimes = [] ctimes = [] # Prepare file path", "set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\":", "\"WRITE RESTART/GRADS(GUES)\" i_ = i if i_ < 0: i_ = 0 if", "): import matplotlib.pyplot as plt fig, ax = plt.subplots( 1, 1, figsize=(5,5) )", "ymin = 0.0 ymax = 4000 #dat_.size ls = 'dashed' color = 'b'", "[ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ] c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod',", "Get computation time for all i = -1 for path in path_l: if", "bins=bins, alpha=0.6 ) imode = np.argmax( rn ) mode = np.mean( rbins[imode:imode+2] )", ") except: print( 
\"Failed\", data ) elif '[Info:DA]' in l: data = l.split()", "figsize=(6,4) ) # fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95,", "left=0.15, bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim(", "== \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif key == \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] #", "color=c_l[i] ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels", "'{0:}/SUM.npz'.format( data_path, ) fn_ftimes = '{0:}/ftimes.npz'.format( data_path, ) if not USE_ARCH_DAT: top =", "DETAIL[\"OTHERS\"][i_] = dat_ except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data", ") # fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92,", "= dat_ else: DETAIL[\"OTHERS\"][i_] = dat_ except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:'", "fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, ) rn, rbins, rpatches = ax.hist( dat, range=(xmin,", "\"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT = True #USE_ARCH_DAT = False quick_hist =", "ax.text( 0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.set_xlim( xmin, xmax )", "figsize=(6,4) ) fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, ) rn, rbins, rpatches = ax.hist(", "plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ): import matplotlib.pyplot as plt fig, ( ax1,ax2 )", "= False def d4_computation_time_nparray( top='' ): dirs = [ f.name for f in", "== \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"] +=", "print( \"#### \", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) )", "+= DETAIL_MODE_test[key] 
elif key == \"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif key ==", "write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\":", "time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 32,", "ftimes = [] ctimes = [] path_l = [] init = [] init_others", "width=width1 ) acm += dic[key] acm2 = 0.0 for i, key in enumerate(", "time_ = np.nanmean( DETAIL_test[key] ) dat = DETAIL_test[key] print( key, dat ) #dat_", "dat_ ) # if key == \"READ_OBS\": # dat_ -= dat_jit_ print( \"####", "dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles),", ") width2 = 0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\", width=width2, color='dodgerblue' ) print(", "-= dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key]", "] ftimes = [] ctimes = [] path_l = [] init = []", "[ ax1, ax2 ] tit_l = [ \"Data assimilation\", \"30-min forecast\" ] pnum_l", "lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) ax2.set_ylim( 0, 151.0 ) ax2.set_xlim( 0, 2.0 )", "top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000 ftimes,", "#dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test < min_read_obs ) | ( read_obs_test >", "TIMER' in l: data = l.split() try: tit_ = data[3] dat_ = float(", "== \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" if tit_ in DETAIL: DETAIL[tit_].append( dat_ )", "DETAIL[tit_].append( dat_ ) except: print( \"Failed\", data ) for 
key in DETAIL.keys(): DETAIL[key]", "\"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0, } for key in DETAIL_MODE_test.keys():", ") if key == \"READ_OBS\": dat_ -= dat_jit_ print( \"#### \", key, time_,", "= len( dat_ ) if key == \"READ_OBS\": dat_ -= dat_jit_ print( \"####", "] num = len( dat_ ) if key == \"READ_OBS\": dat_ -= dat_jit_", "l: data = l.split() try: ftimes.append( float( data[7] ) ) except: print( \"Failed\",", "i += 1 if tit_ == \"WRITE\": dat_ = float( data[6] ) if", "= l.split() try: tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_].append( dat_", "\"JIT_GET\": jitget, } # Prepare file path list for dir_ in dirs: fname", "plot_bar_2p( dic={}, ftimes=np.array([]) ): import matplotlib.pyplot as plt fig, ( ax1,ax2 ) =", "matplotlib.pyplot as plt fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) )", "1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) # ofig = 'png/2p_d4_bar_scale.png' print( ofig", "bottom=0.05, right=0.5, top=0.92, ) #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l", "pad_inches = 0.1) plt.clf() plt.close('all') #### SUM = { \"SCALE\": 0.0, \"LETKF\": 0.0,", "), np.nanmin( DETAIL[key] ) ) if num > 100: mode_, mean_ = plot_hist(", "= 0.1) plt.clf() plt.close('all') return( mode, mean ) def plot_bar_2p( dic={}, ftimes=np.array([]) ):", "elif '##### TIMER' in l: data = l.split() try: tit_ = data[3] dat_", "lines: if '[Info:fcst] End forecast' in l: data = l.split() try: ftimes.append( float(", "mode_, mean_ = plot_hist( key=key, dat=dat_ ) #DETAIL_MODE[key] = mode_ DETAIL_MODE[key] = mean_", "\"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test = { \"SCALE\": 0.0,", "32, 2 ) ax.set_ylim( 0, 31.0 ) ax.set_yticks( yticks ) ofig = 'png/1p_d4_bar.png'", "( read_obs_ > max_read_obs )] = np.nan time_ = np.nanmean( DETAIL[key] ) dat", "hspace=0.05) ax1.set_xlim( 0, 3.0 ) width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue',", "\"std:\", 
np.std( ftimes, ddof=1 ), len( ftimes ) ) ax2.tick_params( axis='x', which='both', bottom=False,", "key == \"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key == \"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key]", "scipy import stats xmin = 0 xmax = 60 # Scott's choise #h", "continue ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] acm2", "top='' ): dirs = [ f.name for f in os.scandir( top ) ]", "+= dic[key] acm2 = 0.0 for i, key in enumerate( dic2.keys() ): lab", "0.0, } for key in DETAIL_MODE_test.keys(): if key == \"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key]", ") return( ftimes, ctimes, DETAIL ) def plot_hist( key=\"\", dat=np.array([]) ): import matplotlib.pyplot", "1000 ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, ) ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray(", "plot_hist( key=key, dat=dat_ ) #DETAIL_MODE[key] = mode_ DETAIL_MODE[key] = mean_ else: print( 'Not", "DETAIL_MODE[key] SUM_test = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0, }", "ctimes = np.array( ctimes ) print( '{0:} average: {1:} (N: {2:})'.format( \"cycle\", np.nanmean(", ") ctimes = np.array( ctimes ) print( '{0:} average: {1:} (N: {2:})'.format( \"cycle\",", "np.nanmin( DETAIL_test[key] ) ) if num > 100: mode_, mean_ = plot_hist( key=key,", "dirs: fname = 'job.o' #[ f.name for f in os.scandir( os.path.join( top, dir_", "if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') ####", "= [ 'cyan', 'magenta', 'y', 'k' ] acm = 0.0 for i, key", "# fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3,", "'/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000 ftimes, ctimes, DETAIL = 
d4_computation_time_nparray( top=top, ) ftimes_test, ctimes_test,", ") # if key == \"READ_OBS\": # dat_ -= dat_jit_ print( \"#### \",", "= [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ] #c_l = [ 'cyan', 'magenta',", "= [] others = [] read_obs = [] obsope = [] process_obs =", "22, 2 ) ax1.set_ylim( 0, 20.0 ) ax1.set_yticks( yticks ) ax1.tick_params( axis='x', which='both',", "0, 31.0 ) ax.set_yticks( yticks ) ofig = 'png/1p_d4_bar.png' print( ofig ) if", "dat_jit[ ~np.isnan(dat_jit) ] for key in DETAIL.keys(): DETAIL[key][ ( read_obs_ < min_read_obs )", "#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l = [ 'dodgerblue', 'firebrick',", "as plt fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) ) #", "else: DETAIL[key] = np.copy( iarray ) # Get computation time for all i", "= True quick_bar = False def d4_computation_time_nparray( top='' ): dirs = [ f.name", "dat_ else: DETAIL[\"OTHERS\"][i_] = dat_ except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in", "\"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0, } for key in DETAIL_MODE_test.keys(): if key", "\"JIT-DT\": 0.0, } for key in DETAIL_MODE_test.keys(): if key == \"SCALE\": SUM_test[\"SCALE\"] +=", "key == \"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif key == \"READ_GUES\" or key", "elif '[Info:DA]' in l: data = l.split() try: ctimes.append( float( data[6] ) )", "fontsize=12, transform=ax.transAxes, ha='right', va='top' ) tit_ = key ax.text( 0.5, 1.01, tit_, fontsize=12,", "forecast\" ] pnum_l = [ \"(a)\", \"(b)\" ] for i, ax in enumerate(", "'JIT-DT': continue print( \"check\", dic2[key] ) ax1.bar( 2.0, dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1", "ax.vlines( x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format(", "key == \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key] else: SUM_test[\"LETKF\"] += DETAIL_MODE_test[key] np.savez( fn_sum, **SUM,", 
"mode = np.mean( rbins[imode:imode+2] ) mean = np.mean( dat ) #print( len(rn), len(rbins),", "'Memory copy' elif lab == 'JIT-DT': continue ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i],", "data[6] ) ) except: print( \"Failed\", data ) elif '##### TIMER' in l:", "try: tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_][i] = dat_ except:", "except: print( \"Failed\", data ) elif '##### TIMER' in l: data = l.split()", ") return( ftimes, ctimes, DETAIL ) def d4_computation_time( top='', ctmax=600 ): dirs =", "[] init_letkf = [] scale = [] others = [] read_obs = []", "\"WRITE RESTART/GRADS(GUES)\" if tit_ in DETAIL: DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append( dat_ )", "ax = plt.subplots( 1, 1, figsize=(6,4) ) fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, )", "tit_ = \"WRITE RESTART/GRADS(GUES)\" if tit_ in DETAIL: DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append(", "top, dir_ ) ) ] #if f.is_file() ] path_l.append( os.path.join( top, dir_, fname", "fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0 ) width1 =", "ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed', color='gray', alpha=0.5 )", "\"check\", dic2[key] ) ax1.bar( 2.0, dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1 ) acm2 +=", "data[6] ) if tit4_ == \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ ==", "\"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\": jitget,", "'_' ).replace( '/', '_' ) #.replace( '(', '_' ).replace( ')') ofig = 'png/1p_d4_{0:}.png'.format(", "top ) ] #if f.is_file() ] ftimes = [] ctimes = [] path_l", "dat_jit ) ] num = len( dat_ ) if key == \"READ_OBS\": dat_", "3.0 ) width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]", "[ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", 
\"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\",", "dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] )", "read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\":", ") ax1.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax1.hlines( xmin=0, xmax=2, y=np.arange( 4,", "right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0 ) width1 = 0.8 #c_l =", "\"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\", ] # prepare nan", "if key == \"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key == \"READ_OBS\": SUM_test[\"OBS\"] +=", "lab == 'JIT-DT': continue print( \"check\", dic2[key] ) ax1.bar( 2.0, dic2[key], bottom=acm2, label=None,", "= np.nan time_ = np.nanmean( DETAIL[key] ) dat = DETAIL[key] dat_ = dat[", "= data[3] tit4_ = data[4] dat_ = float( data[5] ) if tit_ ==", "ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) ofig = 'pdf/Fig06.pdf' print(", "ax1.set_ylim( 0, 20.0 ) ax1.set_yticks( yticks ) ax1.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False", ") ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) # ofig =", "init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean,", "ftimes=ftimes ) np.savez( fn_ftimes, ftimes=ftimes ) else: with np.load( fn_sum, allow_pickle=True ) as", "data[3] tit4_ = data[4] dat_ = float( data[5] ) if tit_ == 'SCALE':", "ymax=ymax, linewidths=lw, linestyles=ls, color=color ) color = 'k' ax.vlines( x=mean, ymin=ymin, 
ymax=ymax, linewidths=lw,", "= \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_][i] = dat_ except: print( \"Failed\",", "'##### TIMER' in l: data = l.split() try: tit_ = data[3] dat_ =", "in l: data = l.split() try: tit_ = data[3] dat_ = float( data[5]", "[] read_gues = [] gues_mean = [] write_restartg = [] das_letkf = []", ") color = 'k' ax.vlines( x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) text_", "fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 32, 2 )", "'(', '_' ).replace( ')') ofig = 'png/1p_d4_{0:}.png'.format( key_ ) print( ofig ) if", "h ) # Square-root choice bins = int( np.sqrt( dat.size ) ) fig,", "\"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize,", "', key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test < min_read_obs", "DETAIL_MODE[key] elif key == \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] # elif key == \"READ_GUES\"", "1 if tit_ == \"WRITE\": dat_ = float( data[6] ) if tit4_ ==", "2 ) ax.set_ylim( 0, 31.0 ) ax.set_yticks( yticks ) ofig = 'png/1p_d4_bar.png' print(", ") tit_ = key ax.text( 0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes, ha='center', va='bottom' )", "= \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_].append( dat_ ) except: print( \"Failed\",", "np.array( scale_l ) key_l = [ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\",", "'gold' ] #c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ] c_l = [", "key in enumerate( dic.keys() ): lab = key if lab == 'OBS': lab", "# Prepare file path list for dir_ in dirs: path_l.append( os.path.join( top, dir_,", "lw = 1.0 ymin = 0.0 ymax = 4000 #dat_.size ls = 'dashed'", "from scipy import stats xmin = 0 xmax = 60 # Scott's choise", "\"fcst \", np.mean( ftimes ), len(ftimes) ) ) print(\"\") 
DETAIL_MODE = { }", "[] init = [] init_others = [] init_letkf = [] scale = []", "+= DETAIL_MODE[key] SUM_test = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0,", "DETAIL[key] = scale_l else: DETAIL[key] = np.copy( iarray ) # Get computation time", "width=width1 ) acm2 += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels", "float( data[7] ) ) except: print( \"Failed\", data ) elif '[Info:DA]' in l:", "read_obs = [] obsope = [] process_obs = [] set_grid = [] read_gues", "plt fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) ) # fig.subplots_adjust(", "0, 2.0 ) ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5 )", ") if tit_ == 'SCALE': scale_l.append( dat_ ) except: print( \"Failed\", data )", "color=c_l[i], width=width1 ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles,", "== 'SCALE': scale_l.append( dat_ ) except: print( \"Failed\", data ) scale_l = np.array(", "[] ftimes = [] ctimes = [] # Prepare file path list for", "rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 ) imode = np.argmax(", "np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) ) if num > 100: mode_, mean_", "if lab == 'OBS': lab = 'Obs pre-\\nprocessing' elif lab == 'DATA TRANSFER':", "TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue print( \"check\", dic2[key]", "): ax.text( 0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.text( 0.0, 1.01,", "quick_hist: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') return( mode,", "fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.set_xlim( xmin, xmax ) ax.set_ylim( ymin, ymax )", "acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax.get_legend_handles_labels()", "ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), 
fontsize=12 ) ax1.set_ylabel( 'Computation time (s)', fontsize=12 )", ") | ( read_obs_ > max_read_obs )] = np.nan time_ = np.nanmean( DETAIL[key]", "print( key ) if key == \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif key ==", ") ) if num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_ )", ") for key in DETAIL.keys(): DETAIL[key] = np.array( DETAIL[key] ) return( ftimes, ctimes,", "] for i, ax in enumerate( ax_l ): ax.text( 0.5, 1.01, tit_l[i], fontsize=12,", "SUM.keys(): SUM[key] = npz[key] ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes'] print( SUM )", "elif '......jitdt_read_toshiba:jitget:' in l: data = l.split() try: tit_ = \"JIT_GET\" dat_ =", "l.split() try: ftimes.append( float( data[7] ) ) except: print( \"Failed\", data ) elif", "2.0, dic2[key], bottom=acm2, label=None, color=c_l[i], width=width1 ) acm2 += dic[key] # ax.legend( fontsize=12,", "dat_ = float( data[6] ) if tit4_ == \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\"", "except: print( \"Failed\", data ) for key in DETAIL.keys(): DETAIL[key] = np.array( DETAIL[key]", "f.is_file() ] path_l.append( os.path.join( top, dir_, fname ) ) # Get computation time", "max_read_obs )] = np.nan time_ = np.nanmean( DETAIL_test[key] ) dat = DETAIL_test[key] print(", "] c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ] #c_l = [", "print( '{0:} average: {1:} (N: {2:})'.format( \"fcst \", np.mean( ftimes ), len(ftimes) )", "xmax ) ax.set_ylim( ymin, ymax ) xlab = 'Computation time (s)' ylab =", "{} for key in key_l: if key == 'SCALE': DETAIL[key] = scale_l else:", "xmax), bins=bins, alpha=0.6 ) imode = np.argmax( rn ) mode = np.mean( rbins[imode:imode+2]", "0, 1.0 ) yticks = np.arange( 0, 32, 2 ) ax.set_ylim( 0, 31.0", "= d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL = d4_computation_time( top=top,", "\"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == 
\"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\"", "SUM_test ) #print( DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM ) plot_bar_2p( dic=SUM, ftimes=ftimes )", ") iarray[:] = np.nan DETAIL = {} for key in key_l: if key", "figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) #c_l = [ 'firebrick', 'dodgerblue',", "lab == 'DATA TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue", "ax.bar( '', dic[key], bottom=acm, label=lab, color=c_l[i] ) acm += dic[key] # ax.legend( fontsize=12,", "if '##### TIMER' in l: data = l.split() try: tit_ = data[3] tit4_", "label=lab, color=c_l[i], width=width1 ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )", "dat_ = float( data[1] ) DETAIL[tit_][i] = dat_ except: print( \"Failed\", data )", "'png/1p_d4_{0:}.png'.format( key_ ) print( ofig ) if quick_hist: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\",", "import os import sys from datetime import datetime, timedelta import numpy as np", "int( ( xmax - xmin ) / h ) # Square-root choice bins", "'', dic[key], bottom=acm, label=lab, color=c_l[i] ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01,", "#.replace( '(', '_' ).replace( ')') ofig = 'png/1p_d4_{0:}.png'.format( key_ ) print( ofig )", "else: print( 'Not plot ', key) for key in DETAIL_MODE.keys(): print( key )", "DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )]", "labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12 ) ax1.set_ylabel( 'Computation time", ") lw = 1.0 ymin = 0.0 ymax = 4000 #dat_.size ls =", "= float( data[6] ) if tit4_ == \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif", "as plt fig, ax = plt.subplots( 1, 1, figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05,", ") = plt.subplots( 1, 2, figsize=(6,4) ) # fig.subplots_adjust( left=0.15, bottom=0.05, 
right=0.5, top=0.92,", ") if not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000'", "elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" if tit_ in DETAIL: DETAIL[tit_].append(", "== \"READ_OBS\": # dat_ -= dat_jit_ print( \"#### \", key, time_, num, np.nanmax(", "l.split() try: tit_ = data[3] tit4_ = data[4] dat_ = float( data[5] )", "\"Data assimilation\", \"30-min forecast\" ] pnum_l = [ \"(a)\", \"(b)\" ] for i,", "= \"WRITE RESTART/GRADS(GUES)\" if tit_ in DETAIL: DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append( dat_", "ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes'] print( SUM ) #print( DETAIL_MODE ) #print(", "False quick_bar = True quick_bar = False def d4_computation_time_nparray( top='' ): dirs =", "{ } min_read_obs = 1.0 max_read_obs = 30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit =", "read_obs_test > max_read_obs )] = np.nan time_ = np.nanmean( DETAIL_test[key] ) dat =", "top=top, ) ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes,", ") elif '[Info:DA]' in l: data = l.split() try: ctimes.append( float( data[6] )", "= key if lab == 'OBS': lab = 'Obs pre-\\nprocessing' elif lab ==", "bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0", "l: data = l.split() try: tit_ = data[3] tit4_ = data[4] dat_ =", "= [] # Prepare file path list for dir_ in dirs: path_l.append( os.path.join(", "\"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, 
\"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid,", "file path list for dir_ in dirs: fname = 'job.o' #[ f.name for", "= ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 ) imode = np.argmax( rn )", "d4_computation_time_nparray( top='' ): dirs = [ f.name for f in os.scandir( top )", "# elif key == \"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] +=", "read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan dat_jit_", "in dirs: fname = 'job.o' #[ f.name for f in os.scandir( os.path.join( top,", "which='both', bottom=False, top=False, labelbottom=False ) ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ),", "bins = int( np.sqrt( dat.size ) ) fig, ax = plt.subplots( 1, 1,", "ax2.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax_l = [ ax1, ax2 ]", "1, 1, figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) #c_l = [", "os.path.isfile( path ): break with open( path ) as f: lines = f.readlines()", "'k' ] #c_l = [ 'cyan', 'magenta', 'y', 'k' ] acm = 0.0", "if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def", "top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0 ) width1 = 0.8 #c_l = [", "ha='left', va='bottom' ) # ofig = 'png/2p_d4_bar_scale.png' print( ofig ) if quick_bar: plt.show()", "return( ftimes, ctimes, DETAIL ) def plot_hist( key=\"\", dat=np.array([]) ): import matplotlib.pyplot as", "= l.split() try: tit_ = data[3] tit4_ = data[4] dat_ = float( data[5]", "data[1] ) DETAIL[tit_][i] = dat_ except: print( \"Failed\", data ) return( ftimes, ctimes,", "elif key == \"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif key == \"READ_GUES\" or", "data[4] dat_ = float( data[5] ) if tit_ == 'SCALE': i += 1", "key, dat ) #dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ] dat_", "dat_ -= dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin(", 
"'JIT-DT': continue ax.bar( '', dic[key], bottom=acm, label=lab, color=c_l[i] ) acm += dic[key] #", "matplotlib.pyplot as plt from scipy import stats xmin = 0 xmax = 60", "\"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\", ] # prepare nan array", "= plot_hist( key=key, dat=dat_ ) #DETAIL_MODE[key] = mode_ DETAIL_MODE[key] = mean_ else: print(", "DETAIL ) def plot_hist( key=\"\", dat=np.array([]) ): import matplotlib.pyplot as plt from scipy", "i, key in enumerate( dic.keys() ): lab = key if lab == 'OBS':", "= DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs", "try: ftimes.append( float( data[7] ) ) except: print( \"Failed\", data ) elif '[Info:DA]'", "[] DETAIL = { \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others,", "if num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) DETAIL_MODE_test[key] =", "l: data = l.split() try: ctimes.append( float( data[6] ) ) except: print( \"Failed\",", "== 'DATA TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue print(", "pre-\\nprocessing' elif lab == 'DATA TRANSFER': lab = 'Memory copy' elif lab ==", ") yticks = np.arange( 0, 32, 2 ) ax.set_ylim( 0, 31.0 ) ax.set_yticks(", "tit_ = key ax.text( 0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.set_xlim(", "lines = f.readlines() for l in lines: if '##### TIMER' in l: data", "] path_l = [] ftimes = [] ctimes = [] # Prepare file", "bottom=0.15, right=0.95, top=0.92, ) rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins,", "# Prepare file path list for dir_ in dirs: fname = 'job.o' #[", "data = l.split() try: ctimes.append( float( data[6] ) ) except: print( \"Failed\", data", "plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar( dic={} ): import", "DETAIL_MODE_test[key] elif key == 
\"READ_OBS\": SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif key == \"READ_GUES\"", "print( \"#### \", key, time_, num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) )", "= [] read_obs = [] obsope = [] process_obs = [] set_grid =", "= dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ] num = len( dat_ )", "d4_computation_time_nparray( top=top, ) ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5])", "SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else: SUM[\"LETKF\"]", ") acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) ) handles, labels =", "= 0 if tit_ in DETAIL: DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_] = dat_", "0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\", width=width2, color='dodgerblue' ) print( \"std:\", np.std( ftimes,", "== \"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key", "dat_ = float( data[1] ) DETAIL[tit_].append( dat_ ) except: print( \"Failed\", data )", ") fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0 ) width1", "= mean_ else: print( 'Not plot ', key) for key in DETAIL_MODE.keys(): print(", "] acm = 0.0 for i, key in enumerate( dic.keys() ): lab =", "for SCALE for path in path_l: if not os.path.isfile( path ): break with", "SCALE for path in path_l: if not os.path.isfile( path ): break with open(", "nan array iarray = np.zeros( scale_l.shape ) iarray[:] = np.nan DETAIL = {}", "x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) color = 'k' ax.vlines( x=mean, ymin=ymin,", "read_obs_ > max_read_obs )] = np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ] for key", "tit_, fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.set_xlim( xmin, xmax ) ax.set_ylim( ymin, ymax", "key, time_, num, np.nanmax( DETAIL[key] 
), np.nanmin( DETAIL[key] ) ) if num >", ") ofig = 'pdf/Fig06.pdf' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig,", "except: print( \"Failed\", data ) return( ftimes, ctimes, DETAIL ) def d4_computation_time( top='',", "ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) ) # fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5,", ") except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data = l.split()", "'firebrick', 'gray', 'goldenrod', 'k' ] #c_l = [ 'cyan', 'magenta', 'y', 'k' ]", "'firebrick', 'forestgreen', 'goldenrod' ] c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]", "scale_l = [] # Get computation time for SCALE for path in path_l:", "read_gues = [] gues_mean = [] write_restartg = [] das_letkf = [] anal_mean", "write_anal, \"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\": jitget, }", "\"WRITE\": dat_ = float( data[6] ) if tit4_ == \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE", ") ax1.set_ylim( 0, 20.0 ) ax1.set_yticks( yticks ) ax1.tick_params( axis='x', which='both', bottom=False, top=False,", "color = 'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) color =", "ax_l ): ax.text( 0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.text( 0.0,", "data ) for key in DETAIL.keys(): DETAIL[key] = np.array( DETAIL[key] ) return( ftimes,", "\"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key]", "= 'Memory copy' elif lab == 'JIT-DT': continue print( \"check\", dic2[key] ) ax1.bar(", "'Memory copy' elif lab == 'JIT-DT': continue ax.bar( '', dic[key], bottom=acm, label=lab, color=c_l[i]", "va='bottom' ) ax.set_xlim( xmin, xmax ) ax.set_ylim( ymin, ymax ) xlab = 'Computation", "DETAIL_MODE_test[key] = mean_ else: print( 'Not plot ', key) for key in DETAIL_MODE.keys():", "\"READ_GUES\", 
\"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\",", "{ } DETAIL_MODE_test = { } min_read_obs = 1.0 max_read_obs = 30.0 read_obs_", "== 'OBS': lab = 'Obs pre-\\nprocessing' elif lab == 'DATA TRANSFER': lab =", "bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar( dic={} ): import matplotlib.pyplot as", ") ) except: print( \"Failed\", data ) elif '[Info:DA]' in l: data =", ") mode = np.mean( rbins[imode:imode+2] ) mean = np.mean( dat ) #print( len(rn),", "DETAIL[key] = np.copy( iarray ) # Get computation time for all i =", "= \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" i_ =", "array iarray = np.zeros( scale_l.shape ) iarray[:] = np.nan DETAIL = {} for", "= dat_ except: print( \"Failed\", data ) return( ftimes, ctimes, DETAIL ) def", "fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) ofig = 'pdf/Fig06.pdf' print( ofig ) if quick_bar:", "'JIT-DT': continue ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key]", "jitget = [] DETAIL = { \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init,", "'png/1p_d4_bar.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches =", "ftimes=np.array([]), dic2={} ): import matplotlib.pyplot as plt fig, ( ax1,ax2 ) = plt.subplots(", "DETAIL[key] = np.array( DETAIL[key] ) return( ftimes, ctimes, DETAIL ) def plot_hist( key=\"\",", "# ofig = 'png/2p_d4_bar_scale.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig,", "va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) ofig =", "DETAIL = d4_computation_time_nparray( top=top, ) ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, ) #print(", "= 
scale_l else: DETAIL[key] = np.copy( iarray ) # Get computation time for", "] for key in DETAIL.keys(): DETAIL[key][ ( read_obs_ < min_read_obs ) | (", "(s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 22, 2", "ax.set_ylim( 0, 31.0 ) ax.set_yticks( yticks ) ofig = 'png/1p_d4_bar.png' print( ofig )", "fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom'", "top=False, labelbottom=False ) ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed',", "average: {1:} (N: {2:})'.format( \"cycle\", np.nanmean( ctimes ), len(ctimes) ) ) print( '{0:}", "fn_ftimes, ftimes=ftimes ) else: with np.load( fn_sum, allow_pickle=True ) as npz: for key", "): lab = key if lab == 'OBS': lab = 'Obs pre-\\nprocessing' elif", "0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) # ofig = 'png/2p_d4_bar_scale.png' print(", "ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL =", "in enumerate( ax_l ): ax.text( 0.5, 1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom' )", "= dat[ ~np.isnan(dat) ] num = len( dat_ ) # if key ==", "\"Failed\", data ) scale_l = np.array( scale_l ) key_l = [ \"SCALE\", \"READ_OBS\",", "= np.arange( 0, 32, 2 ) ax.set_ylim( 0, 31.0 ) ax.set_yticks( yticks )", "ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) width2 = 0.8", "time for all i = -1 for path in path_l: if not os.path.isfile(", "'/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = 
'/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000 ftimes, ctimes, DETAIL", ") # Get computation time for path in path_l: if not os.path.isfile( path", "ax1.set_xlim( 0, 3.0 ) width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen',", "DETAIL_test[key][ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] =", "open( path ) as f: lines = f.readlines() for l in lines: if", "= l.split() try: tit_ = data[3] dat_ = float( data[5] ) if tit_", "lines: if '##### TIMER' in l: data = l.split() try: tit_ = data[3]", "= 'k' ax.vlines( x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) text_ = 'Mean:{0:.3f}", "Prepare file path list for dir_ in dirs: path_l.append( os.path.join( top, dir_, )", "color = 'k' ax.vlines( x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) text_ =", "SUM ) #print( DETAIL_MODE ) #print( SUM_test ) #print( DETAIL_MODE_test ) #sys.exit() #plot_bar(", "print( \"std:\", np.std( ftimes, ddof=1 ), len( ftimes ) ) ax2.tick_params( axis='x', which='both',", "np.mean( dat ) #print( len(rn), len(rbins), mode ) lw = 1.0 ymin =", "rbins[imode:imode+2] ) mean = np.mean( dat ) #print( len(rn), len(rbins), mode ) lw", "' ', '_' ).replace( '/', '_' ) #.replace( '(', '_' ).replace( ')') ofig", "'Not plot ', key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test", "0.1) plt.clf() plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ): import matplotlib.pyplot as plt", "| ( read_obs_test > max_read_obs )] = np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ]", "= np.argmax( rn ) mode = np.mean( rbins[imode:imode+2] ) mean = np.mean( dat", "[] finalize = [] jitget = [] DETAIL = { \"SCALE\": scale, \"READ_OBS\":read_obs,", "plt.clf() plt.close('all') def plot_bar( dic={} ): import 
matplotlib.pyplot as plt fig, ax =", "tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" if tit_ in DETAIL: DETAIL[tit_].append( dat_", "0, 32, 2 ) ax.set_ylim( 0, 31.0 ) ax.set_yticks( yticks ) ofig =", "~np.isnan( dat_jit ) ] num = len( dat_ ) if key == \"READ_OBS\":", "os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT = True #USE_ARCH_DAT = False quick_hist = False", "if tit_ in DETAIL: DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append( dat_ ) except: print(", "= np.nan DETAIL = {} for key in key_l: if key == 'SCALE':", "pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) ofig = 'pdf/Fig06.pdf' print( ofig ) if", "plot ', key) for key in DETAIL_MODE.keys(): print( key ) if key ==", "num = len( dat_ ) # if key == \"READ_OBS\": # dat_ -=", "= [] # Get computation time for SCALE for path in path_l: if", "= 'pdf/Fig06.pdf' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches", "list for dir_ in dirs: fname = 'job.o' #[ f.name for f in", "SUM[\"OBS\"] += DETAIL_MODE[key] # elif key == \"READ_GUES\" or key == \"WRITE_ANAL\": #", ") ax2.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax_l = [ ax1, ax2", "\"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\": das_letkf, \"ANAL_MEAN\": anal_mean, \"WRITE_ANAL\": write_anal,", "[] process_obs = [] set_grid = [] read_gues = [] gues_mean = []", "] #if f.is_file() ] path_l.append( os.path.join( top, dir_, fname ) ) # Get", "left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 3.0 ) width1 = 0.8", "DETAIL[key] dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ] num = len(", "ha='center', va='bottom' ) ax.set_xlim( xmin, xmax ) ax.set_ylim( ymin, ymax ) xlab =", "[ \"(a)\", \"(b)\" ] for i, ax in enumerate( ax_l ): ax.text( 0.5,", "import matplotlib.pyplot as plt from scipy import stats xmin = 0 xmax =", 
"#print( SUM_test ) #print( DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM ) plot_bar_2p( dic=SUM, ftimes=ftimes", ") acm += dic[key] acm2 = 0.0 for i, key in enumerate( dic2.keys()", "handles, labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12 ) ax1.set_ylabel( 'Computation", ").replace( '/', '_' ) #.replace( '(', '_' ).replace( ')') ofig = 'png/1p_d4_{0:}.png'.format( key_", "== 'DATA TRANSFER': lab = 'Memory copy' elif lab == 'JIT-DT': continue ax.bar(", ") ) ax2.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax_l = [ ax1,", "#dat_.size ls = 'dashed' color = 'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls,", "DETAIL = {} for key in key_l: if key == 'SCALE': DETAIL[key] =", "\"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key ==", "def plot_bar( dic={} ): import matplotlib.pyplot as plt fig, ax = plt.subplots( 1,", "for i, key in enumerate( dic.keys() ): lab = key if lab ==", "lab = 'Obs pre-\\nprocessing' elif lab == 'DATA TRANSFER': lab = 'Memory copy'", "num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) ) if num > 100: mode_,", ") np.savez( fn_ftimes, ftimes=ftimes ) else: with np.load( fn_sum, allow_pickle=True ) as npz:", "} DETAIL_MODE_test = { } min_read_obs = 1.0 max_read_obs = 30.0 read_obs_ =", "for i, key in enumerate( dic2.keys() ): lab = key if lab ==", "top=0.92, ) rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 )", "axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax_l = [ ax1, ax2 ] tit_l", "key_l: if key == 'SCALE': DETAIL[key] = scale_l else: DETAIL[key] = np.copy( iarray", "] #if f.is_file() ] ftimes = [] ctimes = [] path_l = []", ") ) except: print( \"Failed\", data ) elif '##### TIMER' in l: data", "( read_obs_test > max_read_obs )] = np.nan time_ = np.nanmean( DETAIL_test[key] ) dat", "( ax1,ax2 ) = 
plt.subplots( 1, 2, figsize=(6,4) ) # fig.subplots_adjust( left=0.15, bottom=0.05,", "DETAIL_MODE_test.keys(): if key == \"SCALE\": SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key == \"READ_OBS\": SUM_test[\"OBS\"]", "1.0 max_read_obs = 30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[ ( read_obs_", "s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size ) ax.text( 0.99, 0.99, text_, fontsize=12, transform=ax.transAxes, ha='right',", "# SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM[\"JIT-DT\"] += DETAIL_MODE[key] else:", "fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_ = key.replace( ' ', '_' ).replace( '/', '_'", "\"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_].append( dat_ ) except: print( \"Failed\", data", "bottom=False, top=False, labelbottom=False ) ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0,", "tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" i_", "np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ] for key in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test", "if not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max", ") #print( len(rn), len(rbins), mode ) lw = 1.0 ymin = 0.0 ymax", "= 'Computation time (s)' ylab = 'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11)", "quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar(", "for f in os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file() ]", "= np.array( DETAIL[key] ) return( ftimes, ctimes, DETAIL ) def 
plot_hist( key=\"\", dat=np.array([])", "| ( read_obs_ > max_read_obs )] = np.nan time_ = np.nanmean( DETAIL[key] )", "): import matplotlib.pyplot as plt fig, ( ax1,ax2 ) = plt.subplots( 1, 2,", "ofig = 'png/1p_d4_bar.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\",", "\"WRITE_ANAL\": # SUM[\"DATA TRANSFER\"] += DETAIL_MODE[key] elif key == \"JIT_GET\": SUM_test[\"JIT-DT\"] += DETAIL_MODE_test[key]", "def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ): import matplotlib.pyplot as plt fig, ( ax1,ax2", "color=color ) color = 'k' ax.vlines( x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color )", "[] set_grid = [] read_gues = [] gues_mean = [] write_restartg = []", "'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size ) ax.text( 0.99, 0.99, text_, fontsize=12, transform=ax.transAxes,", "ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ):", ") yticks = np.arange( 0, 22, 2 ) ax1.set_ylim( 0, 20.0 ) ax1.set_yticks(", "tit_ == \"WRITE\": dat_ = float( data[6] ) if tit4_ == \"RESTART/GRADS(ANAL)\": tit_", "20.0 ) ax1.set_yticks( yticks ) ax1.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax1.hlines(", "float( data[5] ) if tit_ == 'SCALE': i += 1 if tit_ ==", "l.split() try: tit_ = data[3] dat_ = float( data[5] ) if tit_ ==", "return( mode, mean ) def plot_bar_2p( dic={}, ftimes=np.array([]) ): import matplotlib.pyplot as plt", "{ \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\": 0.0, # \"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0,", "\"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\",", "DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[ ( read_obs_ < min_read_obs ) | ( read_obs_", "with np.load( fn_sum, 
allow_pickle=True ) as npz: for key in SUM.keys(): SUM[key] =", "if tit_ == 'SCALE': i += 1 if tit_ == \"WRITE\": dat_ =", ") width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l", "dic[key], bottom=acm, label=lab, color=c_l[i] ) acm += dic[key] # ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00)", ") dat = DETAIL[key] dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ]", "alpha=0.6 ) imode = np.argmax( rn ) mode = np.mean( rbins[imode:imode+2] ) mean", "fontsize=12 ) ax1.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks", "\"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE", "\"Failed\", data ) for key in DETAIL.keys(): DETAIL[key] = np.array( DETAIL[key] ) return(", "else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') #### SUM = {", "num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) DETAIL_MODE_test[key] = mean_", "0.0, \"LETKF\": 0.0, \"OBS\": 0.0, # \"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0, } fn_sum", "[] write_restarta = [] others = [] finalize = [] jitget = []", "elif lab == 'JIT-DT': continue ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 )", "np.mean( ftimes ), len(ftimes) ) ) print(\"\") DETAIL_MODE = { } DETAIL_MODE_test =", "dat_jit[ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] =", "in DETAIL_MODE.keys(): print( key ) if key == \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif", "= { \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf,", "try: tit_ = data[3] tit4_ = data[4] dat_ = float( data[5] ) if", "key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test < min_read_obs )", "2.0 ) width1 = 0.8 #c_l = [ 'firebrick', 
'dodgerblue', 'limegreen', 'gold' ]", "if i_ < 0: i_ = 0 if tit_ in DETAIL: DETAIL[tit_][i_] =", "dir_, fname ) ) # Get computation time for path in path_l: if", "DETAIL_test = d4_computation_time_nparray( top=top_test, ) #print( DETAIL[\"DAS_LETKF\"][0:5], DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL = d4_computation_time(", "DETAIL[key] ), np.nanmin( DETAIL[key] ) ) if num > 100: mode_, mean_ =", "DETAIL[key][ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] =", "path ): break with open( path ) as f: lines = f.readlines() for", "mean_ else: print( 'Not plot ', key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET']", "anal_mean = [] write_anal = [] deallocate = [] write_restarta = [] others", "ofig = 'png/1p_d4_{0:}.png'.format( key_ ) print( ofig ) if quick_hist: plt.show() else: plt.savefig(", "'pdf/Fig06.pdf' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches =", "'goldenrod' ] c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ] #c_l =", "dat_ = float( data[5] ) if tit_ == 'SCALE': i += 1 if", "1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] acm2 = 0.0", "fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) ) # fig.subplots_adjust( left=0.15,", "dat = DETAIL_test[key] print( key, dat ) #dat_ = dat[ ~np.isnan(dat) & ~np.isnan(", "= np.array( ctimes ) print( '{0:} average: {1:} (N: {2:})'.format( \"cycle\", np.nanmean( ctimes", "= len( dat_ ) # if key == \"READ_OBS\": # dat_ -= dat_jit_", ") scale_l = np.array( scale_l ) key_l = [ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\",", "\"JIT_GET\", ] # prepare nan array iarray = np.zeros( scale_l.shape ) iarray[:] =", "= True #USE_ARCH_DAT = False quick_hist = False quick_bar = True quick_bar =", "scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs, 
\"SET_GRID\":", "in SUM.keys(): SUM[key] = npz[key] ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes'] print( SUM", "in DETAIL.keys(): DETAIL[key][ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs", "enumerate( dic.keys() ): lab = key if lab == 'OBS': lab = 'Obs", "= d4_computation_time( top=top, ) ctimes = np.array( ctimes ) print( '{0:} average: {1:}", "key == \"READ_OBS\": # dat_ -= dat_jit_ print( \"#### \", key, time_, num,", "all i = -1 for path in path_l: if not os.path.isfile( path ):", "[] anal_mean = [] write_anal = [] deallocate = [] write_restarta = []", "plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ): import matplotlib.pyplot as plt fig, (", "color=color ) text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size ) ax.text( 0.99,", "time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 22,", "[] obsope = [] process_obs = [] set_grid = [] read_gues = []", "dat.size ) ax.text( 0.99, 0.99, text_, fontsize=12, transform=ax.transAxes, ha='right', va='top' ) tit_ =", "ctimes.append( float( data[6] ) ) except: print( \"Failed\", data ) elif '##### TIMER'", "\"READ_OBS\": # dat_ -= dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL_test[key]", "f in os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file() ] path_l.append(", "float( data[6] ) if tit4_ == \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_", "= [] write_restarta = [] others = [] finalize = [] jitget =", "= i if i_ < 0: i_ = 0 if tit_ in DETAIL:", ") print( ofig ) if quick_hist: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches =", "#sys.exit() #plot_bar( dic=SUM ) plot_bar_2p( dic=SUM, ftimes=ftimes ) #plot_bar_2p_scale( dic=SUM, dic2=SUM_test, ftimes=ftimes )", "elif key == \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] # elif key == \"READ_GUES\" or", "== \"READ_OBS\": dat_ -= dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL[key]", "#if 
f.is_file() ] ftimes = [] ctimes = [] path_l = [] init", "USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max = 1000", "key == \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key] elif key == \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key]", "process_obs = [] set_grid = [] read_gues = [] gues_mean = [] write_restartg", "\"WRITE RESTART/GRADS(GUES)\", \"DAS_LETKF\", \"ANAL_MEAN\", \"WRITE_ANAL\", \"DEALLOCATE\", \"WRITE RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\", ] #", "= float( data[1] ) DETAIL[tit_][i] = dat_ except: print( \"Failed\", data ) return(", "else: with np.load( fn_sum, allow_pickle=True ) as npz: for key in SUM.keys(): SUM[key]", "= l.split() try: tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_][i] =", "print( 'Not plot ', key) for key in DETAIL_MODE.keys(): print( key ) if", "c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ] #c_l = [ 'cyan',", ") fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92, wspace=0.3, hspace=0.05) ax1.set_xlim( 0, 2.0 ) width1", "'cyan', 'magenta', 'y', 'k' ] acm = 0.0 for i, key in enumerate(", "lab = key if lab == 'OBS': lab = 'Obs pre-\\nprocessing' elif lab", "[ 'cyan', 'magenta', 'y', 'k' ] acm = 0.0 for i, key in", "matplotlib.pyplot as plt fig, ax = plt.subplots( 1, 1, figsize=(5,5) ) fig.subplots_adjust( left=0.15,", "= float( data[1] ) DETAIL[tit_].append( dat_ ) except: print( \"Failed\", data ) for", "data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT = True #USE_ARCH_DAT = False", "0.0, \"LETKF\": 0.0, \"OBS\": 0.0, \"JIT-DT\": 0.0, } for key in DETAIL_MODE_test.keys(): if", "if not os.path.isfile( 
path ): break with open( path ) as f: lines", "transform=ax.transAxes, ha='left', va='bottom' ) # ofig = 'png/2p_d4_bar_scale.png' print( ofig ) if quick_bar:", "= 0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l = [", "init_letkf, \"PROCESS_OBS\": process_obs, \"SET_GRID\": set_grid, \"READ_GUES\": read_gues, \"GUES_MEAN\": gues_mean, \"WRITE RESTART/GRADS(GUES)\": write_restartg, \"DAS_LETKF\":", ") | ( read_obs_test > max_read_obs )] = np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test)", "DETAIL[\"OTHERS\"].append( dat_ ) except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data", "reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0,", "as f: lines = f.readlines() for l in lines: if '[Info:fcst] End forecast'", "time (s)' ylab = 'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_ =", "min_read_obs = 1.0 max_read_obs = 30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[", "(N: {2:})'.format( \"cycle\", np.nanmean( ctimes ), len(ctimes) ) ) print( '{0:} average: {1:}", ") if num > 100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) #DETAIL_MODE[key]", "lines = f.readlines() for l in lines: if '[Info:fcst] End forecast' in l:", "= 'job.o' #[ f.name for f in os.scandir( os.path.join( top, dir_ ) )", "key in DETAIL_MODE.keys(): print( key ) if key == \"SCALE\": SUM[\"SCALE\"] += DETAIL_MODE[key]", "key) for key in DETAIL_MODE.keys(): print( key ) if key == \"SCALE\": SUM[\"SCALE\"]", "[] deallocate = [] write_restarta = [] others = [] finalize = []", "computation time for all i = -1 for path in path_l: if not", "copy' elif lab == 'JIT-DT': continue ax.bar( '', dic[key], bottom=acm, label=lab, color=c_l[i] )", "1.0 ) yticks = np.arange( 0, 32, 2 ) ax.set_ylim( 0, 31.0 )", ") #print( SUM_test ) #print( DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM ) plot_bar_2p( dic=SUM,", 
"top=0.92, ) #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l = [", "transform=ax.transAxes, ha='left', va='bottom' ) ofig = 'pdf/Fig06.pdf' print( ofig ) if quick_bar: plt.show()", "\"cycle\", np.nanmean( ctimes ), len(ctimes) ) ) print( '{0:} average: {1:} (N: {2:})'.format(", "scale_l ) key_l = [ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\",", "== \"WRITE\": dat_ = float( data[6] ) if tit4_ == \"RESTART/GRADS(ANAL)\": tit_ =", "np.mean( rbins[imode:imode+2] ) mean = np.mean( dat ) #print( len(rn), len(rbins), mode )", "width=width2, color='dodgerblue' ) print( \"std:\", np.std( ftimes, ddof=1 ), len( ftimes ) )", "'{0:} average: {1:} (N: {2:})'.format( \"fcst \", np.mean( ftimes ), len(ftimes) ) )", "va='bottom' ) ofig = 'pdf/Fig06.pdf' print( ofig ) if quick_bar: plt.show() else: plt.savefig(", "ha='right', va='top' ) tit_ = key ax.text( 0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes, ha='center',", "data[1] ) DETAIL[tit_].append( dat_ ) except: print( \"Failed\", data ) for key in", "#dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ] for key in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test <", "\"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" i_ = i if i_ < 0: i_", "pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ): import matplotlib.pyplot", "os.path.join( top, dir_ ) ) ] #if f.is_file() ] path_l.append( os.path.join( top, dir_,", "int( np.sqrt( dat.size ) ) fig, ax = plt.subplots( 1, 1, figsize=(6,4) )", ") ) print(\"\") DETAIL_MODE = { } DETAIL_MODE_test = { } min_read_obs =", "dat_ ) except: print( \"Failed\", data ) elif '......jitdt_read_toshiba:jitget:' in l: data =", "DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) ) if num > 100: mode_, mean_ =", "ftimes, ctimes, DETAIL ) def d4_computation_time( top='', ctmax=600 ): dirs = [ f.name", "1, 1, figsize=(6,4) ) 
fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, ) rn, rbins, rpatches", "* np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0) #bins = int( (", "float( data[5] ) if tit_ == 'SCALE': scale_l.append( dat_ ) except: print( \"Failed\",", "DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append( dat_ ) except: print( \"Failed\", data ) elif", "choise #h = 3.5 * np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0)", "+= DETAIL_MODE[key] elif key == \"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] # elif key ==", "fig, ax = plt.subplots( 1, 1, figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92,", "right=0.5, top=0.92, ) #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l =", "min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan dat_jit_ = dat_jit[", "[] gues_mean = [] write_restartg = [] das_letkf = [] anal_mean = []", "import matplotlib.pyplot as plt fig, ax = plt.subplots( 1, 1, figsize=(5,5) ) fig.subplots_adjust(", "read_obs_test > max_read_obs )] = np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ] for key", "[] # Prepare file path list for dir_ in dirs: path_l.append( os.path.join( top,", "others = [] finalize = [] jitget = [] DETAIL = { \"SCALE\":", "SUM_test[\"OBS\"] += DETAIL_MODE_test[key] # elif key == \"READ_GUES\" or key == \"WRITE_ANAL\": #", "[ f.name for f in os.scandir( top ) ] #if f.is_file() ] ftimes", "\"READ_OBS\": SUM[\"OBS\"] += DETAIL_MODE[key] # elif key == \"READ_GUES\" or key == \"WRITE_ANAL\":", "USE_ARCH_DAT = True #USE_ARCH_DAT = False quick_hist = False quick_bar = True quick_bar", "+= 1 if tit_ == \"WRITE\": dat_ = float( data[6] ) if tit4_", "= [] set_grid = [] read_gues = [] gues_mean = [] write_restartg =", "( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan", "[ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ] #c_l = [ 'cyan', 'magenta', 'y',", "= 'Memory copy' elif lab == 'JIT-DT': continue ax1.bar( 1.0, dic[key], bottom=acm, 
label=lab,", "iarray[:] = np.nan DETAIL = {} for key in key_l: if key ==", "in dirs: path_l.append( os.path.join( top, dir_, ) ) scale_l = [] # Get", "'png/2p_d4_bar_scale.png' print( ofig ) if quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches =", ") print( \"std:\", np.std( ftimes, ddof=1 ), len( ftimes ) ) ax2.tick_params( axis='x',", ") ] #if f.is_file() ] path_l.append( os.path.join( top, dir_, fname ) ) #", "average: {1:} (N: {2:})'.format( \"fcst \", np.mean( ftimes ), len(ftimes) ) ) print(\"\")", "min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan time_ = np.nanmean(", "dat_jit_test[ ~np.isnan(dat_jit_test) ] for key in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test < min_read_obs )", "bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ): import", "plot_bar( dic={} ): import matplotlib.pyplot as plt fig, ax = plt.subplots( 1, 1,", "), len( ftimes ) ) ax2.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False ) ax_l", "allow_pickle=True ) as npz: for key in SUM.keys(): SUM[key] = npz[key] ftimes =", "= ax.get_legend_handles_labels() ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=13 ) ax.set_ylabel( 'Computation time (s)',", "key in key_l: if key == 'SCALE': DETAIL[key] = scale_l else: DETAIL[key] =", "(s)' ylab = 'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_ = key.replace(", ")] = np.nan time_ = np.nanmean( DETAIL_test[key] ) dat = DETAIL_test[key] print( key,", "#ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 22, 2 ) ax1.set_ylim( 0,", "= dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ] dat_ = dat[ ~np.isnan(dat) ]", "= plt.subplots( 1, 2, figsize=(6,4) ) # fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )", "np.arange( 0, 32, 2 ) ax.set_ylim( 0, 31.0 ) ax.set_yticks( yticks ) ofig", "#DETAIL_MODE[key] = mode_ 
DETAIL_MODE[key] = mean_ else: print( 'Not plot ', key) read_obs_test", "data = l.split() try: tit_ = \"JIT_GET\" dat_ = float( data[1] ) DETAIL[tit_].append(", "numpy as np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT = True", "max_read_obs = 30.0 read_obs_ = DETAIL[\"READ_OBS\"] dat_jit = DETAIL['JIT_GET'] dat_jit[ ( read_obs_ <", "= float( data[5] ) if tit_ == 'SCALE': i += 1 if tit_", "ddof=1 ), len( ftimes ) ) ax2.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False )", "\"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\": process_obs,", ") | ( read_obs_ > max_read_obs )] = np.nan dat_jit_ = dat_jit[ ~np.isnan(dat_jit)", "quick_bar = False def d4_computation_time_nparray( top='' ): dirs = [ f.name for f", "dat=dat_ ) #DETAIL_MODE[key] = mode_ DETAIL_MODE[key] = mean_ else: print( 'Not plot ',", "(N: {2:})'.format( \"fcst \", np.mean( ftimes ), len(ftimes) ) ) print(\"\") DETAIL_MODE =", "bbox_to_anchor=(1.01, 1.00) ) handles, labels = ax1.get_legend_handles_labels() ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00), fontsize=12", "dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ] num = len( dat_ ) if", "100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) #DETAIL_MODE[key] = mode_ DETAIL_MODE[key] =", "key in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test < min_read_obs ) | ( read_obs_test >", "\"(a)\", \"(b)\" ] for i, ax in enumerate( ax_l ): ax.text( 0.5, 1.01,", "1.01, pnum_l[i], fontsize=10, transform=ax.transAxes, ha='left', va='bottom' ) ofig = 'pdf/Fig06.pdf' print( ofig )", "0.0, \"JIT-DT\": 0.0, } for key in DETAIL_MODE_test.keys(): if key == \"SCALE\": SUM_test[\"SCALE\"]", "= npz[key] ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes'] print( SUM ) #print( DETAIL_MODE", "scale_l = np.array( scale_l ) key_l = [ \"SCALE\", 
\"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\",", "DETAIL[\"WRITE_ANAL\"][0:5]) #ftimes, ctimes, DETAIL = d4_computation_time( top=top, ) ctimes = np.array( ctimes )", "'[Info:DA]' in l: data = l.split() try: ctimes.append( float( data[6] ) ) except:", "path in path_l: if not os.path.isfile( path ): break with open( path )", "\"DATA TRANSFER\": 0.0, \"JIT-DT\": 0.0, } fn_sum = '{0:}/SUM.npz'.format( data_path, ) fn_ftimes =", "not USE_ARCH_DAT: top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp' top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m' top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' #dtime_max =", "#ax.set_xlim( 0, 1.0 ) yticks = np.arange( 0, 32, 2 ) ax.set_ylim( 0,", "range=(xmin, xmax), bins=bins, alpha=0.6 ) imode = np.argmax( rn ) mode = np.mean(", "== 'SCALE': DETAIL[key] = scale_l else: DETAIL[key] = np.copy( iarray ) # Get", "label=\"30-min forecast\", width=width2, color='dodgerblue' ) print( \"std:\", np.std( ftimes, ddof=1 ), len( ftimes", "else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') return( mode, mean )", "def plot_hist( key=\"\", dat=np.array([]) ): import matplotlib.pyplot as plt from scipy import stats", "mean = np.mean( dat ) #print( len(rn), len(rbins), mode ) lw = 1.0", "for l in lines: if '[Info:fcst] End forecast' in l: data = l.split()", "1, figsize=(6,4) ) fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, ) rn, rbins, rpatches =", "1, figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, ) #c_l = [ 'firebrick',", "enumerate( dic2.keys() ): lab = key if lab == 'OBS': lab = 'Obs", "DETAIL_test[key] ) dat = DETAIL_test[key] print( key, dat ) #dat_ = dat[ ~np.isnan(dat)", "os.path.join( top, dir_, ) ) scale_l = [] 
# Get computation time for", "[] # Get computation time for SCALE for path in path_l: if not", "= 'dashed' color = 'b' ax.vlines( x=mode, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color )", "f.name for f in os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file()", "ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color ) text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode,", "plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={}", "ax1.set_xlim( 0, 2.0 ) width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen',", ") ax.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 ) yticks =", ") ax.text( 0.99, 0.99, text_, fontsize=12, transform=ax.transAxes, ha='right', va='top' ) tit_ = key", "DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test = { \"SCALE\": 0.0, \"LETKF\": 0.0, \"OBS\":", "linestyles=ls, color=color ) color = 'k' ax.vlines( x=mean, ymin=ymin, ymax=ymax, linewidths=lw, linestyles=ls, color=color", "file path list for dir_ in dirs: path_l.append( os.path.join( top, dir_, ) )", "> 100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) #DETAIL_MODE[key] = mode_ DETAIL_MODE[key]", "dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] acm2 = 0.0 for", "np.load( fn_sum, allow_pickle=True ) as npz: for key in SUM.keys(): SUM[key] = npz[key]", "dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ] num = len( dat_", "init = [] init_others = [] init_letkf = [] scale = [] others", "= 0 xmax = 60 # Scott's choise #h = 3.5 * np.std(", "return( ftimes, ctimes, DETAIL ) def d4_computation_time( top='', ctmax=600 ): dirs = [", "~np.isnan(dat) & ~np.isnan( dat_jit ) ] num = len( dat_ ) if key", "plt.close('all') return( mode, mean ) def plot_bar_2p( dic={}, ftimes=np.array([]) ): import matplotlib.pyplot as", "computation time for path in path_l: if not os.path.isfile( path ): 
break with", "RESTART/GRADS(ANAL)\", \"OTHERS\", \"FINALIZE\", \"JIT_GET\", ] # prepare nan array iarray = np.zeros( scale_l.shape", "dat_ = dat[ ~np.isnan(dat) ] num = len( dat_ ) # if key", "'Frequency' ax.set_xlabel( xlab, fontsize=11) ax.set_ylabel( ylab, fontsize=11) key_ = key.replace( ' ', '_'", "tit_ = data[3] tit4_ = data[4] dat_ = float( data[5] ) if tit_", "jitget, } # Prepare file path list for dir_ in dirs: fname =", "DETAIL_MODE_test ) #sys.exit() #plot_bar( dic=SUM ) plot_bar_2p( dic=SUM, ftimes=ftimes ) #plot_bar_2p_scale( dic=SUM, dic2=SUM_test,", "= dat_jit[ ~np.isnan(dat_jit) ] for key in DETAIL.keys(): DETAIL[key][ ( read_obs_ < min_read_obs", "dat_ -= dat_jit_ print( \"#### \", key, time_, num, np.nanmax( DETAIL[key] ), np.nanmin(", "in l: data = l.split() try: ctimes.append( float( data[6] ) ) except: print(", ") ) fig, ax = plt.subplots( 1, 1, figsize=(6,4) ) fig.subplots_adjust( left=0.15, bottom=0.15,", "1.01, tit_l[i], fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.text( 0.0, 1.01, pnum_l[i], fontsize=10, transform=ax.transAxes,", "in DETAIL_test.keys(): DETAIL_test[key][ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs", "\"FINALIZE\": finalize, \"JIT_GET\": jitget, } # Prepare file path list for dir_ in", "i = -1 for path in path_l: if not os.path.isfile( path ): break", "== 'JIT-DT': continue ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm +=", "l.split() try: ctimes.append( float( data[6] ) ) except: print( \"Failed\", data ) elif", "fontsize=11) key_ = key.replace( ' ', '_' ).replace( '/', '_' ) #.replace( '(',", "fname ) ) # Get computation time for path in path_l: if not", "= key ax.text( 0.5, 1.01, tit_, fontsize=12, transform=ax.transAxes, ha='center', va='bottom' ) ax.set_xlim( xmin,", "dat_jit = DETAIL['JIT_GET'] dat_jit[ ( read_obs_ < min_read_obs ) | ( read_obs_ >", "ylab, fontsize=11) key_ = key.replace( ' ', '_' ).replace( '/', '_' ) #.replace(", 
"init_others = [] init_letkf = [] scale = [] others = [] read_obs", ") text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size ) ax.text( 0.99, 0.99,", "lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) width2 = 0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min forecast\",", "120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) width2 = 0.8 ax2.bar( 1, np.mean(ftimes), label=\"30-min", "plot ', key) read_obs_test = DETAIL_test[\"READ_OBS\"] #dat_jit_test = DETAIL_test['JIT_GET'] #dat_jit_test[ ( read_obs_test <", "mode_ DETAIL_MODE[key] = mean_ else: print( 'Not plot ', key) read_obs_test = DETAIL_test[\"READ_OBS\"]", "dic[key] acm2 = 0.0 for i, key in enumerate( dic2.keys() ): lab =", "quick_bar: plt.show() else: plt.savefig( ofig, bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') #### SUM", "in DETAIL: DETAIL[tit_][i_] = dat_ else: DETAIL[\"OTHERS\"][i_] = dat_ except: print( \"Failed\", data", "tit_ in DETAIL: DETAIL[tit_].append( dat_ ) else: DETAIL[\"OTHERS\"].append( dat_ ) except: print( \"Failed\",", "DETAIL[key] ) return( ftimes, ctimes, DETAIL ) def plot_hist( key=\"\", dat=np.array([]) ): import", "'job.o' #[ f.name for f in os.scandir( os.path.join( top, dir_ ) ) ]", "key_l = [ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\",", "[ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ] #c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod'", "#bins = int( ( xmax - xmin ) / h ) # Square-root", "] num = len( dat_ ) # if key == \"READ_OBS\": # dat_", "= [] write_anal = [] deallocate = [] write_restarta = [] others =", "2.0 ) ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed', color='gray', alpha=0.5 ) width2", "top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000' 
#dtime_max = 1000 ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, )", "'Not plot ', key) for key in DETAIL_MODE.keys(): print( key ) if key", "np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0) #bins = int( ( xmax", "f in os.scandir( top ) ] #if f.is_file() ] path_l = [] ftimes", "= [ \"SCALE\", \"READ_OBS\", \"OBS_OPERATOR\", \"INITIALIZE\", \"INITIALIZE_OTHERS\", \"INIT_LETKF\", \"PROCESS_OBS\", \"SET_GRID\", \"READ_GUES\", \"GUES_MEAN\", \"WRITE", "{ \"SCALE\": scale, \"READ_OBS\":read_obs, \"OBS_OPERATOR\": obsope, \"INITIALIZE\": init, \"INITIALIZE_OTHERS\": init_others, \"INIT_LETKF\": init_letkf, \"PROCESS_OBS\":", "data_path, exist_ok=True ) USE_ARCH_DAT = True #USE_ARCH_DAT = False quick_hist = False quick_bar", "except: print( \"Failed\", data ) elif '[Info:DA]' in l: data = l.split() try:", ") rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 ) imode", "0, 2.0 ) width1 = 0.8 #c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold'", "ctmax=600 ): dirs = [ f.name for f in os.scandir( top ) ]", "dat.size, 1.0/3.0) #bins = int( ( xmax - xmin ) / h )", "= 'png/1p_d4_{0:}.png'.format( key_ ) print( ofig ) if quick_hist: plt.show() else: plt.savefig( ofig,", "key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) ) if num >", "tit4_ == \"RESTART/GRADS(ANAL)\": tit_ = \"WRITE RESTART/GRADS(ANAL)\" elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ =", "in enumerate( dic.keys() ): lab = key if lab == 'OBS': lab =", "\"DEALLOCATE\": deallocate, \"WRITE RESTART/GRADS(ANAL)\": write_restarta, \"OTHERS\": others, \"FINALIZE\": finalize, \"JIT_GET\": jitget, } #", "+= DETAIL_MODE[key] else: SUM[\"LETKF\"] += DETAIL_MODE[key] SUM_test = { \"SCALE\": 0.0, \"LETKF\": 0.0,", "elif tit4_ == \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" i_ = i if i_", "time for path in path_l: if not os.path.isfile( path ): break with open(", "for key in DETAIL_MODE_test.keys(): if key == \"SCALE\": 
SUM_test[\"SCALE\"] += DETAIL_MODE_test[key] elif key", "l: data = l.split() try: tit_ = data[3] dat_ = float( data[5] )", "> max_read_obs )] = np.nan #dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ] for key in", "len( dat_ ) if key == \"READ_OBS\": dat_ -= dat_jit_ print( \"#### \",", "np.nanmean( DETAIL_test[key] ) dat = DETAIL_test[key] print( key, dat ) #dat_ = dat[", "== \"RESTART/GRADS(GUES)\": tit_ = \"WRITE RESTART/GRADS(GUES)\" i_ = i if i_ < 0:", "ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] # ax.legend(", "quick_bar = True quick_bar = False def d4_computation_time_nparray( top='' ): dirs = [", "bbox_inches=\"tight\", pad_inches = 0.1) plt.clf() plt.close('all') return( mode, mean ) def plot_bar_2p( dic={},", ") ax2.set_xlim( 0, 2.0 ) ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed', color='gray',", "> max_read_obs )] = np.nan time_ = np.nanmean( DETAIL_test[key] ) dat = DETAIL_test[key]", "1.00), fontsize=12 ) ax1.set_ylabel( 'Computation time (s)', fontsize=12 ) #ax.set_xlim( 0, 1.0 )", "100: mode_, mean_ = plot_hist( key=key, dat=dat_ ) DETAIL_MODE_test[key] = mean_ else: print(", "ax.set_xlim( xmin, xmax ) ax.set_ylim( ymin, ymax ) xlab = 'Computation time (s)'", "import numpy as np data_path = \"../../dat4figs_JAMES/Fig06\" os.makedirs( data_path, exist_ok=True ) USE_ARCH_DAT =", "DETAIL_MODE = { } DETAIL_MODE_test = { } min_read_obs = 1.0 max_read_obs =", "linewidths=lw, linestyles=ls, color=color ) text_ = 'Mean:{0:.3f} s\\nMode:{1:.3f} s\\nN={2:}'.format( mean, mode, dat.size )", "np.zeros( scale_l.shape ) iarray[:] = np.nan DETAIL = {} for key in key_l:", "f.is_file() ] path_l = [] ftimes = [] ctimes = [] # Prepare", "+= DETAIL_MODE[key] # elif key == \"READ_GUES\" or key == \"WRITE_ANAL\": # SUM[\"DATA", "dat_ = float( data[5] ) if tit_ == \"WRITE\": dat_ = float( data[6]", "#ftimes, ctimes, DETAIL = d4_computation_time( top=top, ) ctimes = np.array( ctimes ) print(", "= [] 
write_restartg = [] das_letkf = [] anal_mean = [] write_anal =", "for l in lines: if '##### TIMER' in l: data = l.split() try:", "ax1.bar( 1.0, dic[key], bottom=acm, label=lab, color=c_l[i], width=width1 ) acm += dic[key] acm2 =", "def plot_bar_2p( dic={}, ftimes=np.array([]) ): import matplotlib.pyplot as plt fig, ( ax1,ax2 )", "= [] scale = [] others = [] read_obs = [] obsope =", "print( \"Failed\", data ) for key in DETAIL.keys(): DETAIL[key] = np.array( DETAIL[key] )", "plt fig, ax = plt.subplots( 1, 1, figsize=(5,5) ) fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5,", ") except: print( \"Failed\", data ) for key in DETAIL.keys(): DETAIL[key] = np.array(", "= [] das_letkf = [] anal_mean = [] write_anal = [] deallocate =", "ax2 ] tit_l = [ \"Data assimilation\", \"30-min forecast\" ] pnum_l = [" ]
[]
[ "import numpy as np import os zeros = np.loadtxt('./bessel_zeros_short.txt') np.save('bessel_zeros_short', zeros) if os.path.exists('./bessel_zeros_short.txt'):", "numpy as np import os zeros = np.loadtxt('./bessel_zeros_short.txt') np.save('bessel_zeros_short', zeros) if os.path.exists('./bessel_zeros_short.txt'): os.remove('./bessel_zeros_short.txt')", "<reponame>GrzegorzMika/Towards-adaptivity-via-a-new-discrepancy-principle-for-Poisson-inverse-problems import numpy as np import os zeros = np.loadtxt('./bessel_zeros_short.txt') np.save('bessel_zeros_short', zeros) if" ]
[ "if __name__ == '__main__': cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0) #开启摄像头 # capture =", "cv.CaptureFromCAM(0) #开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num = 0; while True:", "# -*- coding: utf-8 -*- # 使用openCV抓取视频 # 空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import", "#开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num = 0; while True: img", "if key == ord(' '): num = num + 1 filename = \"frmaes_%s.jpg\"", "import time if __name__ == '__main__': cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0) #开启摄像头 #", "代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv import time if __name__ == '__main__': cv.NamedWindow(\"camRra\",", "= 0; while True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key = cv.WaitKey(1) &", "27: break if key == ord(' '): num = num + 1 filename", "http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv import time if __name__ == '__main__': cv.NamedWindow(\"camRra\", 1)", "= num + 1 filename = \"frmaes_%s.jpg\" % num cv.SaveImage(filename, img) del (capture)", "空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv import time if __name__ ==", "打开一个视频文件 num = 0; while True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key =", "if key == 27: break if key == ord(' '): num = num", "cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key = cv.WaitKey(1) & 0xFF if key == 27: break", "num = 0; while True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key = cv.WaitKey(1)", "utf-8 -*- # 使用openCV抓取视频 # 空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv", "= cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key = cv.WaitKey(1) & 0xFF if key == 27:", "0; while True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key = cv.WaitKey(1) & 
0xFF", "key == ord(' '): num = num + 1 filename = \"frmaes_%s.jpg\" %", "-*- # 使用openCV抓取视频 # 空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv import", "= cv.CaptureFromCAM(0) #开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num = 0; while", "== ord(' '): num = num + 1 filename = \"frmaes_%s.jpg\" % num", "# 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv import time if __name__ == '__main__':", "-*- coding: utf-8 -*- # 使用openCV抓取视频 # 空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv", "cv2.cv as cv import time if __name__ == '__main__': cv.NamedWindow(\"camRra\", 1) capture =", "cv import time if __name__ == '__main__': cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0) #开启摄像头", "time if __name__ == '__main__': cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0) #开启摄像头 # capture", "== '__main__': cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0) #开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\") #", "= cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num = 0; while True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\",", "img) key = cv.WaitKey(1) & 0xFF if key == 27: break if key", "as cv import time if __name__ == '__main__': cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0)", "cv.ShowImage(\"camera\", img) key = cv.WaitKey(1) & 0xFF if key == 27: break if", "break if key == ord(' '): num = num + 1 filename =", "# 空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv import time if __name__", "capture = cv.CaptureFromCAM(0) #开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num = 0;", "cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0) #开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num", "0xFF if key == 27: break if key == ord(' '): num =", "num = num + 1 filename = 
\"frmaes_%s.jpg\" % num cv.SaveImage(filename, img) del", "# 使用openCV抓取视频 # 空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv import time", "# 打开一个视频文件 num = 0; while True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key", "'): num = num + 1 filename = \"frmaes_%s.jpg\" % num cv.SaveImage(filename, img)", "使用openCV抓取视频 # 空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as cv import time if", "= cv.WaitKey(1) & 0xFF if key == 27: break if key == ord('", "True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key = cv.WaitKey(1) & 0xFF if key", "# capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num = 0; while True: img =", "while True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key = cv.WaitKey(1) & 0xFF if", "key == 27: break if key == ord(' '): num = num +", "== 27: break if key == ord(' '): num = num + 1", "& 0xFF if key == 27: break if key == ord(' '): num", "1) capture = cv.CaptureFromCAM(0) #开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num =", "num + 1 filename = \"frmaes_%s.jpg\" % num cv.SaveImage(filename, img) del (capture) cv.DestroyWindow(\"camera\")", "import cv2.cv as cv import time if __name__ == '__main__': cv.NamedWindow(\"camRra\", 1) capture", "img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img) key = cv.WaitKey(1) & 0xFF if key ==", "coding: utf-8 -*- # 使用openCV抓取视频 # 空格-->>截图,ESC-->>退出。 # 代码修改自 http://blog.csdn.net/tanmengwen/article/details/41892977 import cv2.cv as", "key = cv.WaitKey(1) & 0xFF if key == 27: break if key ==", "cv.WaitKey(1) & 0xFF if key == 27: break if key == ord(' '):", "ord(' '): num = num + 1 filename = \"frmaes_%s.jpg\" % num cv.SaveImage(filename,", "__name__ == '__main__': cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0) #开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\")", "capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num 
= 0; while True: img = cv.QueryFrame(capture)", "cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件 num = 0; while True: img = cv.QueryFrame(capture) cv.ShowImage(\"camera\", img)", "'__main__': cv.NamedWindow(\"camRra\", 1) capture = cv.CaptureFromCAM(0) #开启摄像头 # capture = cv.CaptureFromFile(\"Video.avi\") # 打开一个视频文件" ]
[ "model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected column is valid is_valid = (obs['board'][int(col)] == 0)", "select a column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected column is", "if is_valid: return int(col) else: return random.choice([col for col in range(config.columns) if obs.board[int(col)]", "Use the best model to select a column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) #", "column is valid is_valid = (obs['board'][int(col)] == 0) # If not valid, select", "the best model to select a column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check", "random move. if is_valid: return int(col) else: return random.choice([col for col in range(config.columns)", "a column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected column is valid", "selected column is valid is_valid = (obs['board'][int(col)] == 0) # If not valid,", "valid is_valid = (obs['board'][int(col)] == 0) # If not valid, select random move.", "is valid is_valid = (obs['board'][int(col)] == 0) # If not valid, select random", "# If not valid, select random move. if is_valid: return int(col) else: return", "Check if selected column is valid is_valid = (obs['board'][int(col)] == 0) # If", "col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected column is valid is_valid =", "column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected column is valid is_valid", "if selected column is valid is_valid = (obs['board'][int(col)] == 0) # If not", "If not valid, select random move. 
if is_valid: return int(col) else: return random.choice([col", "# Check if selected column is valid is_valid = (obs['board'][int(col)] == 0) #", "= model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected column is valid is_valid = (obs['board'][int(col)] ==", "def my_agent(obs, config): # Use the best model to select a column col,", "to select a column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected column", "= (obs['board'][int(col)] == 0) # If not valid, select random move. if is_valid:", "(obs['board'][int(col)] == 0) # If not valid, select random move. if is_valid: return", "best model to select a column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if", "0) # If not valid, select random move. if is_valid: return int(col) else:", "select random move. if is_valid: return int(col) else: return random.choice([col for col in", "config): # Use the best model to select a column col, _ =", "is_valid = (obs['board'][int(col)] == 0) # If not valid, select random move. if", "is_valid: return int(col) else: return random.choice([col for col in range(config.columns) if obs.board[int(col)] ==", "# Use the best model to select a column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1))", "model to select a column col, _ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected", "_ = model.predict(np.array(obs['board']).reshape(6,7,1)) # Check if selected column is valid is_valid = (obs['board'][int(col)]", "== 0) # If not valid, select random move. if is_valid: return int(col)", "not valid, select random move. if is_valid: return int(col) else: return random.choice([col for", "valid, select random move. if is_valid: return int(col) else: return random.choice([col for col", "move. 
if is_valid: return int(col) else: return random.choice([col for col in range(config.columns) if", "my_agent(obs, config): # Use the best model to select a column col, _", "return int(col) else: return random.choice([col for col in range(config.columns) if obs.board[int(col)] == 0])" ]
[ "as temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI,", "sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 )", "board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate():", "= temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update()", "board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update()", "def test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller =", "def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller =", "temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3", "wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe( board.SCK,", "def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, 
board.MISO, board.D0, wires=3 ) temperature_controller =", "temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO,", "import titration.utils.devices.temperature_control_mock as temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe(", "= temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.deactivate()", "= temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) assert", "board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe(", "temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO,", "time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 )", "as board import titration.utils.devices.temperature_control_mock as temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create(): sensor", "def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) 
temperature_controller =", "temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is not None def test_temperature_control_update(): sensor =", "import titration.utils.devices.board_mock as board import titration.utils.devices.temperature_control_mock as temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe def", "sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 )", "not None def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 )", "wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK,", "wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK,", "temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def", "test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1,", "= temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI,", "temperature_control.Temperature_Control(board.D1, sensor) 
temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3", "temperature_controller is not None def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0,", "board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is not", "sensor) temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 )", "= temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature()", "temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO,", "board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor =", "test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1,", ") temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI,", "= temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = 
temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature()", "sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 )", "= temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is not None def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe(", "board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is not None def", "board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor", "board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe(", "= temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0,", ") temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is not None def test_temperature_control_update(): sensor", "temperature_controller.update() def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller", "def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller =", "test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK, 
board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1,", "board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe(", "temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO,", "temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3", ") temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe(", "def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller =", "temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO,", "board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe(", "sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)", "titration.utils.devices.board_mock as board import 
titration.utils.devices.temperature_control_mock as temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create():", "is not None def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3", "board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def test_temperature_control_disable_print():", "= temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0,", "temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller", "import time import titration.utils.devices.board_mock as board import titration.utils.devices.temperature_control_mock as temperature_control import titration.utils.devices.temperature_probe_mock as", "titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3", "board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor =", "= temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print()", "temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = 
temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print() def", "temperature_probe def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller", "temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3", ") temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI,", "test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1,", ") temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI,", "def test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller =", "temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO,", "temperature_controller.activate() def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller", "board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) 
temperature_controller.disable_print() def test_temperature_control_at_temperature():", "test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1,", "import titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0,", "board import titration.utils.devices.temperature_control_mock as temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create(): sensor =", "temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def", "board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor =", "temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is not None def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK,", "board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate():", "temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller", "as temperature_probe def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 )", "test_temperature_control_activate(): 
sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1,", ") temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI,", "board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is not None", "board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor =", "= temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print()", "None def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller", "wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK,", "board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor", "wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK,", "sensor) temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor = 
temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 )", "board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print():", "board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor =", "temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller", "temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller", "board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor", "board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor", "temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK,", ") temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI,", "wires=3 ) temperature_controller = 
temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is not None def test_temperature_control_update():", "temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1)", "= temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def test_temperature_control_deactivate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0,", "titration.utils.devices.temperature_control_mock as temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK,", "= temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.disable_print() def test_temperature_control_at_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0,", "test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1,", "def test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller =", "sensor) assert temperature_controller is not None def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI,", "sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0,", "temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def 
test_temperature_control_disable_print(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3", "assert temperature_controller is not None def test_temperature_control_update(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO,", "time import titration.utils.devices.board_mock as board import titration.utils.devices.temperature_control_mock as temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe", "board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def", "wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor =", "board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) assert temperature_controller is", "temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3", "test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1,", "= temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate()", "board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def 
test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe(", "temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.activate() def", "temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def", "board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.update() time.sleep(1) temperature_controller.update() def test_temperature_control_enable_print(): sensor", "temperature_controller.at_temperature() def test_temperature_control_last_temperature(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller", "temperature_control import titration.utils.devices.temperature_probe_mock as temperature_probe def test_temperature_control_create(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO,", "board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.enable_print() def test_temperature_control_disable_print(): sensor", "= temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.get_last_temperature() def test_temperature_control_activate(): sensor = temperature_probe.Temperature_Probe( board.SCK, board.MOSI, board.MISO, board.D0,", "board.SCK, board.MOSI, board.MISO, board.D0, wires=3 ) temperature_controller = temperature_control.Temperature_Control(board.D1, sensor) temperature_controller.at_temperature() def test_temperature_control_last_temperature():" ]
[ "dst_conn, _ = edges[0] if dst_conn != '_a': return False return True def", "All rights reserved. \"\"\" Implements the matrix-matrix product transpose transformation. \"\"\" from copy", "@classmethod def expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at,", "= self.at transpose_b = self.transpose_b _bt = self.bt a_times_b = self.a_times_b for src,", "MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix product transpose transformation. T(A) @ T(B) = T(B", "blas transpose_a = self.transpose_a _at = self.at transpose_b = self.transpose_b _bt = self.bt", "the matrix-matrix product transpose transformation. T(A) @ T(B) = T(B @ A) \"\"\"", "slow imports transpose_a = transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt =", "{transpose_b.name}\" def apply(self, graph: SDFGState, sdfg: SDFG): import dace.libraries.blas as blas transpose_a =", "@ A) \"\"\" import dace.libraries.blas as blas # Avoid slow imports transpose_a =", "= self.transpose_a _at = self.at transpose_b = self.transpose_b _bt = self.bt a_times_b =", "blas # Avoid slow imports transpose_a = transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b =", "self.bt a_times_b = self.a_times_b for src, src_conn, _, _, memlet in graph.in_edges(transpose_a): graph.add_edge(src,", "= [size[1], size[0]] break tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c", "reserved. \"\"\" Implements the matrix-matrix product transpose transformation. 
\"\"\" from copy import deepcopy", "memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _,", "Zurich and the DaCe authors. All rights reserved. \"\"\" Implements the matrix-matrix product", "import dace.libraries.blas as blas transpose_a = self.transpose_a _at = self.at transpose_b = self.transpose_b", "'_b', memlet) graph.remove_node(transpose_a) for src, src_conn, _, _, memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn,", "transpose transformation. T(A) @ T(B) = T(B @ A) \"\"\" import dace.libraries.blas as", "return True def match_to_str(self, graph): transpose_a = self.transpose_a transpose_b = self.transpose_b a_times_b =", "Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. \"\"\" Implements", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. \"\"\"", "False return True def match_to_str(self, graph): transpose_a = self.transpose_a transpose_b = self.transpose_b a_times_b", "Implements the matrix-matrix product transpose transformation. 
\"\"\" from copy import deepcopy as dcpy", "dcpy(memlet.subset) subset.squeeze() size = subset.size() shape = [size[1], size[0]] break tmp_name, tmp_arr =", "edges[0] if dst_conn != '_a': return False return True def match_to_str(self, graph): transpose_a", "for edge in graph.out_edges(a_times_b): _, _, dst, dst_conn, memlet = edge graph.remove_edge(edge) graph.add_edge(transpose_c,", "self.a_times_b for src, src_conn, _, _, memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b',", "graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None, dace.Memlet.from_array(tmp_name, tmp_arr)) graph.add_edge(tmp_acc, None,", "in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _, _,", "None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None) return [graph] def can_be_applied(self, graph, expr_index,", "self.at _a_times_b = self.a_times_b edges = graph.edges_between(_at, _a_times_b) # Enforce unique match if", "memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None, dace.Memlet.from_array(tmp_name, tmp_arr)) graph.add_edge(tmp_acc, None, transpose_c, '_inp', dace.Memlet.from_array(tmp_name, tmp_arr))", "subset.squeeze() size = subset.size() shape = [size[1], size[0]] break tmp_name, tmp_arr = sdfg.add_temp_transient(shape,", "= transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b)", "SDFGState, sdfg: SDFG): import dace.libraries.blas as blas transpose_a = self.transpose_a _at = self.at", "_, dst, dst_conn, memlet in graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze() size = subset.size()", "graph.add_node(cls.bt) graph.add_node(cls.a_times_b) 
graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b,", "a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt)", "memlet = edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None,", "dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None, dace.Memlet.from_array(tmp_name, tmp_arr)) graph.add_edge(tmp_acc, None, transpose_c, '_inp', dace.Memlet.from_array(tmp_name,", "expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at,", "graph: SDFGState, sdfg: SDFG): import dace.libraries.blas as blas transpose_a = self.transpose_a _at =", "transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul)", "_, _, memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt)", "_, memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for", "can_be_applied(self, graph, expr_index, sdfg, permissive=False): _at = self.at _a_times_b = self.a_times_b edges =", "dace.libraries.blas as blas # Avoid slow imports transpose_a = transformation.PatternNode(blas.Transpose) at = 
transformation.PatternNode(nodes.AccessNode)", "tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for edge", "graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for edge in graph.out_edges(a_times_b): _, _, dst, dst_conn,", "= T(B @ A) \"\"\" import dace.libraries.blas as blas # Avoid slow imports", "memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a) for src, src_conn, _,", "matrix-matrix product transpose transformation. \"\"\" from copy import deepcopy as dcpy import dace", "dst_conn, memlet = edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc,", "= self.transpose_a transpose_b = self.transpose_b a_times_b = self.a_times_b return f\"{transpose_a.name} -> {a_times_b.name} <-", "dace.sdfg.state import SDFGState from dace.transformation import transformation from dace.properties import make_properties @make_properties class", "[graph] def can_be_applied(self, graph, expr_index, sdfg, permissive=False): _at = self.at _a_times_b = self.a_times_b", "a_times_b = self.a_times_b for src, src_conn, _, _, memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn,", "import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix product transpose transformation. T(A)", "as blas transpose_a = self.transpose_a _at = self.at transpose_b = self.transpose_b _bt =", "transformation. \"\"\" from copy import deepcopy as dcpy import dace from dace.sdfg import", "transformation. 
T(A) @ T(B) = T(B @ A) \"\"\" import dace.libraries.blas as blas", "sdfg, permissive=False): _at = self.at _a_times_b = self.a_times_b edges = graph.edges_between(_at, _a_times_b) #", "self.at transpose_b = self.transpose_b _bt = self.bt a_times_b = self.a_times_b for src, src_conn,", "transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls):", "expr_index, sdfg, permissive=False): _at = self.at _a_times_b = self.a_times_b edges = graph.edges_between(_at, _a_times_b)", "and the DaCe authors. All rights reserved. \"\"\" Implements the matrix-matrix product transpose", "= transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b =", "from dace.properties import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix product transpose", "src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _, _, dst, dst_conn, memlet", "None) graph.add_edge(cls.bt, cls.a_times_b, None) return [graph] def can_be_applied(self, graph, expr_index, sdfg, permissive=False): _at", "dst_conn != '_a': return False return True def match_to_str(self, graph): transpose_a = self.transpose_a", "from dace.transformation import transformation from dace.properties import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements", "def can_be_applied(self, graph, expr_index, sdfg, permissive=False): _at = self.at _a_times_b = self.a_times_b edges", "unique match if len(edges) != 1: return False _, _, _, dst_conn, _", "!= '_a': return False return True def 
match_to_str(self, graph): transpose_a = self.transpose_a transpose_b", "_at = self.at _a_times_b = self.a_times_b edges = graph.edges_between(_at, _a_times_b) # Enforce unique", "dace.properties import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix product transpose transformation.", "src_conn, _, _, memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a) for", "graph.add_edge(cls.bt, cls.a_times_b, None) return [graph] def can_be_applied(self, graph, expr_index, sdfg, permissive=False): _at =", "True def match_to_str(self, graph): transpose_a = self.transpose_a transpose_b = self.transpose_b a_times_b = self.a_times_b", "return f\"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}\" def apply(self, graph: SDFGState, sdfg: SDFG): import", "transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for edge in graph.out_edges(a_times_b): _, _, dst, dst_conn, memlet", "def apply(self, graph: SDFGState, sdfg: SDFG): import dace.libraries.blas as blas transpose_a = self.transpose_a", "1: return False _, _, _, dst_conn, _ = edges[0] if dst_conn !=", "permissive=False): _at = self.at _a_times_b = self.a_times_b edges = graph.edges_between(_at, _a_times_b) # Enforce", "as gr from dace.sdfg.sdfg import SDFG from dace.sdfg.state import SDFGState from dace.transformation import", "graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None) return [graph] def can_be_applied(self,", "matrix-matrix product transpose transformation. 
T(A) @ T(B) = T(B @ A) \"\"\" import", "return False _, _, _, dst_conn, _ = edges[0] if dst_conn != '_a':", "'_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _, _, dst, dst_conn, memlet in graph.out_edges(a_times_b):", "in graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze() size = subset.size() shape = [size[1], size[0]]", "for src, src_conn, _, _, memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a', memlet)", "src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a) for src, src_conn, _, _, memlet in graph.in_edges(transpose_b):", "T(B) = T(B @ A) \"\"\" import dace.libraries.blas as blas # Avoid slow", "dcpy import dace from dace.sdfg import nodes, graph as gr from dace.sdfg.sdfg import", "as blas # Avoid slow imports transpose_a = transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b", "memlet) graph.remove_node(transpose_a) for src, src_conn, _, _, memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b,", "tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for", "None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None) return [graph] def", "src, src_conn, _, _, memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a)", "-> {a_times_b.name} <- {transpose_b.name}\" def apply(self, graph: SDFGState, sdfg: SDFG): import dace.libraries.blas as", "if len(edges) != 1: return False _, _, _, dst_conn, _ = edges[0]", "for _, _, dst, dst_conn, memlet in graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze() size", "in graph.out_edges(a_times_b): _, _, dst, dst_conn, memlet = edge graph.remove_edge(edge) 
graph.add_edge(transpose_c, '_out', dst,", "T(B @ A) \"\"\" import dace.libraries.blas as blas # Avoid slow imports transpose_a", "import deepcopy as dcpy import dace from dace.sdfg import nodes, graph as gr", "dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None, dace.Memlet.from_array(tmp_name, tmp_arr)) graph.add_edge(tmp_acc, None, transpose_c, '_inp',", "transpose_a = self.transpose_a transpose_b = self.transpose_b a_times_b = self.a_times_b return f\"{transpose_a.name} -> {a_times_b.name}", "graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b,", "copy import deepcopy as dcpy import dace from dace.sdfg import nodes, graph as", "2019-2021 ETH Zurich and the DaCe authors. All rights reserved. \"\"\" Implements the", "transpose_b = self.transpose_b a_times_b = self.a_times_b return f\"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}\" def", "= transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def", "_at = self.at transpose_b = self.transpose_b _bt = self.bt a_times_b = self.a_times_b for", "graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze() size = subset.size() shape = [size[1], size[0]] break", "@ T(B) = T(B @ A) \"\"\" import dace.libraries.blas as blas # Avoid", "transpose_b = transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph", "SDFG from dace.sdfg.state import SDFGState from dace.transformation import transformation from dace.properties import make_properties", "graph.edges_between(_at, _a_times_b) # Enforce unique 
match if len(edges) != 1: return False _,", "def match_to_str(self, graph): transpose_a = self.transpose_a transpose_b = self.transpose_b a_times_b = self.a_times_b return", "a_times_b = self.a_times_b return f\"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}\" def apply(self, graph: SDFGState,", "SDFGState from dace.transformation import transformation from dace.properties import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\"", "A) \"\"\" import dace.libraries.blas as blas # Avoid slow imports transpose_a = transformation.PatternNode(blas.Transpose)", "_, _, _, dst_conn, _ = edges[0] if dst_conn != '_a': return False", "cls.a_times_b, None) return [graph] def can_be_applied(self, graph, expr_index, sdfg, permissive=False): _at = self.at", "in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a) for src, src_conn, _, _,", "= self.bt a_times_b = self.a_times_b for src, src_conn, _, _, memlet in graph.in_edges(transpose_a):", "memlet in graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze() size = subset.size() shape = [size[1],", "self.a_times_b return f\"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}\" def apply(self, graph: SDFGState, sdfg: SDFG):", "from dace.sdfg import nodes, graph as gr from dace.sdfg.sdfg import SDFG from dace.sdfg.state", "subset.size() shape = [size[1], size[0]] break tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc =", "import transformation from dace.properties import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix", "= self.a_times_b for src, src_conn, _, _, memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b,", "transpose_a = self.transpose_a _at = self.at transpose_b = self.transpose_b _bt = self.bt a_times_b", "transpose 
transformation. \"\"\" from copy import deepcopy as dcpy import dace from dace.sdfg", "authors. All rights reserved. \"\"\" Implements the matrix-matrix product transpose transformation. \"\"\" from", "product transpose transformation. T(A) @ T(B) = T(B @ A) \"\"\" import dace.libraries.blas", "_, memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a) for src, src_conn,", "self.a_times_b edges = graph.edges_between(_at, _a_times_b) # Enforce unique match if len(edges) != 1:", "match_to_str(self, graph): transpose_a = self.transpose_a transpose_b = self.transpose_b a_times_b = self.a_times_b return f\"{transpose_a.name}", "= self.a_times_b edges = graph.edges_between(_at, _a_times_b) # Enforce unique match if len(edges) !=", "edges = graph.edges_between(_at, _a_times_b) # Enforce unique match if len(edges) != 1: return", "size = subset.size() shape = [size[1], size[0]] break tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype)", "sdfg: SDFG): import dace.libraries.blas as blas transpose_a = self.transpose_a _at = self.at transpose_b", "dace.sdfg.sdfg import SDFG from dace.sdfg.state import SDFGState from dace.transformation import transformation from dace.properties", "edge in graph.out_edges(a_times_b): _, _, dst, dst_conn, memlet = edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out',", "= transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph =", "dace.libraries.blas as blas transpose_a = self.transpose_a _at = self.at transpose_b = self.transpose_b _bt", "SDFG): import dace.libraries.blas as blas transpose_a = self.transpose_a _at = self.at transpose_b =", "dst_conn, memlet in graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze() size = subset.size() shape =", "= self.a_times_b return f\"{transpose_a.name} -> 
{a_times_b.name} <- {transpose_b.name}\" def apply(self, graph: SDFGState, sdfg:", "sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for edge in graph.out_edges(a_times_b):", "Implements the matrix-matrix product transpose transformation. T(A) @ T(B) = T(B @ A)", "class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix product transpose transformation. T(A) @ T(B) =", "memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _, _, dst, dst_conn, memlet in graph.out_edges(a_times_b): subset", "the DaCe authors. All rights reserved. \"\"\" Implements the matrix-matrix product transpose transformation.", "_, _, dst_conn, _ = edges[0] if dst_conn != '_a': return False return", "= edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None, dace.Memlet.from_array(tmp_name,", "_ = edges[0] if dst_conn != '_a': return False return True def match_to_str(self,", "transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph = gr.OrderedDiGraph()", "import SDFGState from dace.transformation import transformation from dace.properties import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation):", "import dace.libraries.blas as blas # Avoid slow imports transpose_a = transformation.PatternNode(blas.Transpose) at =", "graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt,", "# Enforce unique match if len(edges) != 1: return False _, _, _,", "ETH 
Zurich and the DaCe authors. All rights reserved. \"\"\" Implements the matrix-matrix", "from copy import deepcopy as dcpy import dace from dace.sdfg import nodes, graph", "cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None) return [graph] def can_be_applied(self, graph,", "self.transpose_a _at = self.at transpose_b = self.transpose_b _bt = self.bt a_times_b = self.a_times_b", "= dcpy(memlet.subset) subset.squeeze() size = subset.size() shape = [size[1], size[0]] break tmp_name, tmp_arr", "= self.transpose_b a_times_b = self.a_times_b return f\"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}\" def apply(self,", "= sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for edge in", "T(A) @ T(B) = T(B @ A) \"\"\" import dace.libraries.blas as blas #", "transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b)", "import nodes, graph as gr from dace.sdfg.sdfg import SDFG from dace.sdfg.state import SDFGState", "dace.sdfg import nodes, graph as gr from dace.sdfg.sdfg import SDFG from dace.sdfg.state import", "'_a': return False return True def match_to_str(self, graph): transpose_a = self.transpose_a transpose_b =", "f\"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}\" def apply(self, graph: SDFGState, sdfg: SDFG): import dace.libraries.blas", "product transpose transformation. 
\"\"\" from copy import deepcopy as dcpy import dace from", "dst, dst_conn, memlet in graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze() size = subset.size() shape", "_, dst_conn, _ = edges[0] if dst_conn != '_a': return False return True", "transpose_a = transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b", "= graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for edge in graph.out_edges(a_times_b): _, _, dst,", "blas.Transpose('_Transpose_', a_times_b.dtype) for edge in graph.out_edges(a_times_b): _, _, dst, dst_conn, memlet = edge", "dst, dst_conn, memlet = edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c',", "!= 1: return False _, _, _, dst_conn, _ = edges[0] if dst_conn", "cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None) return [graph]", "gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b,", "Enforce unique match if len(edges) != 1: return False _, _, _, dst_conn,", "break tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype)", "\"\"\" from copy import deepcopy as dcpy import dace from dace.sdfg import nodes,", "\"\"\" Implements the matrix-matrix product transpose transformation. 
\"\"\" from copy import deepcopy as", "graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None, dace.Memlet.from_array(tmp_name, tmp_arr)) graph.add_edge(tmp_acc,", "graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt,", "DaCe authors. All rights reserved. \"\"\" Implements the matrix-matrix product transpose transformation. \"\"\"", "graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None) return [graph] def can_be_applied(self, graph, expr_index, sdfg,", "= transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at)", "apply(self, graph: SDFGState, sdfg: SDFG): import dace.libraries.blas as blas transpose_a = self.transpose_a _at", "_, _, dst, dst_conn, memlet in graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze() size =", "shape = [size[1], size[0]] break tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name)", "graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _, _, dst, dst_conn, memlet in graph.out_edges(a_times_b): subset =", "from dace.sdfg.state import SDFGState from dace.transformation import transformation from dace.properties import make_properties @make_properties", "transformation from dace.properties import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix product", "deepcopy as dcpy import dace from dace.sdfg import nodes, graph as gr from", "_bt = self.bt a_times_b = self.a_times_b for src, src_conn, _, _, memlet 
in", "src, src_conn, _, _, memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b)", "# Avoid slow imports transpose_a = transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose)", "a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for edge in graph.out_edges(a_times_b): _,", "False _, _, _, dst_conn, _ = edges[0] if dst_conn != '_a': return", "the matrix-matrix product transpose transformation. \"\"\" from copy import deepcopy as dcpy import", "= graph.edges_between(_at, _a_times_b) # Enforce unique match if len(edges) != 1: return False", "graph.out_edges(a_times_b): _, _, dst, dst_conn, memlet = edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn,", "graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a) for src, src_conn, _, _, memlet", "@make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix product transpose transformation. T(A) @ T(B)", "graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None) return", "a_times_b, '_b', memlet) graph.remove_node(transpose_a) for src, src_conn, _, _, memlet in graph.in_edges(transpose_b): graph.add_edge(src,", "as dcpy import dace from dace.sdfg import nodes, graph as gr from dace.sdfg.sdfg", "make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the matrix-matrix product transpose transformation. 
T(A) @", "_, _, memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a) for src,", "graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _, _, dst,", "= blas.Transpose('_Transpose_', a_times_b.dtype) for edge in graph.out_edges(a_times_b): _, _, dst, dst_conn, memlet =", "dace.transformation import transformation from dace.properties import make_properties @make_properties class MatrixProductTranspose(transformation.SingleStateTransformation): \"\"\" Implements the", "transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a,", "_a_times_b) # Enforce unique match if len(edges) != 1: return False _, _,", "graph.remove_node(_at) graph.remove_node(_bt) for _, _, dst, dst_conn, memlet in graph.out_edges(a_times_b): subset = dcpy(memlet.subset)", "if dst_conn != '_a': return False return True def match_to_str(self, graph): transpose_a =", "def expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None)", "graph.add_node(cls.at) graph.add_node(cls.transpose_b) graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None)", "tmp_acc = graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype) for edge in graph.out_edges(a_times_b): _, _,", "= gr.OrderedDiGraph() graph.add_node(cls.transpose_a) graph.add_node(cls.at) graph.add_node(cls.transpose_b) 
graph.add_node(cls.bt) graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None)", "graph.add_node(cls.a_times_b) graph.add_edge(cls.transpose_a, cls.at, None) graph.add_edge(cls.at, cls.a_times_b, None) graph.add_edge(cls.transpose_b, cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None)", "dace from dace.sdfg import nodes, graph as gr from dace.sdfg.sdfg import SDFG from", "= edges[0] if dst_conn != '_a': return False return True def match_to_str(self, graph):", "import dace from dace.sdfg import nodes, graph as gr from dace.sdfg.sdfg import SDFG", "return [graph] def can_be_applied(self, graph, expr_index, sdfg, permissive=False): _at = self.at _a_times_b =", "\"\"\" import dace.libraries.blas as blas # Avoid slow imports transpose_a = transformation.PatternNode(blas.Transpose) at", "subset = dcpy(memlet.subset) subset.squeeze() size = subset.size() shape = [size[1], size[0]] break tmp_name,", "graph.remove_node(transpose_a) for src, src_conn, _, _, memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a',", "_, dst, dst_conn, memlet = edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet) graph.add_edge(a_times_b,", "rights reserved. \"\"\" Implements the matrix-matrix product transpose transformation. 
\"\"\" from copy import", "_a_times_b = self.a_times_b edges = graph.edges_between(_at, _a_times_b) # Enforce unique match if len(edges)", "len(edges) != 1: return False _, _, _, dst_conn, _ = edges[0] if", "= subset.size() shape = [size[1], size[0]] break tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc", "gr from dace.sdfg.sdfg import SDFG from dace.sdfg.state import SDFGState from dace.transformation import transformation", "Avoid slow imports transpose_a = transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt", "= self.transpose_b _bt = self.bt a_times_b = self.a_times_b for src, src_conn, _, _,", "size[0]] break tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c = blas.Transpose('_Transpose_',", "\"\"\" Implements the matrix-matrix product transpose transformation. T(A) @ T(B) = T(B @", "edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None, dace.Memlet.from_array(tmp_name, tmp_arr))", "match if len(edges) != 1: return False _, _, _, dst_conn, _ =", "for src, src_conn, _, _, memlet in graph.in_edges(transpose_a): graph.add_edge(src, src_conn, a_times_b, '_b', memlet)", "cls.bt, None) graph.add_edge(cls.bt, cls.a_times_b, None) return [graph] def can_be_applied(self, graph, expr_index, sdfg, permissive=False):", "graph): transpose_a = self.transpose_a transpose_b = self.transpose_b a_times_b = self.a_times_b return f\"{transpose_a.name} ->", "graph.remove_node(_bt) for _, _, dst, dst_conn, memlet in graph.out_edges(a_times_b): subset = dcpy(memlet.subset) subset.squeeze()", "_, _, dst, dst_conn, memlet = edge graph.remove_edge(edge) graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet)", "<filename>dace/transformation/dataflow/matrix_product_transpose.py # Copyright 2019-2021 ETH Zurich 
and the DaCe authors. All rights reserved.", "at = transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod", "nodes, graph as gr from dace.sdfg.sdfg import SDFG from dace.sdfg.state import SDFGState from", "self.transpose_a transpose_b = self.transpose_b a_times_b = self.a_times_b return f\"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}\"", "graph.add_edge(src, src_conn, a_times_b, '_b', memlet) graph.remove_node(transpose_a) for src, src_conn, _, _, memlet in", "[size[1], size[0]] break tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype) tmp_acc = graph.add_access(tmp_name) transpose_c =", "graph, expr_index, sdfg, permissive=False): _at = self.at _a_times_b = self.a_times_b edges = graph.edges_between(_at,", "<- {transpose_b.name}\" def apply(self, graph: SDFGState, sdfg: SDFG): import dace.libraries.blas as blas transpose_a", "src_conn, _, _, memlet in graph.in_edges(transpose_b): graph.add_edge(src, src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at)", "imports transpose_a = transformation.PatternNode(blas.Transpose) at = transformation.PatternNode(nodes.AccessNode) transpose_b = transformation.PatternNode(blas.Transpose) bt = transformation.PatternNode(nodes.AccessNode)", "transpose_b = self.transpose_b _bt = self.bt a_times_b = self.a_times_b for src, src_conn, _,", "a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _, _, dst, dst_conn, memlet in", "None) return [graph] def can_be_applied(self, graph, expr_index, sdfg, permissive=False): _at = self.at _a_times_b", "a_times_b.dtype) for edge in graph.out_edges(a_times_b): _, _, dst, dst_conn, memlet = edge graph.remove_edge(edge)", "'_out', dst, dst_conn, memlet) graph.add_edge(a_times_b, '_c', tmp_acc, None, 
dace.Memlet.from_array(tmp_name, tmp_arr)) graph.add_edge(tmp_acc, None, transpose_c,", "self.transpose_b _bt = self.bt a_times_b = self.a_times_b for src, src_conn, _, _, memlet", "self.transpose_b a_times_b = self.a_times_b return f\"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}\" def apply(self, graph:", "from dace.sdfg.sdfg import SDFG from dace.sdfg.state import SDFGState from dace.transformation import transformation from", "graph.add_edge(src, src_conn, a_times_b, '_a', memlet) graph.remove_node(transpose_b) graph.remove_node(_at) graph.remove_node(_bt) for _, _, dst, dst_conn,", "bt = transformation.PatternNode(nodes.AccessNode) a_times_b = transformation.PatternNode(blas.MatMul) @classmethod def expressions(cls): graph = gr.OrderedDiGraph() graph.add_node(cls.transpose_a)", "{a_times_b.name} <- {transpose_b.name}\" def apply(self, graph: SDFGState, sdfg: SDFG): import dace.libraries.blas as blas", "return False return True def match_to_str(self, graph): transpose_a = self.transpose_a transpose_b = self.transpose_b", "= self.at _a_times_b = self.a_times_b edges = graph.edges_between(_at, _a_times_b) # Enforce unique match", "import SDFG from dace.sdfg.state import SDFGState from dace.transformation import transformation from dace.properties import", "graph as gr from dace.sdfg.sdfg import SDFG from dace.sdfg.state import SDFGState from dace.transformation" ]
[ "- !MockPool __config_test: top_eager: !TagTrackerEager nested: - leaf: \"leaf level value\" - leaf_lazy:", "for testing lazy/eager YAML evaluation # Since YAML defaults to lazy evaluation, the", "test_load_tag_settings(self): \"\"\"Load !Tags with decorator settings\"\"\" # __yaml_tag_test is provided by the cobald", "lazy evaluation, the arguments available during evaluation # are not necessarily complete. class", "!MockPool \"\"\" ) with load(config.name): assert True assert True def test_load_invalid(self): \"\"\"Load a", "tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config: dict, section: str): return next( content for", "available during evaluation # are not necessarily complete. class TagTracker: \"\"\"Helper to track", "assert isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "!Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "[{\"leaf\": \"leaf level value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags with nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "YAML config (invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "config with mixed pipeline step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "level value\" nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name) as", "with load(config.name) as config: section = get_config_section(config, \"__config_test\") args, kwargs = section[\"settings_tag\"] assert", "load(config.name) as config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags are evaluated eagerly", ") with load(config.name) as config: tagged = get_config_section(config, 
\"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert", "test_load_tags_lazy(self): \"\"\"Load !Tags with substructure, lazily using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "value\" \"\"\" ) with load(config.name) as config: section = get_config_section(config, \"__config_test\") args, kwargs", "with load(config.name): assert True assert True def test_load_invalid(self): \"\"\"Load a invalid YAML config", "as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool", "tagged.orig_kwargs[\"nested\"] == [] # ...but should be there in the end assert tagged.final_kwargs[\"nested\"]", "= copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) # the state of arguments *after* YAML evaluation", "user_name: tardis scopes: - user:read \"\"\" ) with load(config.name) as config: tagged =", "- !MockPool \"\"\" ) with load(config.name): assert True assert True def test_load_invalid(self): \"\"\"Load", "= get_config_section(config, \"__config_test\") args, kwargs = section[\"settings_tag\"] assert args == () assert kwargs[\"top\"]", "configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for testing lazy/eager YAML evaluation # Since YAML", "YAML defaults to lazy evaluation, the arguments available during evaluation # are not", "evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) # the state of arguments *after*", "high_allocation: 0.9 - !MockPool \"\"\" ) with load(config.name) as config: pipeline = get_config_section(config,", "\"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags with substructure, immediately using", "\"leaf\": \"leaf level value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness assert", "\"\"\"Load !Tags with nested 
!Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] assert tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self):", "pipeline step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "\"top level value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] assert tagged.orig_kwargs ==", ") with pytest.raises(TypeError): with load(config.name): assert False def test_load_dangling(self): \"\"\"Forbid loading a YAML", "necessarily complete. class TagTracker: \"\"\"Helper to track the arguments supplied to YAML !Tags\"\"\"", "complete. class TagTracker: \"\"\"Helper to track the arguments supplied to YAML !Tags\"\"\" def", "top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def", "value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] assert tagged.orig_kwargs == tagged.final_kwargs def", "!Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline:", "with pytest.raises(TypeError): with load(config.name): assert False def test_load_dangling(self): \"\"\"Forbid loading a YAML config", "as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation:", "\"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager top: \"top", "level value\"}] assert tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags with substructure, lazily", "are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf level value\" } leaf_lazy", "with 
pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_mixed_creation(self): \"\"\"Load a YAML config with", "self.orig_kwargs = copy.deepcopy(kwargs) # the state of arguments *after* YAML evaluation self.final_args =", "as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - __type__: cobald.controller.linear.LinearController", "tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags with substructure,", "YAML config with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "assert isinstance(tagged, TagTracker) # eager loading => all data should exist immediately assert", "() assert kwargs[\"top\"] == \"top level value\" assert kwargs[\"nested\"] == [{\"leaf\": \"leaf level", "load, COBalDLoader, yaml_constructor from cobald.controller.linear import LinearController from ...mock.pool import MockPool # register", "TagTracker) # eager loading => only some data should exist immediately... 
assert tagged.orig_kwargs[\"top\"]", "= kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def", "top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] ==", "write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool random_things: foo:", "as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager top: \"top level", "!TagTrackerLazy top: \"top level value\" nested: - leaf: \"leaf level value\" \"\"\" )", "nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "provided by the cobald package with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config: dict, section: str): return", "creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "== tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags with substructure, lazily using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "with substructure, immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "assert isinstance(tagged, TagTracker) # eager loading => only some data should exist immediately...", "0.9 high_allocation: 1.1 - !MockPool \"\"\" ) with load(config.name): assert True assert True", "as write_stream: write_stream.write( \"\"\" logging: version: 1.0 \"\"\" ) with 
pytest.raises(ConfigurationError): with load(config.name):", "assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\" with", "NamedTemporaryFile import pytest import copy from cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config import load,", "= get_config_section(config, \"pipeline\") assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self): \"\"\"Load !Tags", "COBalDLoader, yaml_constructor from cobald.controller.linear import LinearController from ...mock.pool import MockPool # register test", "= copy.deepcopy(kwargs) # the state of arguments *after* YAML evaluation self.final_args = args", "def test_load(self): \"\"\"Load a valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline:", "__config_test: tagged: !TagTrackerEager host: 127.0.0.1 port: 1234 algorithm: HS256 users: - user_name: tardis", "state of arguments *after* YAML evaluation self.final_args = args self.final_kwargs = kwargs COBalDLoader.add_constructor(", "with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: top_eager: !TagTrackerEager", "cobald package with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "user:read \"\"\" ) with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged,", "assert True def test_load_invalid(self): \"\"\"Load a invalid YAML config (invalid keyword argument)\"\"\" with", "!MockPool __config_test: tagged: !TagTrackerEager top: \"top level value\" nested: 
- leaf: \"leaf level", "COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config: dict,", "with dangling content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "= top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}]", "pipeline: - !MockPool __config_test: settings_tag: !__yaml_tag_test top: \"top level value\" nested: - leaf:", "missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "are not necessarily complete. class TagTracker: \"\"\"Helper to track the arguments supplied to", "== { \"leaf\": \"leaf level value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides", "the state of arguments *after* YAML evaluation self.final_args = args self.final_kwargs = kwargs", "== [{\"leaf\": \"leaf level value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags with decorator settings\"\"\" #", "is provided by the cobald package with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "def get_config_section(config: dict, section: str): return next( content for plugin, content in config.items()", "\"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\"", "a valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "\"__config_test\") args, kwargs = section[\"settings_tag\"] assert args == () assert kwargs[\"top\"] == \"top", "1.1 - !MockPool 
random_things: foo: bar \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert", "pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 0.9 - !MockPool \"\"\" ) with", "assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags with", "leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level", ") def get_config_section(config: dict, section: str): return next( content for plugin, content in", "tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags with substructure, immediately using them\"\"\" with", "package with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline:", "plugin, content in config.items() if plugin.section == section ) class TestYamlConfig: def test_load(self):", "not necessarily complete. 
class TagTracker: \"\"\"Helper to track the arguments supplied to YAML", "settings_tag: !__yaml_tag_test top: \"top level value\" nested: - leaf: \"leaf level value\" \"\"\"", "config.items() if plugin.section == section ) class TestYamlConfig: def test_load(self): \"\"\"Load a valid", "*during* YAML evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) # the state of", "self.orig_args = copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) # the state of arguments *after* YAML", "!__yaml_tag_test top: \"top level value\" nested: - leaf: \"leaf level value\" \"\"\" )", "copy.deepcopy(kwargs) # the state of arguments *after* YAML evaluation self.final_args = args self.final_kwargs", "with decorator settings\"\"\" # __yaml_tag_test is provided by the cobald package with NamedTemporaryFile(suffix=\".yaml\")", "low_utilisation: 0.9 high_allocation: 1.1 - !MockPool random_things: foo: bar \"\"\" ) with pytest.raises(ConfigurationError):", "YAML evaluation # Since YAML defaults to lazy evaluation, the arguments available during", "*args, **kwargs): # the state of arguments *during* YAML evaluation self.orig_args = copy.deepcopy(args)", "section ) class TestYamlConfig: def test_load(self): \"\"\"Load a valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "plugin.section == section ) class TestYamlConfig: def test_load(self): \"\"\"Load a valid YAML config\"\"\"", "test_load_missing(self): \"\"\"Forbid loading a YAML config with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "with mixed pipeline step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool random_things:", "write_stream: write_stream.write( \"\"\" pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 
0.9 - !MockPool", "tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self):", "during evaluation # are not necessarily complete. class TagTracker: \"\"\"Helper to track the", "args == () assert kwargs[\"top\"] == \"top level value\" assert kwargs[\"nested\"] == [{\"leaf\":", "assert False def test_load_missing(self): \"\"\"Forbid loading a YAML config with missing content\"\"\" with", "pipeline = get_config_section(config, \"pipeline\") assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self): \"\"\"Load", "= get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => only some data", "evaluation # Since YAML defaults to lazy evaluation, the arguments available during evaluation", "be there in the end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def", "...mock.pool import MockPool # register test pool as safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\",", "NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController", "False def test_load_missing(self): \"\"\"Forbid loading a YAML config with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "== \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] assert tagged.orig_kwargs", "level value\" \"\"\" ) with load(config.name) as config: section = get_config_section(config, \"__config_test\") args,", "write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager host: 127.0.0.1 port: 1234", "step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with 
open(config.name, \"w\") as write_stream: write_stream.write(", "get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => all data should exist", "class TagTracker: \"\"\"Helper to track the arguments supplied to YAML !Tags\"\"\" def __init__(self,", "valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "\"\"\" ) with load(config.name) as config: pipeline = get_config_section(config, \"pipeline\") assert isinstance(pipeline[0], LinearController)", "assert tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def", "nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: tagged", "isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) # the state of arguments *after* YAML evaluation self.final_args", "- !MockPool \"\"\" ) with load(config.name) as config: pipeline = get_config_section(config, \"pipeline\") assert", "immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "[] # ...but should be there in the end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\":", "127.0.0.1 port: 1234 algorithm: HS256 users: - user_name: tardis scopes: - user:read \"\"\"", "with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: -", "low_utilisation: 0.9 foo: 0 - !MockPool \"\"\" ) with pytest.raises(TypeError): with load(config.name): assert", "load(config.name) as config: pipeline = get_config_section(config, 
\"pipeline\") assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool)", "write_stream.write( \"\"\" pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 0.9 - !MockPool \"\"\"", "random_things: foo: bar \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_missing(self):", "!TagTrackerEager top: \"top level value\" nested: - leaf: \"leaf level value\" \"\"\" )", "pool as safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for testing lazy/eager", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation:", "1234 assert tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"]", "\"\"\" pipeline: - !LinearController low_utilisation: 0.9 foo: 0 - !MockPool \"\"\" ) with", "# eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tag_settings(self):", "pipeline: - !LinearController low_utilisation: 0.9 foo: 0 - !MockPool \"\"\" ) with pytest.raises(TypeError):", "as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: settings_tag: !__yaml_tag_test top: \"top level", "eagerly assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf level value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"]", "bar \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_missing(self): \"\"\"Forbid loading", "\"\"\" ) with load(config.name) as config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags", "leaf: \"leaf level value\" - leaf_lazy: 
!TagTrackerLazy nested: - leaf: \"leaf level value\"", "\"\"\"Load !Tags with decorator settings\"\"\" # __yaml_tag_test is provided by the cobald package", "by the cobald package with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for testing lazy/eager YAML evaluation # Since YAML defaults", "pipeline: - !MockPool __config_test: tagged: !TagTrackerEager host: 127.0.0.1 port: 1234 algorithm: HS256 users:", "\"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager top: \"top level value\" nested: -", "config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged:", "NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - __type__:", "\"\"\" ) with pytest.raises(TypeError): with load(config.name): assert False def test_load_dangling(self): \"\"\"Forbid loading a", "- !MockPool __config_test: tagged: !TagTrackerLazy top: \"top level value\" nested: - leaf: \"leaf", "COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config: dict, section: str): return next( content", "with load(config.name): assert False def test_load_mixed_creation(self): \"\"\"Load a YAML config with mixed pipeline", "test_load_dangling(self): \"\"\"Forbid loading a YAML config with dangling content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "as safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for testing lazy/eager YAML", "with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_missing(self): \"\"\"Forbid loading a YAML config", "with substructure, lazily using them\"\"\" with 
NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "\"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => only some data should exist", "the end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags", "section[\"settings_tag\"] assert args == () assert kwargs[\"top\"] == \"top level value\" assert kwargs[\"nested\"]", "from cobald.controller.linear import LinearController from ...mock.pool import MockPool # register test pool as", "yaml_constructor from cobald.controller.linear import LinearController from ...mock.pool import MockPool # register test pool", "high_allocation: 1.1 - !MockPool \"\"\" ) with load(config.name): assert True assert True def", "eager tags are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf level value\"", "arguments supplied to YAML !Tags\"\"\" def __init__(self, *args, **kwargs): # the state of", "0.9 foo: 0 - !MockPool \"\"\" ) with pytest.raises(TypeError): with load(config.name): assert False", "Helpers for testing lazy/eager YAML evaluation # Since YAML defaults to lazy evaluation,", "== \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] ==", "*after* YAML evaluation self.final_args = args self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True)", "pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool \"\"\" ) with load(config.name):", "# eager loading => only some data should exist immediately... 
assert tagged.orig_kwargs[\"top\"] ==", "level value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags with decorator settings\"\"\" # __yaml_tag_test is provided", "str): return next( content for plugin, content in config.items() if plugin.section == section", "a invalid YAML config (invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "load(config.name): assert True assert True def test_load_invalid(self): \"\"\"Load a invalid YAML config (invalid", "get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234", "def test_load_mixed_creation(self): \"\"\"Load a YAML config with mixed pipeline step creation methods\"\"\" with", "__type__: cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 0.9 - !MockPool \"\"\" ) with load(config.name) as", "as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading =>", "import load, COBalDLoader, yaml_constructor from cobald.controller.linear import LinearController from ...mock.pool import MockPool #", "False def test_load_mixed_creation(self): \"\"\"Load a YAML config with mixed pipeline step creation methods\"\"\"", "HS256 users: - user_name: tardis scopes: - user:read \"\"\" ) with load(config.name) as", "\"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 foo: 0 -", "__yaml_tag_test is provided by the cobald package with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "0.9 high_allocation: 1.1 - !MockPool random_things: foo: bar \"\"\" ) with pytest.raises(ConfigurationError): with", "0.9 high_allocation: 0.9 - !MockPool \"\"\" ) with load(config.name) as config: pipeline =", "exist immediately... 
assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [] #", "tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => only some", "lazy/eager YAML evaluation # Since YAML defaults to lazy evaluation, the arguments available", "with load(config.name) as config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags are evaluated", "as config: pipeline = get_config_section(config, \"pipeline\") assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool) def", "\"leaf level value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags with nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "eager loading => only some data should exist immediately... assert tagged.orig_kwargs[\"top\"] == \"top", "config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9", "low_utilisation: 0.9 high_allocation: 1.1 - !MockPool \"\"\" ) with load(config.name): assert True assert", "tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags with substructure, lazily using them\"\"\" with", "...but should be there in the end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level", ") with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_mixed_creation(self): \"\"\"Load a YAML config", "from cobald.daemon.core.config import load, COBalDLoader, yaml_constructor from cobald.controller.linear import LinearController from ...mock.pool import", "to track the arguments supplied to YAML !Tags\"\"\" def __init__(self, *args, **kwargs): #", "__config_test: top_eager: !TagTrackerEager nested: - leaf: \"leaf level value\" - leaf_lazy: !TagTrackerLazy nested:", "import LinearController from ...mock.pool import MockPool # register test pool as safe for", 
"nested: - leaf: \"leaf level value\" - leaf_lazy: !TagTrackerLazy nested: - leaf: \"leaf", "YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for testing lazy/eager YAML evaluation # Since", "{ \"leaf\": \"leaf level value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness", "\"leaf level value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"]", "def test_load_tags_nested(self): \"\"\"Load !Tags with nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "with load(config.name): assert False def test_load_missing(self): \"\"\"Forbid loading a YAML config with missing", "__config_test: tagged: !TagTrackerEager top: \"top level value\" nested: - leaf: \"leaf level value\"", "\"\"\"Load a valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "**kwargs): # the state of arguments *during* YAML evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs", "constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config: dict, section: str):", "leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: section = get_config_section(config,", "pipeline: - !MockPool __config_test: top_eager: !TagTrackerEager nested: - leaf: \"leaf level value\" -", "True def test_load_invalid(self): \"\"\"Load a invalid YAML config (invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "foo: 0 - !MockPool \"\"\" ) with pytest.raises(TypeError): with load(config.name): assert False def", "loading a YAML config with dangling content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "== [] # 
...but should be there in the end assert tagged.final_kwargs[\"nested\"] ==", "assert tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags with substructure, lazily using them\"\"\"", "def test_load_missing(self): \"\"\"Forbid loading a YAML config with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "True assert True def test_load_invalid(self): \"\"\"Load a invalid YAML config (invalid keyword argument)\"\"\"", "tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [] # ...but should be", "should exist immediately... assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] == []", "should be there in the end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}]", "to YAML !Tags\"\"\" def __init__(self, *args, **kwargs): # the state of arguments *during*", "TagTracker: \"\"\"Helper to track the arguments supplied to YAML !Tags\"\"\" def __init__(self, *args,", "\"\"\" ) with load(config.name): assert True assert True def test_load_invalid(self): \"\"\"Load a invalid", "arguments *after* YAML evaluation self.final_args = args self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker,", "with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "\"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags", "level value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] ==", "YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "assert 
tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [] # ...but should", "a YAML config with mixed pipeline step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation:", "== section ) class TestYamlConfig: def test_load(self): \"\"\"Load a valid YAML config\"\"\" with", "\"\"\"Load a invalid YAML config (invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading", "leaf_lazy: !TagTrackerLazy nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name) as", "write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: settings_tag: !__yaml_tag_test top: \"top level value\"", "track the arguments supplied to YAML !Tags\"\"\" def __init__(self, *args, **kwargs): # the", "- leaf: \"leaf level value\" - leaf_lazy: !TagTrackerLazy nested: - leaf: \"leaf level", "\"leaf level value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags with decorator settings\"\"\" # __yaml_tag_test is", "- user:read \"\"\" ) with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert", "# eager loading => all data should exist immediately assert tagged.orig_kwargs[\"top\"] == \"top", "cobald.controller.linear import LinearController from ...mock.pool import MockPool # register test pool as safe", "eager loading => all data should exist immediately assert tagged.orig_kwargs[\"top\"] == \"top level", "copy from cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config import load, COBalDLoader, yaml_constructor from cobald.controller.linear", "ConfigurationError from cobald.daemon.core.config import load, 
COBalDLoader, yaml_constructor from cobald.controller.linear import LinearController from ...mock.pool", "tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"]", "import ConfigurationError from cobald.daemon.core.config import load, COBalDLoader, yaml_constructor from cobald.controller.linear import LinearController from", "overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags", "\"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_missing(self): \"\"\"Forbid loading a", "pipeline: - !MockPool __config_test: tagged: !TagTrackerLazy top: \"top level value\" nested: - leaf:", "YAML evaluation self.final_args = args self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) )", "isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"] ==", "value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\":", "pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool random_things: foo: bar \"\"\"", "assert tagged.orig_kwargs[\"nested\"] == [] # ...but should be there in the end assert", "cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config import load, COBalDLoader, yaml_constructor from cobald.controller.linear import LinearController", "__init__(self, *args, **kwargs): # the state of arguments *during* YAML evaluation self.orig_args =", "\"pipeline\") assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool) 
def test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\"", "safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for testing lazy/eager YAML evaluation", "arguments available during evaluation # are not necessarily complete. class TagTracker: \"\"\"Helper to", "users: - user_name: tardis scopes: - user:read \"\"\" ) with load(config.name) as config:", "with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"]", "get_config_section(config, \"__config_test\") args, kwargs = section[\"settings_tag\"] assert args == () assert kwargs[\"top\"] ==", "with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" logging: version: 1.0 \"\"\" ) with", "only some data should exist immediately... assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert", "the arguments available during evaluation # are not necessarily complete. 
class TagTracker: \"\"\"Helper", "# __yaml_tag_test is provided by the cobald package with NamedTemporaryFile(suffix=\".yaml\") as config: with", "args, kwargs = section[\"settings_tag\"] assert args == () assert kwargs[\"top\"] == \"top level", "!MockPool __config_test: settings_tag: !__yaml_tag_test top: \"top level value\" nested: - leaf: \"leaf level", "- leaf_lazy: !TagTrackerLazy nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name)", "\"\"\"Load !Tags with substructure, immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "!TagTrackerEager nested: - leaf: \"leaf level value\" - leaf_lazy: !TagTrackerLazy nested: - leaf:", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: top_eager: !TagTrackerEager nested:", "pytest.raises(TypeError): with load(config.name): assert False def test_load_dangling(self): \"\"\"Forbid loading a YAML config with", "tardis scopes: - user:read \"\"\" ) with load(config.name) as config: tagged = get_config_section(config,", ") with load(config.name) as config: pipeline = get_config_section(config, \"pipeline\") assert isinstance(pipeline[0], LinearController) assert", "foo: bar \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_missing(self): \"\"\"Forbid", "== [{\"leaf\": \"leaf level value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags with nested !Tags\"\"\" with", "!Tags\"\"\" def __init__(self, *args, **kwargs): # the state of arguments *during* YAML evaluation", "dict, section: str): return next( content for plugin, content in config.items() if plugin.section", "\"w\") as write_stream: write_stream.write( \"\"\" logging: version: 1.0 \"\"\" ) with pytest.raises(ConfigurationError): with", "value\" - 
leaf_lazy: !TagTrackerLazy nested: - leaf: \"leaf level value\" \"\"\" ) with", "YAML evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) # the state of arguments", "__config_test: tagged: !TagTrackerLazy top: \"top level value\" nested: - leaf: \"leaf level value\"", "# ...but should be there in the end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf", "!TagTrackerEager host: 127.0.0.1 port: 1234 algorithm: HS256 users: - user_name: tardis scopes: -", "- !MockPool random_things: foo: bar \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False", "loading a YAML config with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "should exist immediately assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\":", "\"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_mixed_creation(self): \"\"\"Load a YAML", "assert False def test_load_mixed_creation(self): \"\"\"Load a YAML config with mixed pipeline step creation", "!MockPool \"\"\" ) with load(config.name) as config: pipeline = get_config_section(config, \"pipeline\") assert isinstance(pipeline[0],", "- leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: top_eager =", "exist immediately assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf", "config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: settings_tag:", "TagTracker) # eager loading => all data should exist immediately assert tagged.orig_kwargs[\"top\"] ==", "load(config.name): assert False def test_load_missing(self): \"\"\"Forbid loading a YAML config with missing content\"\"\"", "host: 127.0.0.1 port: 1234 algorithm: HS256 users: - user_name: tardis scopes: - user:read", "- __type__: 
cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 0.9 - !MockPool \"\"\" ) with load(config.name)", "leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: tagged = get_config_section(config,", "tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags with substructure, lazily using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "port: 1234 algorithm: HS256 users: - user_name: tardis scopes: - user:read \"\"\" )", "with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" logging: version:", "\"top level value\" nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name)", "- leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: tagged =", "test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "tagged: !TagTrackerLazy top: \"top level value\" nested: - leaf: \"leaf level value\" \"\"\"", "\"leaf level value\"}] assert tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags with substructure,", "write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerLazy top: \"top level value\"", "content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" logging:", "lazily using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config:", "value\" assert tagged.orig_kwargs[\"nested\"] == [] # ...but should be there in the end", 
"write_stream: write_stream.write( \"\"\" logging: version: 1.0 \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert", "with load(config.name) as config: pipeline = get_config_section(config, \"pipeline\") assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target,", "test_load_tags_nested(self): \"\"\"Load !Tags with nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", ") with load(config.name) as config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags are", "substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline:", "\"\"\" ) with load(config.name) as config: section = get_config_section(config, \"__config_test\") args, kwargs =", "with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 foo:", "import copy from cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config import load, COBalDLoader, yaml_constructor from", "return next( content for plugin, content in config.items() if plugin.section == section )", "self.final_args = args self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\",", "- user_name: tardis scopes: - user:read \"\"\" ) with load(config.name) as config: tagged", "constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config: dict, section: str): return next( content for plugin,", "write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerLazy top: \"top level value\" nested:", "testing lazy/eager YAML evaluation # Since YAML defaults to lazy evaluation, the arguments", "tempfile import NamedTemporaryFile import pytest import copy 
from cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config", "pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_mixed_creation(self): \"\"\"Load a YAML config with mixed", ") with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) #", "\"\"\"Helper to track the arguments supplied to YAML !Tags\"\"\" def __init__(self, *args, **kwargs):", "tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config: dict, section:", "import MockPool # register test pool as safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool))", "\"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool random_things: foo: bar", "= get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] ==", "tags are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf level value\" }", "write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 foo: 0 - !MockPool \"\"\" )", ") with load(config.name) as config: section = get_config_section(config, \"__config_test\") args, kwargs = section[\"settings_tag\"]", "end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags with", "config with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerLazy top: \"top level", "0 - !MockPool \"\"\" ) with 
pytest.raises(TypeError): with load(config.name): assert False def test_load_dangling(self):", "as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 foo: 0 - !MockPool", "!Tags with substructure, lazily using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "with nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "MockPool # register test pool as safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) #", "def test_load_tags_lazy(self): \"\"\"Load !Tags with substructure, lazily using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "# the state of arguments *after* YAML evaluation self.final_args = args self.final_kwargs =", "LinearController) assert isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"]", "as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" logging: version: 1.0 \"\"\"", "level value\" assert tagged.orig_kwargs[\"nested\"] == [] # ...but should be there in the", "a YAML config with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "== \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load", "load(config.name): assert False def test_load_mixed_creation(self): \"\"\"Load a YAML config with mixed pipeline step", "immediately assert tagged.orig_kwargs[\"top\"] 
== \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level", ") with load(config.name): assert True assert True def test_load_invalid(self): \"\"\"Load a invalid YAML", "leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: top_eager = get_config_section(config,", "class TestYamlConfig: def test_load(self): \"\"\"Load a valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "= args self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker,", "assert False def test_load_dangling(self): \"\"\"Forbid loading a YAML config with dangling content\"\"\" with", "nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: top_eager", "\"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 -", "evaluation self.final_args = args self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor(", "with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation: 0.9", "isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "dangling content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "<gh_stars>1-10 from tempfile import NamedTemporaryFile import pytest import copy from cobald.daemon.config.mapping import ConfigurationError", "all data should exist immediately assert tagged.orig_kwargs[\"top\"] == \"top level value\" 
assert tagged.orig_kwargs[\"nested\"]", "0.9 - !MockPool \"\"\" ) with load(config.name) as config: pipeline = get_config_section(config, \"pipeline\")", "Since YAML defaults to lazy evaluation, the arguments available during evaluation # are", "loading => only some data should exist immediately... assert tagged.orig_kwargs[\"top\"] == \"top level", "write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager host: 127.0.0.1 port: 1234 algorithm:", "=> all data should exist immediately assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert", "constructor=yaml_constructor(MockPool)) # Helpers for testing lazy/eager YAML evaluation # Since YAML defaults to", "tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags with nested !Tags\"\"\"", "the state of arguments *during* YAML evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs)", "value\" \"\"\" ) with load(config.name) as config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] # eager", "scopes: - user:read \"\"\" ) with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"]", "isinstance(tagged, TagTracker) # eager loading => all data should exist immediately assert tagged.orig_kwargs[\"top\"]", "laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags with", "- leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: section =", "config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" logging: version: 1.0 \"\"\" )", "there in the end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tags_nested(self):", "write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 foo: 0 - !MockPool \"\"\"", "test_load_tags_eager(self): 
\"\"\"Load !Tags with substructure, immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "\"leaf level value\" - leaf_lazy: !TagTrackerLazy nested: - leaf: \"leaf level value\" \"\"\"", "from tempfile import NamedTemporaryFile import pytest import copy from cobald.daemon.config.mapping import ConfigurationError from", "with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "\"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: top_eager: !TagTrackerEager nested: -", "leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags with decorator settings\"\"\"", "state of arguments *during* YAML evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) #", "invalid YAML config (invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "def test_load_tags_eager(self): \"\"\"Load !Tags with substructure, immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "substructure, immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "get_config_section(config: dict, section: str): return next( content for plugin, content in config.items() if", "level value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags with nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "version: 1.0 \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_mixed_creation(self): \"\"\"Load", "top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf level value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness", "!MockPool \"\"\" ) with pytest.raises(TypeError): with load(config.name): 
assert False def test_load_dangling(self): \"\"\"Forbid loading", "config: section = get_config_section(config, \"__config_test\") args, kwargs = section[\"settings_tag\"] assert args == ()", "# eager tags are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf level", "self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) )", "tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] assert", "config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: top_eager:", "from ...mock.pool import MockPool # register test pool as safe for YAML configurations", "level value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] assert tagged.orig_kwargs == tagged.final_kwargs", "as config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags are evaluated eagerly assert", "= get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] == {", "as config: section = get_config_section(config, \"__config_test\") args, kwargs = section[\"settings_tag\"] assert args ==", "settings\"\"\" # __yaml_tag_test is provided by the cobald package with NamedTemporaryFile(suffix=\".yaml\") as config:", "config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation:", "!MockPool __config_test: top_eager: !TagTrackerEager nested: - leaf: \"leaf level value\" - leaf_lazy: !TagTrackerLazy", "methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: 
write_stream.write( \"\"\" pipeline:", "write_stream.write( \"\"\" logging: version: 1.0 \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False", "!Tags with substructure, immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "== \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [] # ...but should be there", "for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for testing lazy/eager YAML evaluation #", "__config_test: settings_tag: !__yaml_tag_test top: \"top level value\" nested: - leaf: \"leaf level value\"", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerLazy top:", "== [{\"leaf\": \"leaf level value\"}] assert tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags", "as write_stream: write_stream.write( \"\"\" pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 0.9 -", "== () assert kwargs[\"top\"] == \"top level value\" assert kwargs[\"nested\"] == [{\"leaf\": \"leaf", "NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" logging: version: 1.0", "- !MockPool __config_test: tagged: !TagTrackerEager top: \"top level value\" nested: - leaf: \"leaf", "as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\"", "# Since YAML defaults to lazy evaluation, the arguments available during evaluation #", "tagged: !TagTrackerEager top: \"top level value\" nested: - leaf: \"leaf level value\" \"\"\"", "mixed pipeline step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "assert tagged.orig_kwargs[\"top\"] 
== \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}]", "def test_load_invalid(self): \"\"\"Load a invalid YAML config (invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "\"leaf level value\" \"\"\" ) with load(config.name) as config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"]", "substructure, lazily using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: settings_tag: !__yaml_tag_test", "evaluation, the arguments available during evaluation # are not necessarily complete. class TagTracker:", "using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "logging: version: 1.0 \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_mixed_creation(self):", "# register test pool as safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers", "value\" nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config:", "\"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager host: 127.0.0.1 port: 1234 algorithm: HS256", "\"\"\" logging: version: 1.0 \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def", ") with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_missing(self): \"\"\"Forbid loading a YAML", "loading => all data should exist immediately assert tagged.orig_kwargs[\"top\"] == \"top level value\"", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: settings_tag: !__yaml_tag_test top:", "== \"tardis\" assert 
tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags with substructure, immediately", "assert kwargs[\"top\"] == \"top level value\" assert kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}]", "top: \"top level value\" nested: - leaf: \"leaf level value\" \"\"\" ) with", "evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf level value\" } leaf_lazy =", "as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test:", "write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: top_eager: !TagTrackerEager nested: - leaf: \"leaf", "\"leaf level value\" \"\"\" ) with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"]", "content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline:", "config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => all", "isinstance(tagged, TagTracker) # eager loading => only some data should exist immediately... 
assert", "\"w\") as write_stream: write_stream.write( \"\"\" pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 0.9", "keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\"", "!MockPool random_things: foo: bar \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def", "\"\"\"Load !Tags with substructure, lazily using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"] == \"HS256\"", "\"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: settings_tag: !__yaml_tag_test top: \"top", "from cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config import load, COBalDLoader, yaml_constructor from cobald.controller.linear import", "nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config: section", "[\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags with substructure, immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "defaults to lazy evaluation, the arguments available during evaluation # are not necessarily", "content in config.items() if plugin.section == section ) class TestYamlConfig: def test_load(self): \"\"\"Load", "\"\"\"Load a YAML config with mixed pipeline step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "def test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager", "a YAML config with dangling content\"\"\" 
with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool \"\"\"", "config: pipeline = get_config_section(config, \"pipeline\") assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self):", "tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => all data", "YAML !Tags\"\"\" def __init__(self, *args, **kwargs): # the state of arguments *during* YAML", "} leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] # eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf", "test_load(self): \"\"\"Load a valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\")", "eagerness overrides laziness assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tag_settings(self): \"\"\"Load", "write_stream.write( \"\"\" pipeline: - !MockPool __config_test: settings_tag: !__yaml_tag_test top: \"top level value\" nested:", "tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"]", "(invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool", "as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager host: 127.0.0.1 port:", "= get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # 
eager loading => all data should", "evaluation # are not necessarily complete. class TagTracker: \"\"\"Helper to track the arguments", "assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags with nested", "get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\":", "\"__config_test\")[\"top_eager\"] # eager tags are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf", "[{\"leaf\": \"leaf level value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags with decorator settings\"\"\" # __yaml_tag_test", "- !MockPool __config_test: tagged: !TagTrackerEager host: 127.0.0.1 port: 1234 algorithm: HS256 users: -", "the cobald package with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write(", "- !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool random_things: foo: bar \"\"\" )", "args self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True) ) COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False)", "next( content for plugin, content in config.items() if plugin.section == section ) class", "= section[\"settings_tag\"] assert args == () assert kwargs[\"top\"] == \"top level value\" assert", "\"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool \"\"\" ) with", "\"top level value\" assert tagged.orig_kwargs[\"nested\"] == [] # ...but should be there in", "value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags with decorator settings\"\"\" # __yaml_tag_test is provided by", "!LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool random_things: foo: bar \"\"\" ) with", "of arguments *after* YAML evaluation 
self.final_args = args self.final_kwargs = kwargs COBalDLoader.add_constructor( tag=\"!TagTrackerEager\",", "level value\" \"\"\" ) with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert", "kwargs = section[\"settings_tag\"] assert args == () assert kwargs[\"top\"] == \"top level value\"", "# are not necessarily complete. class TagTracker: \"\"\"Helper to track the arguments supplied", "\"\"\" pipeline: - !MockPool __config_test: settings_tag: !__yaml_tag_test top: \"top level value\" nested: -", "YAML config with dangling content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as", "# the state of arguments *during* YAML evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs =", "test pool as safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for testing", "as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: top_eager: !TagTrackerEager nested: - leaf:", "1.0 \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_mixed_creation(self): \"\"\"Load a", ") COBalDLoader.add_constructor( tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False) ) def get_config_section(config: dict, section: str): return next(", "in config.items() if plugin.section == section ) class TestYamlConfig: def test_load(self): \"\"\"Load a", "\"\"\"Forbid loading a YAML config with dangling content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "def test_load_tag_settings(self): \"\"\"Load !Tags with decorator settings\"\"\" # __yaml_tag_test is provided by the", "assert args == () assert kwargs[\"top\"] == \"top level value\" assert kwargs[\"nested\"] ==", "assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"] == 
\"HS256\" assert", "- !MockPool __config_test: settings_tag: !__yaml_tag_test top: \"top level value\" nested: - leaf: \"leaf", "import NamedTemporaryFile import pytest import copy from cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config import", "TestYamlConfig: def test_load(self): \"\"\"Load a valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "\"leaf level value\" \"\"\" ) with load(config.name) as config: section = get_config_section(config, \"__config_test\")", "test_load_invalid(self): \"\"\"Load a invalid YAML config (invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config:", "data should exist immediately assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] ==", "False def test_load_dangling(self): \"\"\"Forbid loading a YAML config with dangling content\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "=> only some data should exist immediately... 
assert tagged.orig_kwargs[\"top\"] == \"top level value\"", "assert True assert True def test_load_invalid(self): \"\"\"Load a invalid YAML config (invalid keyword", "config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert", "YAML config with mixed pipeline step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "assert tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert", "load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] ==", "cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 0.9 - !MockPool \"\"\" ) with load(config.name) as config:", "top_eager: !TagTrackerEager nested: - leaf: \"leaf level value\" - leaf_lazy: !TagTrackerLazy nested: -", "== 1234 assert tagged.final_kwargs[\"algorithm\"] == \"HS256\" assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\" assert tagged.final_kwargs[\"users\"][0][\"scopes\"] ==", "immediately... 
assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] == [] # ...but", "!TagTrackerLazy nested: - leaf: \"leaf level value\" \"\"\" ) with load(config.name) as config:", "register test pool as safe for YAML configurations COBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool)) # Helpers for", "!MockPool __config_test: tagged: !TagTrackerEager host: 127.0.0.1 port: 1234 algorithm: HS256 users: - user_name:", "1234 algorithm: HS256 users: - user_name: tardis scopes: - user:read \"\"\" ) with", "value\"}] def test_load_tags_nested(self): \"\"\"Load !Tags with nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "1.1 - !MockPool \"\"\" ) with load(config.name): assert True assert True def test_load_invalid(self):", "decorator settings\"\"\" # __yaml_tag_test is provided by the cobald package with NamedTemporaryFile(suffix=\".yaml\") as", "low_utilisation: 0.9 high_allocation: 0.9 - !MockPool \"\"\" ) with load(config.name) as config: pipeline", "level value\" - leaf_lazy: !TagTrackerLazy nested: - leaf: \"leaf level value\" \"\"\" )", "\"\"\"Load !Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "MockPool) def test_load_tags_substructure(self): \"\"\"Load !Tags with substructure\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name,", "the arguments supplied to YAML !Tags\"\"\" def __init__(self, *args, **kwargs): # the state", "\"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerLazy top: \"top", "pipeline: - !MockPool __config_test: tagged: !TagTrackerEager top: \"top level value\" nested: - leaf:", "- !MockPool \"\"\" ) with pytest.raises(TypeError): with load(config.name): assert False def test_load_dangling(self): \"\"\"Forbid", "# Helpers for testing lazy/eager YAML evaluation # Since YAML defaults to 
lazy", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager host:", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager top:", "assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags with substructure, immediately using them\"\"\"", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" logging: version: 1.0 \"\"\" ) with pytest.raises(ConfigurationError):", "- !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool \"\"\" ) with load(config.name): assert", "config with dangling content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "data should exist immediately... assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"] ==", "value\" \"\"\" ) with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged,", "LinearController from ...mock.pool import MockPool # register test pool as safe for YAML", "load(config.name) as config: section = get_config_section(config, \"__config_test\") args, kwargs = section[\"settings_tag\"] assert args", "high_allocation: 1.1 - !MockPool random_things: foo: bar \"\"\" ) with pytest.raises(ConfigurationError): with load(config.name):", "load(config.name): assert False def test_load_dangling(self): \"\"\"Forbid loading a YAML config with dangling content\"\"\"", "arguments *during* YAML evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) # the state", "argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline:", "def test_load_dangling(self): \"\"\"Forbid loading a YAML config with dangling 
content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "\"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => all data should exist immediately", "pytest import copy from cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config import load, COBalDLoader, yaml_constructor", "with load(config.name): assert False def test_load_dangling(self): \"\"\"Forbid loading a YAML config with dangling", "import pytest import copy from cobald.daemon.config.mapping import ConfigurationError from cobald.daemon.core.config import load, COBalDLoader,", "level value\" \"\"\" ) with load(config.name) as config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] #", "for plugin, content in config.items() if plugin.section == section ) class TestYamlConfig: def", "content for plugin, content in config.items() if plugin.section == section ) class TestYamlConfig:", "test_load_mixed_creation(self): \"\"\"Load a YAML config with mixed pipeline step creation methods\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234 assert tagged.final_kwargs[\"algorithm\"]", "with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerLazy", "\"\"\" pipeline: - __type__: cobald.controller.linear.LinearController low_utilisation: 0.9 high_allocation: 0.9 - !MockPool \"\"\" )", "algorithm: HS256 users: - user_name: tardis scopes: - user:read \"\"\" ) with load(config.name)", "assert top_eager.orig_kwargs[\"nested\"][0] == { \"leaf\": \"leaf level value\" } leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"] #", "!Tags with nested !Tags\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "supplied to YAML !Tags\"\"\" def __init__(self, *args, 
**kwargs): # the state of arguments", "write_stream.write( \"\"\" pipeline: - !MockPool __config_test: top_eager: !TagTrackerEager nested: - leaf: \"leaf level", "!LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool \"\"\" ) with load(config.name): assert True", "\"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\" assert tagged.final_kwargs[\"port\"] == 1234 assert", "value\"}] assert tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags with substructure, lazily using", "section = get_config_section(config, \"__config_test\") args, kwargs = section[\"settings_tag\"] assert args == () assert", "of arguments *during* YAML evaluation self.orig_args = copy.deepcopy(args) self.orig_kwargs = copy.deepcopy(kwargs) # the", "in the end assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tags_nested(self): \"\"\"Load", "!Tags with decorator settings\"\"\" # __yaml_tag_test is provided by the cobald package with", "!LinearController low_utilisation: 0.9 foo: 0 - !MockPool \"\"\" ) with pytest.raises(TypeError): with load(config.name):", "write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 high_allocation: 1.1 - !MockPool \"\"\" )", "\"\"\" ) with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker)", "\"\"\" pipeline: - !MockPool __config_test: top_eager: !TagTrackerEager nested: - leaf: \"leaf level value\"", ") class TestYamlConfig: def test_load(self): \"\"\"Load a valid YAML config\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as", "section: str): return next( content for plugin, content in config.items() if plugin.section ==", "open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !LinearController low_utilisation: 0.9 foo: 0", "config: tagged = get_config_section(config, 
\"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => only", "cobald.daemon.core.config import load, COBalDLoader, yaml_constructor from cobald.controller.linear import LinearController from ...mock.pool import MockPool", "- !LinearController low_utilisation: 0.9 foo: 0 - !MockPool \"\"\" ) with pytest.raises(TypeError): with", "!MockPool __config_test: tagged: !TagTrackerLazy top: \"top level value\" nested: - leaf: \"leaf level", "with load(config.name) as config: tagged = get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager", "tagged: !TagTrackerEager host: 127.0.0.1 port: 1234 algorithm: HS256 users: - user_name: tardis scopes:", "\"w\") as write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager host: 127.0.0.1", "get_config_section(config, \"__config_test\")[\"tagged\"] assert isinstance(tagged, TagTracker) # eager loading => only some data should", "to lazy evaluation, the arguments available during evaluation # are not necessarily complete.", "assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] def test_load_tag_settings(self): \"\"\"Load !Tags with decorator", "them\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream: write_stream.write( \"\"\" pipeline:", "eager=False) ) def get_config_section(config: dict, section: str): return next( content for plugin, content", "if plugin.section == section ) class TestYamlConfig: def test_load(self): \"\"\"Load a valid YAML", "tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}] assert tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load", "write_stream: write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager top: \"top level value\"", "def __init__(self, *args, **kwargs): # the state of arguments *during* YAML evaluation 
self.orig_args", "== [\"user:read\"] def test_load_tags_eager(self): \"\"\"Load !Tags with substructure, immediately using them\"\"\" with NamedTemporaryFile(suffix=\".yaml\")", "\"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerLazy top: \"top level value\" nested: -", "pytest.raises(ConfigurationError): with load(config.name): assert False def test_load_missing(self): \"\"\"Forbid loading a YAML config with", "[{\"leaf\": \"leaf level value\"}] assert tagged.orig_kwargs == tagged.final_kwargs def test_load_tags_lazy(self): \"\"\"Load !Tags with", "config (invalid keyword argument)\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with open(config.name, \"w\") as write_stream:", "\"\"\"Forbid loading a YAML config with missing content\"\"\" with NamedTemporaryFile(suffix=\".yaml\") as config: with", "write_stream.write( \"\"\" pipeline: - !MockPool __config_test: tagged: !TagTrackerEager top: \"top level value\" nested:", "config: top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"] # eager tags are evaluated eagerly assert top_eager.orig_kwargs[\"nested\"][0]", "get_config_section(config, \"pipeline\") assert isinstance(pipeline[0], LinearController) assert isinstance(pipeline[0].target, MockPool) def test_load_tags_substructure(self): \"\"\"Load !Tags with", "some data should exist immediately... assert tagged.orig_kwargs[\"top\"] == \"top level value\" assert tagged.orig_kwargs[\"nested\"]" ]
[ "the remote URL.\") user_id = fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda self: self.env.user,", "import api, fields, models class MusicRemote(models.Model): _name = \"oomusic.remote\" _description = \"Remote Control\"", "\"_public\" if remote.public else \"\", remote.access_token ) @api.depends(\"url\") def _compute_qr(self): for remote in", "s._default_name()) access_token = fields.Char( \"Access Token\", index=True, default=lambda s: s._default_access_token() ) public =", "Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda", "code pointing to the remote URL.\") user_id = fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\",", "def _default_access_token(self): return uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda s: s._default_name()) access_token = fields.Char(", "coding: utf-8 -*- import base64 import uuid from io import BytesIO import qrcode", "to the remote URL.\") user_id = fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda self:", "fields.Char(\"Name\", default=lambda s: s._default_name()) access_token = fields.Char( \"Access Token\", index=True, default=lambda s: s._default_access_token()", "help=\"Access this URL to control the playback remotely.\" ) qr = fields.Binary(\"QR Code\",", "default=lambda self: self.env.user, ) @api.depends(\"access_token\", \"public\") def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote", "= \"Remote Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex name =", "Token\", index=True, default=lambda s: s._default_access_token() ) public = fields.Boolean(\"Public\", 
default=False) url = fields.Char(", "BytesIO import qrcode from odoo import api, fields, models class MusicRemote(models.Model): _name =", "default=lambda s: s._default_access_token() ) public = fields.Boolean(\"Public\", default=False) url = fields.Char( \"URL\", compute=\"_compute_url\",", "fields.Boolean(\"Public\", default=False) url = fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this URL to control the", "self: img = qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def", "name = fields.Char(\"Name\", default=lambda s: s._default_name()) access_token = fields.Char( \"Access Token\", index=True, default=lambda", "self: self.env.user, ) @api.depends(\"access_token\", \"public\") def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in", "\"Remote Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex name = fields.Char(\"Name\",", "for remote in self: img = qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr", "fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code pointing to the remote URL.\") user_id = fields.Many2one(", "pointing to the remote URL.\") user_id = fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda", "format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def action_reset_remote_token(self): for remote in self: remote.access_token = uuid.uuid4().hex", "= fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this URL to control the playback remotely.\" )", "_compute_qr(self): for remote in self: img = qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\")", "uuid from io import BytesIO import qrcode from odoo import api, fields, models", "_compute_url(self): 
base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\"", "remote URL.\") user_id = fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda self: self.env.user, )", "compute=\"_compute_url\", help=\"Access this URL to control the playback remotely.\" ) qr = fields.Binary(\"QR", "index=True, default=lambda s: s._default_access_token() ) public = fields.Boolean(\"Public\", default=False) url = fields.Char( \"URL\",", "default=False) url = fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this URL to control the playback", "fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda self: self.env.user, ) @api.depends(\"access_token\", \"public\") def _compute_url(self):", "\"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public else \"\", remote.access_token ) @api.depends(\"url\") def _compute_qr(self): for", "\"public\") def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self: remote.url = \"{}/oomusic/remote{}/{}\".format(", "else \"\", remote.access_token ) @api.depends(\"url\") def _compute_qr(self): for remote in self: img =", "qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code pointing to the remote URL.\") user_id", "self.env.user, ) @api.depends(\"access_token\", \"public\") def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self:", "uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda s: s._default_name()) access_token = fields.Char( \"Access Token\", index=True,", "_description = \"Remote Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex name", 
"@api.depends(\"access_token\", \"public\") def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self: remote.url =", "\"Access Token\", index=True, default=lambda s: s._default_access_token() ) public = fields.Boolean(\"Public\", default=False) url =", "user_id = fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda self: self.env.user, ) @api.depends(\"access_token\", \"public\")", "help=\"QR code pointing to the remote URL.\") user_id = fields.Many2one( \"res.users\", string=\"User\", required=True,", "remote in self: img = qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr =", "compute=\"_compute_qr\", help=\"QR code pointing to the remote URL.\") user_id = fields.Many2one( \"res.users\", string=\"User\",", "for remote in self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public else \"\",", "base64 import uuid from io import BytesIO import qrcode from odoo import api,", "fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda s: s._default_name()) access_token =", "img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def action_reset_remote_token(self): for remote in self: remote.access_token =", "return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda s: s._default_name()) access_token", "qrcode from odoo import api, fields, models class MusicRemote(models.Model): _name = \"oomusic.remote\" _description", "to control the playback remotely.\" ) qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code", "the playback remotely.\" ) qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code pointing to", "= fields.Char( 
\"Access Token\", index=True, default=lambda s: s._default_access_token() ) public = fields.Boolean(\"Public\", default=False)", "import qrcode from odoo import api, fields, models class MusicRemote(models.Model): _name = \"oomusic.remote\"", "return uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda s: s._default_name()) access_token = fields.Char( \"Access Token\",", "\"oomusic.remote\" _description = \"Remote Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex", "s: s._default_name()) access_token = fields.Char( \"Access Token\", index=True, default=lambda s: s._default_access_token() ) public", "= \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public else \"\", remote.access_token ) @api.depends(\"url\") def _compute_qr(self):", "ondelete=\"cascade\", default=lambda self: self.env.user, ) @api.depends(\"access_token\", \"public\") def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for", "public = fields.Boolean(\"Public\", default=False) url = fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this URL to", "= \"oomusic.remote\" _description = \"Remote Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return", "= self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public", "if remote.public else \"\", remote.access_token ) @api.depends(\"url\") def _compute_qr(self): for remote in self:", "remotely.\" ) qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code pointing to the remote", ") @api.depends(\"access_token\", \"public\") def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self: 
remote.url", ") public = fields.Boolean(\"Public\", default=False) url = fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this URL", "= fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda self: self.env.user, ) @api.depends(\"access_token\", \"public\") def", "= fields.Char(\"Name\", default=lambda s: s._default_name()) access_token = fields.Char( \"Access Token\", index=True, default=lambda s:", "remote.public else \"\", remote.access_token ) @api.depends(\"url\") def _compute_qr(self): for remote in self: img", ") @api.depends(\"url\") def _compute_qr(self): for remote in self: img = qrcode.make(remote.url) img_tmp =", "= qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def action_reset_remote_token(self): for", "s: s._default_access_token() ) public = fields.Boolean(\"Public\", default=False) url = fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access", "_name = \"oomusic.remote\" _description = \"Remote Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self):", "URL.\") user_id = fields.Many2one( \"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda self: self.env.user, ) @api.depends(\"access_token\",", "Code\", compute=\"_compute_qr\", help=\"QR code pointing to the remote URL.\") user_id = fields.Many2one( \"res.users\",", ") qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code pointing to the remote URL.\")", "from io import BytesIO import qrcode from odoo import api, fields, models class", "self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public else", "remote.access_token ) @api.depends(\"url\") def _compute_qr(self): for remote in self: img = qrcode.make(remote.url) 
img_tmp", "def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda s:", "io import BytesIO import qrcode from odoo import api, fields, models class MusicRemote(models.Model):", "string=\"User\", required=True, ondelete=\"cascade\", default=lambda self: self.env.user, ) @api.depends(\"access_token\", \"public\") def _compute_url(self): base_url =", "import BytesIO import qrcode from odoo import api, fields, models class MusicRemote(models.Model): _name", "access_token = fields.Char( \"Access Token\", index=True, default=lambda s: s._default_access_token() ) public = fields.Boolean(\"Public\",", "in self: img = qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue())", "-*- import base64 import uuid from io import BytesIO import qrcode from odoo", "models class MusicRemote(models.Model): _name = \"oomusic.remote\" _description = \"Remote Control\" def _default_name(self): return", "MusicRemote(models.Model): _name = \"oomusic.remote\" _description = \"Remote Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def", "img = qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def action_reset_remote_token(self):", "BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def action_reset_remote_token(self): for remote in self: remote.access_token", "url = fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this URL to control the playback remotely.\"", "in self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public else \"\", remote.access_token )", "control the playback remotely.\" ) qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code pointing", "utf-8 -*- 
import base64 import uuid from io import BytesIO import qrcode from", "_default_name(self): return fields.Date.to_string(fields.Date.context_today(self)) def _default_access_token(self): return uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda s: s._default_name())", "base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if", "api, fields, models class MusicRemote(models.Model): _name = \"oomusic.remote\" _description = \"Remote Control\" def", "fields.Char( \"Access Token\", index=True, default=lambda s: s._default_access_token() ) public = fields.Boolean(\"Public\", default=False) url", "import uuid from io import BytesIO import qrcode from odoo import api, fields,", "from odoo import api, fields, models class MusicRemote(models.Model): _name = \"oomusic.remote\" _description =", "= fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code pointing to the remote URL.\") user_id =", "qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def action_reset_remote_token(self): for remote", "playback remotely.\" ) qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR code pointing to the", "_default_access_token(self): return uuid.uuid4().hex name = fields.Char(\"Name\", default=lambda s: s._default_name()) access_token = fields.Char( \"Access", "remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public else \"\", remote.access_token ) @api.depends(\"url\") def", "class MusicRemote(models.Model): _name = \"oomusic.remote\" _description = \"Remote Control\" def _default_name(self): return fields.Date.to_string(fields.Date.context_today(self))", "default=lambda s: s._default_name()) access_token = fields.Char( \"Access Token\", index=True, default=lambda s: s._default_access_token() )", "base_url, \"_public\" if 
remote.public else \"\", remote.access_token ) @api.depends(\"url\") def _compute_qr(self): for remote", "img_tmp = BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def action_reset_remote_token(self): for remote in", "fields, models class MusicRemote(models.Model): _name = \"oomusic.remote\" _description = \"Remote Control\" def _default_name(self):", "import base64 import uuid from io import BytesIO import qrcode from odoo import", "required=True, ondelete=\"cascade\", default=lambda self: self.env.user, ) @api.depends(\"access_token\", \"public\") def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\")", "def _compute_qr(self): for remote in self: img = qrcode.make(remote.url) img_tmp = BytesIO() img.save(img_tmp,", "fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this URL to control the playback remotely.\" ) qr", "-*- coding: utf-8 -*- import base64 import uuid from io import BytesIO import", "\"res.users\", string=\"User\", required=True, ondelete=\"cascade\", default=lambda self: self.env.user, ) @api.depends(\"access_token\", \"public\") def _compute_url(self): base_url", "@api.depends(\"url\") def _compute_qr(self): for remote in self: img = qrcode.make(remote.url) img_tmp = BytesIO()", "\"URL\", compute=\"_compute_url\", help=\"Access this URL to control the playback remotely.\" ) qr =", "# -*- coding: utf-8 -*- import base64 import uuid from io import BytesIO", "= fields.Boolean(\"Public\", default=False) url = fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this URL to control", "remote in self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public else \"\", remote.access_token", "this URL to control the playback remotely.\" ) qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\",", "odoo import api, fields, models class MusicRemote(models.Model): _name = \"oomusic.remote\" _description = \"Remote", 
"self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url, \"_public\" if remote.public else \"\", remote.access_token ) @api.depends(\"url\")", "URL to control the playback remotely.\" ) qr = fields.Binary(\"QR Code\", compute=\"_compute_qr\", help=\"QR", "s._default_access_token() ) public = fields.Boolean(\"Public\", default=False) url = fields.Char( \"URL\", compute=\"_compute_url\", help=\"Access this", "def _compute_url(self): base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\") for remote in self: remote.url = \"{}/oomusic/remote{}/{}\".format( base_url,", "= BytesIO() img.save(img_tmp, format=\"PNG\") remote.qr = base64.b64encode(img_tmp.getvalue()) def action_reset_remote_token(self): for remote in self:", "\"\", remote.access_token ) @api.depends(\"url\") def _compute_qr(self): for remote in self: img = qrcode.make(remote.url)" ]
[ "model displays (template-based) - docking panel support for python objects - iPython console", "<reponame>NelisW/ray-optics<gh_stars>0 \"\"\" package supplying Qt5 desktop application and associated functional support The ``rayoptics.qtgui``", "supplying Qt5 desktop application and associated functional support The ``rayoptics.qtgui`` subpackage provides a", "Anaconda. It also provides a series of higher level interfaces used by rayoptics.", "and associated functional support The ``rayoptics.qtgui`` subpackage provides a desktop app that runs", "level interfaces used by rayoptics. These include: - an interface that hosts matplotlib", "used by rayoptics. These include: - an interface that hosts matplotlib graphics -", "application and associated functional support The ``rayoptics.qtgui`` subpackage provides a desktop app that", "(template-based) - docking panel support for python objects - iPython console window (desktop", "for numeric model displays (template-based) - docking panel support for python objects -", "matplotlib graphics - a table grid for numeric model displays (template-based) - docking", "under Anaconda. It also provides a series of higher level interfaces used by", "support The ``rayoptics.qtgui`` subpackage provides a desktop app that runs under Anaconda. It", "graphics - a table grid for numeric model displays (template-based) - docking panel", "rayoptics. These include: - an interface that hosts matplotlib graphics - a table", "\"\"\" package supplying Qt5 desktop application and associated functional support The ``rayoptics.qtgui`` subpackage", "grid for numeric model displays (template-based) - docking panel support for python objects", "by rayoptics. These include: - an interface that hosts matplotlib graphics - a", "functional support The ``rayoptics.qtgui`` subpackage provides a desktop app that runs under Anaconda.", "series of higher level interfaces used by rayoptics. These include: - an interface", "app that runs under Anaconda. 
It also provides a series of higher level", "that hosts matplotlib graphics - a table grid for numeric model displays (template-based)", "that runs under Anaconda. It also provides a series of higher level interfaces", "``rayoptics.qtgui`` subpackage provides a desktop app that runs under Anaconda. It also provides", "associated functional support The ``rayoptics.qtgui`` subpackage provides a desktop app that runs under", "- docking panel support for python objects - iPython console window (desktop app", "an interface that hosts matplotlib graphics - a table grid for numeric model", "a desktop app that runs under Anaconda. It also provides a series of", "higher level interfaces used by rayoptics. These include: - an interface that hosts", "desktop application and associated functional support The ``rayoptics.qtgui`` subpackage provides a desktop app", "a table grid for numeric model displays (template-based) - docking panel support for", "docking panel support for python objects - iPython console window (desktop app only)", "panel support for python objects - iPython console window (desktop app only) \"\"\"", "numeric model displays (template-based) - docking panel support for python objects - iPython", "These include: - an interface that hosts matplotlib graphics - a table grid", "interfaces used by rayoptics. These include: - an interface that hosts matplotlib graphics", "package supplying Qt5 desktop application and associated functional support The ``rayoptics.qtgui`` subpackage provides", "interface that hosts matplotlib graphics - a table grid for numeric model displays", "provides a desktop app that runs under Anaconda. It also provides a series", "Qt5 desktop application and associated functional support The ``rayoptics.qtgui`` subpackage provides a desktop", "displays (template-based) - docking panel support for python objects - iPython console window", "a series of higher level interfaces used by rayoptics. 
These include: - an", "table grid for numeric model displays (template-based) - docking panel support for python", "provides a series of higher level interfaces used by rayoptics. These include: -", "- a table grid for numeric model displays (template-based) - docking panel support", "It also provides a series of higher level interfaces used by rayoptics. These", "The ``rayoptics.qtgui`` subpackage provides a desktop app that runs under Anaconda. It also", "subpackage provides a desktop app that runs under Anaconda. It also provides a", "include: - an interface that hosts matplotlib graphics - a table grid for", "- an interface that hosts matplotlib graphics - a table grid for numeric", "desktop app that runs under Anaconda. It also provides a series of higher", "runs under Anaconda. It also provides a series of higher level interfaces used", "of higher level interfaces used by rayoptics. These include: - an interface that", "hosts matplotlib graphics - a table grid for numeric model displays (template-based) -", "also provides a series of higher level interfaces used by rayoptics. These include:" ]
[ "assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\", }", "spacy from assertpy import assert_that from src.definitions import PROJECT_ROOT from src.main.preprocess_data import preprocess_data,", "= pd.DataFrame( { \"pubtator_id\": [\"0\"] * 9, \"passage_id\": [0] * 9, \"words\": [", "\"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\", ], \"POS\": [ \"NOUN\",", "src.main.preprocess_data import preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, )", "\"pubtator_id\": 1, \"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\":", "], \"POS\": [ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ],", "pd import spacy from assertpy import assert_that from src.definitions import PROJECT_ROOT from src.main.preprocess_data", "test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, ) actual = pd.read_csv(tmp_path / \"labeled_passages.csv\")", "/ \"data/test/raw\", output_dir=tmp_path, ) actual = pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word: str) ->", "return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"methanol\", \"POS\":", "\"length\": 7}], }, ], }, pubtator_id=\"0\", passage_id=0, nlp=nlp, ) print(actual) expected = pd.DataFrame(", "of rRNA and poly(A)-containing RNA to filters.\", \"annotations\": [ { \"infons\": {\"identifier\": \"MESH:D011061\",", "}, ], }, pubtator_id=\"0\", passage_id=0, nlp=nlp, ) print(actual) expected = pd.DataFrame( { \"pubtator_id\":", "import 
assert_that from src.definitions import PROJECT_ROOT from src.main.preprocess_data import preprocess_data, parse_passage def test_preprocess_data(tmp_path:", "{ \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\", } )", "\"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"] * 4 +", "\"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2,", "tokenization to split on '-' \"text\": \"poly(A)\", \"locations\": [{\"offset\": 43, \"length\": 7}], },", "Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, ) actual = pd.read_csv(tmp_path / \"labeled_passages.csv\") def", "from src.definitions import PROJECT_ROOT from src.main.preprocess_data import preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data(", "\"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\", ], \"POS\": [ \"NOUN\", \"ADP\", \"ADJ\",", "word_as_dict(word: str) -> dict: return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\":", "\"POS\": \"NOUN\", \"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\":", "\"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\":", "test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={ \"offset\": 20, # This is", "def word_as_dict(word: str) -> dict: return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0,", "\"Adsorption of rRNA and poly(A)-containing RNA to filters.\", 
\"annotations\": [ { \"infons\": {\"identifier\":", ") print(actual) expected = pd.DataFrame( { \"pubtator_id\": [\"0\"] * 9, \"passage_id\": [0] *", "-> dict: return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\":", "nlp = spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={ \"offset\": 20, # This is the", "import pandas as pd import spacy from assertpy import assert_that from src.definitions import", "\"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, # TODO: configure tokenization to split on '-'", "actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"methanol\", \"POS\": \"NOUN\",", "to filters.\", \"annotations\": [ { \"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, # TODO: configure", "\"passage_id\": [0] * 9, \"words\": [ \"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\",", "\"filters\", \".\", ], \"POS\": [ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\", \"ADP\", \"NOUN\",", ") assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\",", "parse_passage( passage={ \"offset\": 20, # This is the parameter under test \"text\": \"Adsorption", "preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, ) actual = pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word:", "[\"O\"] * 4 + [\"B-Chemical\"] + [\"O\"] * 4, } ) pd.testing.assert_frame_equal(left=actual, right=expected)", "= pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word: str) -> dict: return actual[actual.words == word].iloc[0].to_dict()", "\"labels\": [\"O\"] * 4 + [\"B-Chemical\"] + [\"O\"] * 4, } ) 
pd.testing.assert_frame_equal(left=actual,", "\"type\": \"Chemical\"}, # TODO: configure tokenization to split on '-' \"text\": \"poly(A)\", \"locations\":", "0, \"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( {", "dict: return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"methanol\",", "expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual", "\"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\": \"NOUN\",", "1, \"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0,", "\"words\": [ \"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\", ], \"POS\":", "poly(A)-containing RNA to filters.\", \"annotations\": [ { \"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, #", "\"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\":", "\"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to(", "\"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"] * 4", "Path import pandas as pd import spacy from assertpy import assert_that from src.definitions", "} ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\": 
\"NOUN\", \"labels\":", "pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={ \"offset\": 20,", "\"words\": \"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\", } ) expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\")", "import spacy from assertpy import assert_that from src.definitions import PROJECT_ROOT from src.main.preprocess_data import", "\"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\":", "\"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"nucleotide\",", "spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={ \"offset\": 20, # This is the parameter under", "expected = pd.DataFrame( { \"pubtator_id\": [\"0\"] * 9, \"passage_id\": [0] * 9, \"words\":", "0, \"pubtator_id\": 1, \"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( {", "\"labels\": \"I-Chemical\", } ) expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset():", "\"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"] * 4 + [\"B-Chemical\"] + [\"O\"] *", "* 9, \"words\": [ \"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\",", "\"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\", ], \"POS\": [ \"NOUN\", \"ADP\",", "assert_that from src.definitions import PROJECT_ROOT from src.main.preprocess_data import preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path):", "[ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", 
\"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"]", "\"CCONJ\", \"VERB\", \"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"] * 4 + [\"B-Chemical\"]", "\"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={ \"offset\":", "def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, ) actual = pd.read_csv(tmp_path /", "assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", }", "split on '-' \"text\": \"poly(A)\", \"locations\": [{\"offset\": 43, \"length\": 7}], }, ], },", "\"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\":", "# This is the parameter under test \"text\": \"Adsorption of rRNA and poly(A)-containing", "\"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"] *", "\"POS\": [ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\":", "\"POS\": \"NOUN\", \"labels\": \"I-Chemical\", } ) expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected)", "\"annotations\": [ { \"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, # TODO: configure tokenization to", "0, \"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( {", "9, \"passage_id\": [0] * 9, \"words\": [ \"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\",", "} ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 
0, \"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\":", "= pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual =", "PROJECT_ROOT from src.main.preprocess_data import preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\",", "pubtator_id=\"0\", passage_id=0, nlp=nlp, ) print(actual) expected = pd.DataFrame( { \"pubtator_id\": [\"0\"] * 9,", "output_dir=tmp_path, ) actual = pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word: str) -> dict: return", "\"MESH:D011061\", \"type\": \"Chemical\"}, # TODO: configure tokenization to split on '-' \"text\": \"poly(A)\",", "nlp=nlp, ) print(actual) expected = pd.DataFrame( { \"pubtator_id\": [\"0\"] * 9, \"passage_id\": [0]", "\"poly(A)\", \"locations\": [{\"offset\": 43, \"length\": 7}], }, ], }, pubtator_id=\"0\", passage_id=0, nlp=nlp, )", "\"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\": \"NOUN\",", "rRNA and poly(A)-containing RNA to filters.\", \"annotations\": [ { \"infons\": {\"identifier\": \"MESH:D011061\", \"type\":", "{ \"pubtator_id\": [\"0\"] * 9, \"passage_id\": [0] * 9, \"words\": [ \"Adsorption\", \"of\",", "data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, ) actual = pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word: str)", "pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word: str) -> dict: return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to(", "parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, ) actual = pd.read_csv(tmp_path", "actual 
= pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word: str) -> dict: return actual[actual.words ==", "str) -> dict: return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1,", "actual = parse_passage( passage={ \"offset\": 20, # This is the parameter under test", "and poly(A)-containing RNA to filters.\", \"annotations\": [ { \"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"},", "pd.DataFrame( { \"pubtator_id\": [\"0\"] * 9, \"passage_id\": [0] * 9, \"words\": [ \"Adsorption\",", "{ \"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, # TODO: configure tokenization to split on", "7}], }, ], }, pubtator_id=\"0\", passage_id=0, nlp=nlp, ) print(actual) expected = pd.DataFrame( {", "[ \"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\", ], \"POS\": [", "\"text\": \"poly(A)\", \"locations\": [{\"offset\": 43, \"length\": 7}], }, ], }, pubtator_id=\"0\", passage_id=0, nlp=nlp,", "TODO: configure tokenization to split on '-' \"text\": \"poly(A)\", \"locations\": [{\"offset\": 43, \"length\":", "This is the parameter under test \"text\": \"Adsorption of rRNA and poly(A)-containing RNA", ") assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\",", "under test \"text\": \"Adsorption of rRNA and poly(A)-containing RNA to filters.\", \"annotations\": [", "filters.\", \"annotations\": [ { \"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, # TODO: configure tokenization", "import preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, ) actual", "], }, pubtator_id=\"0\", passage_id=0, nlp=nlp, ) print(actual) expected = pd.DataFrame( { \"pubtator_id\": 
[\"0\"]", "}, pubtator_id=\"0\", passage_id=0, nlp=nlp, ) print(actual) expected = pd.DataFrame( { \"pubtator_id\": [\"0\"] *", "\"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\", } ) expected", "RNA to filters.\", \"annotations\": [ { \"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, # TODO:", "print(actual) expected = pd.DataFrame( { \"pubtator_id\": [\"0\"] * 9, \"passage_id\": [0] * 9,", "\"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to(", "9, \"words\": [ \"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\", ],", "} ) expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp =", "test \"text\": \"Adsorption of rRNA and poly(A)-containing RNA to filters.\", \"annotations\": [ {", "\"to\", \"filters\", \".\", ], \"POS\": [ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\", \"ADP\",", "{ \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\", } )", "\"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\":", "from pathlib import Path import pandas as pd import spacy from assertpy import", "\"VERB\", \"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"] * 4 + [\"B-Chemical\"] +", "word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\",", "{ \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } )", "to split on '-' 
\"text\": \"poly(A)\", \"locations\": [{\"offset\": 43, \"length\": 7}], }, ],", "43, \"length\": 7}], }, ], }, pubtator_id=\"0\", passage_id=0, nlp=nlp, ) print(actual) expected =", "src.definitions import PROJECT_ROOT from src.main.preprocess_data import preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT", "assertpy import assert_that from src.definitions import PROJECT_ROOT from src.main.preprocess_data import preprocess_data, parse_passage def", "\"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\": \"NOUN\",", "\"data/test/raw\", output_dir=tmp_path, ) actual = pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word: str) -> dict:", "from src.main.preprocess_data import preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path,", "= spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={ \"offset\": 20, # This is the parameter", "\"pubtator_id\": [\"0\"] * 9, \"passage_id\": [0] * 9, \"words\": [ \"Adsorption\", \"of\", \"rRNA\",", "\"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\", } ) expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual,", "\"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1,", "20, # This is the parameter under test \"text\": \"Adsorption of rRNA and", "2, \"words\": \"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\", } ) expected = pd.read_csv(PROJECT_ROOT /", "import Path import pandas as pd import spacy from assertpy import assert_that from", "\"PUNCT\", ], \"labels\": [\"O\"] * 4 + [\"B-Chemical\"] + [\"O\"] * 4, }", "\"locations\": [{\"offset\": 43, \"length\": 7}], }, ], 
}, pubtator_id=\"0\", passage_id=0, nlp=nlp, ) print(actual)", "{ \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } )", "\"NOUN\", \"labels\": \"I-Chemical\", } ) expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def", "pandas as pd import spacy from assertpy import assert_that from src.definitions import PROJECT_ROOT", ") assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\",", "pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual = parse_passage(", "\"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\":", "pathlib import Path import pandas as pd import spacy from assertpy import assert_that", "* 9, \"passage_id\": [0] * 9, \"words\": [ \"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\",", "right=expected) def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={ \"offset\": 20, #", "import PROJECT_ROOT from src.main.preprocess_data import preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT /", "on '-' \"text\": \"poly(A)\", \"locations\": [{\"offset\": 43, \"length\": 7}], }, ], }, pubtator_id=\"0\",", "0, \"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\", } ) expected =", "is the parameter under test \"text\": \"Adsorption of rRNA and poly(A)-containing RNA to", "== word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { 
\"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\":", "\"I-Chemical\", } ) expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp", "as pd import spacy from assertpy import assert_that from src.definitions import PROJECT_ROOT from", "} ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\":", "\"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to(", "\"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\", ], \"POS\": [ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\",", "\"NOUN\", \"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"pyridine\",", "[0] * 9, \"words\": [ \"Adsorption\", \"of\", \"rRNA\", \"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\",", "2, \"words\": \"pyridine\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0,", "\"Chemical\"}, # TODO: configure tokenization to split on '-' \"text\": \"poly(A)\", \"locations\": [{\"offset\":", "\"offset\": 20, # This is the parameter under test \"text\": \"Adsorption of rRNA", "\"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"] * 4 + [\"B-Chemical\"] + [\"O\"]", "[\"0\"] * 9, \"passage_id\": [0] * 9, \"words\": [ \"Adsorption\", \"of\", \"rRNA\", \"and\",", "parameter under test \"text\": \"Adsorption of rRNA and poly(A)-containing RNA to filters.\", \"annotations\":", "\"text\": \"Adsorption of rRNA and poly(A)-containing RNA to filters.\", \"annotations\": [ { \"infons\":", "[{\"offset\": 43, \"length\": 7}], }, ], }, pubtator_id=\"0\", 
passage_id=0, nlp=nlp, ) print(actual) expected", "[ { \"infons\": {\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, # TODO: configure tokenization to split", "configure tokenization to split on '-' \"text\": \"poly(A)\", \"locations\": [{\"offset\": 43, \"length\": 7}],", "\"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2,", "/ \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={", ") actual = pd.read_csv(tmp_path / \"labeled_passages.csv\") def word_as_dict(word: str) -> dict: return actual[actual.words", "def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\") actual = parse_passage( passage={ \"offset\": 20, # This", "'-' \"text\": \"poly(A)\", \"locations\": [{\"offset\": 43, \"length\": 7}], }, ], }, pubtator_id=\"0\", passage_id=0,", "\".\", ], \"POS\": [ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\", \"ADP\", \"NOUN\", \"PUNCT\",", "\"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"poisoning\",", ") expected = pd.read_csv(PROJECT_ROOT / \"data/test/labeled_passages.csv\") pd.testing.assert_frame_equal(left=actual, right=expected) def test_parse_passage_can_handle_global_offset(): nlp = spacy.load(\"en_core_web_sm\")", "1, \"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\": 0,", "\"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"nucleotide\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\":", "], \"labels\": [\"O\"] * 4 + [\"B-Chemical\"] + [\"O\"] * 4, } )", "from assertpy import assert_that 
from src.definitions import PROJECT_ROOT from src.main.preprocess_data import preprocess_data, parse_passage", "\"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\", } ) assert_that(word_as_dict(\"pyridine\")).is_equal_to( { \"passage_id\":", "\"pubtator_id\": 2, \"words\": \"nucleotide\", \"POS\": \"NOUN\", \"labels\": \"I-Chemical\", } ) expected = pd.read_csv(PROJECT_ROOT", "{\"identifier\": \"MESH:D011061\", \"type\": \"Chemical\"}, # TODO: configure tokenization to split on '-' \"text\":", "passage={ \"offset\": 20, # This is the parameter under test \"text\": \"Adsorption of", "assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\": \"NOUN\", \"labels\": \"B-Disease\", }", "preprocess_data, parse_passage def test_preprocess_data(tmp_path: Path): preprocess_data( data_root=PROJECT_ROOT / \"data/test/raw\", output_dir=tmp_path, ) actual =", "assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"methanol\", \"POS\": \"NOUN\", \"labels\": \"B-Chemical\", }", "\"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\": \"poisoning\", \"POS\":", "/ \"labeled_passages.csv\") def word_as_dict(word: str) -> dict: return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( {", "the parameter under test \"text\": \"Adsorption of rRNA and poly(A)-containing RNA to filters.\",", "\"RNA\", \"to\", \"filters\", \".\", ], \"POS\": [ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\", \"VERB\", \"PROPN\",", "\"NOUN\", \"PUNCT\", ], \"labels\": [\"O\"] * 4 + [\"B-Chemical\"] + [\"O\"] * 4,", "= parse_passage( passage={ \"offset\": 20, # This is the parameter under test \"text\":", "\"and\", \"poly(A)-containing\", \"RNA\", \"to\", \"filters\", \".\", ], \"POS\": [ \"NOUN\", \"ADP\", \"ADJ\", \"CCONJ\",", 
"passage_id=0, nlp=nlp, ) print(actual) expected = pd.DataFrame( { \"pubtator_id\": [\"0\"] * 9, \"passage_id\":", "# TODO: configure tokenization to split on '-' \"text\": \"poly(A)\", \"locations\": [{\"offset\": 43,", "\"POS\": \"NOUN\", \"labels\": \"B-Chemical\", } ) assert_that(word_as_dict(\"poisoning\")).is_equal_to( { \"passage_id\": 0, \"pubtator_id\": 1, \"words\":", "\"labeled_passages.csv\") def word_as_dict(word: str) -> dict: return actual[actual.words == word].iloc[0].to_dict() assert_that(word_as_dict(\"methanol\")).is_equal_to( { \"passage_id\":" ]
[ "None) inst = s.model () new_insts.append (inst) print 'New instance:\\n', inst sub =", "= dict () pred_keys = [] for rule_exp in fp.get_rules (): # rule_exp", "(exp).as_expr () # qe_lite followed by qe def full_qe (exp): temp = qe_lite", "to the else_value sort = const.sort () if isinstance (sort, z3.ArraySortRef): val_interp =", "'{}_{}'.format (c.decl ().name (), str (i)) const = z3.Const (name, c.sort ()) curr_exist_consts.append", "in range (exp.num_args ()) : arg = exp.arg (i) if z3.is_app (arg): yield", "return False if exp2 is None and exp1 is not None: return False", "= sort.range () val = z3.K(val_sort, val_interp.else_value ()) for i in range (val_interp.num_entries", "m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) sub.append ( (const, val)", "(ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: fp.set (slice=False)", "q = given_fp.parse_file (smt2file) given_preds = get_preds (given_fp) fp = z3.Fixedpoint (ctx=ctx) rules", "obtain the head unused_, matrix = strip_qblock (rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head", "() witnesses = list () while True: print 'Solver for witness:' for cons", "(z3.substitute (z3.Not (matrix), *sub)) if w.check () == z3.unsat: print 'TRUE\\n', sub if", "curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts)", "instances for the universals given by the user as a # starting point", "None, None) else: if model: return (m, None, None) sub = mk_subst_from_model (m,", "def mk_and (args, ctx=None): if len (args) == 0: return mk_true (ctx=ctx) else:", "# constraints for witness # initialize w with given_insts if given_insts is not", "val_interp = m [const] if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp):", "for i in range (decl.arity ()): name = startswith + str (i) sort", 
"matrix) else: return z3.Exists (qvars, matrix) t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t", "vs. 1==const) and to avoid repetitions of terms, we return (None,eq) for the", "impl2 = z3.Implies (z3.Not (args[0]), args[2]) return z3.And (impl1, impl2) else: return decl", "z3.AstRef.translate that handles sorts and function declarations correctly\"\"\" if x.ctx == ctx: return", "= strip_qblock (exp) matrix = elim_term_ite (matrix) if exp.is_forall (): e = z3.ForAll", "# be hard to eliminate by qe_lite #e = elim_bool_ite (exp) #e =", "'Solving by negating the problem' cex_size = 0 curr_exist_consts = [] curr_matrix_disjs =", "univ_consts and exist_consts # add a new set of exist consts sub =", "model: return (m, None, None) return (sub, None, None) inst = s.model ()", "result for i in range (exp.num_vars ()): e = t (e).as_expr () if", "in pre_const_keys: exist_consts.append (post_consts [i]) if len (exist_consts) > 0: e = z3.Exists", "for e in done_exp: if e.eq (exp): return # sub-dag is already processed", "(inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for pred in given_preds.itervalues", "for pred in preds.itervalues (): print 'Lemmas for predicate: ', pred n =", "= z3.BoolSort () def z3_translate (x, ctx): \"\"\" A version of z3.AstRef.translate that", "sorts.append (z3_translate (x.range (), ctx)) return z3.Function (x.name (), *sorts) if ctx is", "map (exp_key, post_consts) exist_consts = [] for i in range (len (post_consts)): post_key", "(head_key) preds [head_decl.name ()] = head_decl return preds def print_lemmas (fp): preds =", "(ctx=ctx) else: return z3.And (*args) def create_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): fp", "(name, c.sort ()) curr_exist_consts.append (const) sub.append ( (c, const) ) new_disj = z3.substitute", "z3.ForAll (qvars, matrix) else: e = z3.Exists (qvars, matrix) return e if not", "= m [const] 
if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp): idx_sort", "if side_cons is not None: for cons in side_cons: s.add (cons) res =", "() (exist_consts, e) = strip_qblock (exp) if not z3.is_quantifier (e): # just an", "print for pred in preds.itervalues (): print 'Lemmas for predicate: ', pred n", "[name] = rule return (q, fp, rules) def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False):", "we yield (1,const==1) and (None,1==const)); # # assume that const only appears in", "res = s.check () if res == z3.unsat: return mk_false (ctx=exp.ctx) m =", "mk_fresh_args (decl, startswith=''): args = [] for i in range (decl.arity ()): name", "for sel in insert_and_yield (exp): yield sel def extract_consts (exp): res = []", "def create_named_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): given_fp = z3.Fixedpoint (ctx=ctx) q =", "validate=False): given_fp = z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file (smt2file) given_preds = get_preds (given_fp)", "= m.eval (const, model_completion=model_completion) eqs.append (const == val) return eqs def qe_array (exp):", ": {}'.format (i, fp.get_cover_delta (i, pred)) print '{} : {}'.format ('oo', fp.get_cover_delta (-1,", "exp) def elim_bool_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix", "= s.check () if res == z3.unsat: return mk_false (ctx=exp.ctx) m = s.model", "sel in insert_and_yield (exp): yield sel def extract_consts (exp): res = [] for", "(exp): return exp return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr () # qe_lite followed by", "()): # args are the array and the idx for sel in unique_selects", "done_exp.append (exp) def unique_leaves (exp, leaf_keys=None): def insert_and_yield (e): k = exp_key (e)", "fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q", "(matrix), *sub)) if w.check () == z3.unsat: print 
'TRUE\\n', sub if model: return", "be hard to eliminate by qe_lite #e = elim_bool_ite (exp) #e = elim_term_ite", "= z3.Exists (qvars, matrix) return e if not z3.is_bool (exp): return exp if", "elif z3.is_app (exp): for i in range (exp.num_args ()): for (t,eq) in unique_eq_terms_on_const", "return (None, None, None) else: if model: return (m, None, None) sub =", "curr_exist_consts.append (const) sub.append ( (c, const) ) new_disj = z3.substitute (z3.Not (matrix), *sub)", "== z3.unsat: print 'FALSE\\n', new_insts return (None, new_insts, witnesses) m = w.model ()", "in unique_const_leaves (exp): res.append (c) return res def mk_const_variant (const, variant): name =", "z3.is_var (l): yield l def exp_has_const_leaf (exp, l): for m in unique_const_leaves (exp):", "m.eval (const, model_completion=model_completion) sub.append ( (const, val) ) return sub def mk_eqs_from_model (m,", "print 'Univ consts:', univ_consts print 'Matrix:', matrix print 'Solving by negating the problem'", "ctx is None: return x.translate (ctx=z3.main_ctx ()) return x.translate (ctx) def translate_pair_list (l,", "if exp2 is None and exp1 is not None: return False return exp_key", "for pred in given_preds.itervalues (): fp.register_relation (pred) for i,rule in enumerate (given_fp.get_rules ()):", "a cex of size 'cex_size' # Exists U1,U2,..U_cex_size. Forall E. 
Not (matrix), #", "l: new_p = (z3_translate (a, ctx), z3_translate (b, ctx)) res.append (new_p) return res", "isinstance (val_interp, z3.FuncInterp) : idx_sort = sort.domain () val_sort = sort.range () val", "(e): found = False for t in eq_terms: if z3.eq (t,e): found =", "print '{} : {}'.format ('oo', fp.get_cover_delta (-1, pred)) print def get_level_lemmas (fp, lvl,", "(e) return True return False def process_eq (e1, e2): if z3.eq (e1, const):", "False for l in unique_const_leaves (exp): if z3.eq (l, const): found = True", "z3.Z3_OP_SUB)): return None is_add = z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 = e1.arg (0) arg1", "(c.decl ().name (), str (cex_size)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const)", "any of the witnesses already works for m in witnesses: w = z3.Solver", "(1) if has_const (arg1, const): arg0,arg1 = arg1,arg0 # swap if has_const (arg0,", "ctx=None, pp=False, engine='pdr', validate=False): given_fp = z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file (smt2file) given_preds", "'r'+str(i) fp.add_rule (rule, name=name) rules [name] = rule return (q, fp, rules) def", "boolean structure of exp) def elim_bool_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) =", "(fp): preds = dict () pred_keys = [] for rule_exp in fp.get_rules ():", "val = z3.Store (val, entry.arg_value (0), entry.value ()) else: val = m.eval (const,", "if exp1 is None and exp2 is None: return True if exp1 is", "ctx=exp.ctx) e = t (exp).as_expr () # tactic introduces new constants which need", "the # constraints 'side_cons' on the free variables; # # let exp =", "yield sel if z3.is_select (exp): for sel in insert_and_yield (exp): yield sel def", "generate_proof_trace=False) return fp def strip_qblock (expr): if not z3.is_quantifier (expr): return ([], expr)", "= z3.Solver (ctx=exp.ctx) s.add (matrix) if side_cons is not None: for c in", "() def z3_translate (x, ctx): \"\"\" A version of z3.AstRef.translate that handles sorts", "z3.eq (arg1, const): if is_add: 
ret_val = z3.simplify (e2-arg0) else: ret_val = z3.simplify", "leaf def unique_const_leaves (exp): for l in unique_leaves (exp): if not z3.is_var (l):", "t (exp).as_expr () def cofactor_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock", "dict () # map from names to rules if not pp: print 'No", "(exp): res = [] for c in unique_const_leaves (exp): res.append (c) return res", "exp.arg (i) if z3.is_app (arg): yield arg.decl () def has_const (exp, const): found", "(const, exp, eq_terms=None, done_exp=None): def insert (e): found = False for t in", "z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr () if is_forall: (qvars, matrix) = strip_qblock (qf_exp) if", "to worry about And and Or because they can take >2 args and", "', pred n = fp.get_num_levels (pred) for i in range (n): print '{}", "not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)): return None is_add = z3.is_app_of", "mk_int (val, ctx=None): return z3.IntVal (val, ctx=ctx) def mk_and (args, ctx=None): if len", "appearing in exp; # # to accommodate for alternative representations of the same", "for quant. 
alternation; # given_insts is a list of instances for the universals", "in eq_terms: if z3.eq (t,e): found = True break if not found: eq_terms.append", "(None, None, None) else: if model: return (m, None, None) sub = mk_subst_from_model", "= [] for l in range (lvl, fp.get_num_levels (pred) + 1): lemmas.append (fp.get_cover_delta", "None: for inst in given_insts: sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute", "univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) # like above, but looks for counterexamples", "And and Or because they can take >2 args and # decl(*args) doesn't", "exp1 is None and exp2 is None: return True if exp1 is None", "(engine=engine, use_farkas=True, generate_proof_trace=False) return fp def strip_qblock (expr): if not z3.is_quantifier (expr): return", "the head unused_, matrix = strip_qblock (rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head =", "(slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for pred", "for c in side_cons: s.add (c) res = s.check () if res ==", "eliminate by qe_lite #e = elim_bool_ite (exp) #e = elim_term_ite (e) return e", "idx_sort = sort.domain () val_sort = sort.range () val = z3.K(val_sort, val_interp.else_value ())", "(exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True) if cex_model is", "(slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q =", "(exp) matrix = elim_bool_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix)", "print 'Exist consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix print 'Solving", "return exp if z3.is_true (exp) or z3.is_false (exp): return exp assert 
z3.is_app (exp)", "yield (1,const==1) and (None,1==const)); # # assume that const only appears in simple", "() # map from names to rules if not pp: print 'No pre-processing'", "variant) sub.append ( (const, const_variant) ) return sub def mk_fresh_args (decl, startswith=''): args", "matrix.arg (1) else: head = matrix assert head is not None # obtain", "mk_false (ctx=exp.ctx) m = s.model () sub = mk_subst_from_model (m, qvars, model_completion=True) return", "e) = strip_qblock (exp) if not z3.is_quantifier (e): # just an smt problem", "(arg0, const): t = process_eq (arg0, arg1) if t is not None: if", "= mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) # like above, but", "else: e = z3.Exists (qvars, matrix) return e if not z3.is_bool (exp): return", "# sampling based method for quant. alternation; # given_insts is a list of", "if z3.is_const (exp) and not (z3.is_int_value (exp) or z3.is_rational_value (exp)): for leaf in", "3\", etc.) def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None): def insert (e): found =", "w_cons = [] # constraints for witness # initialize w with given_insts if", "exp) elif z3.is_app (exp): for i in range (exp.num_args ()): for (t,eq) in", "unique_leaves (exp, leaf_keys=None): def insert_and_yield (e): k = exp_key (e) if k not", "Z3 # ############################################ import z3 I = z3.IntSort () B = z3.BoolSort ()", "e # sampling based method for quant. alternation; # given_insts is a list", "const.sort ()) def mk_exp_variant_sub (exp, variant): sub = [] for const in unique_const_leaves", "range (decl.arity ()): name = startswith + str (i) sort = decl.domain (i)", "U1,U2,..U_cex_size. Forall E. 
Not (matrix), # where U and E are univ_consts and", "(e) if m is None: return (None, None, None) else: if model: return", "None is_add = z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 = e1.arg (0) arg1 = e1.arg", "def translate_pair_list (l, ctx): res = [] for (a,b) in l: new_p =", "return z3.substitute (matrix, *sub) def nnf (exp): t = z3.Tactic ('nnf', ctx=exp.ctx) return", "original set of # universal variables return (None, cex_model, witnesses) else: # no", "None # obtain head_decl head_decl = head.decl () # ensure head_decl is in", "= z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 = e1.arg (0) arg1 = e1.arg (1) if", "([v], matrix)).as_expr () else: matrix = t (z3.Exists ([v], matrix)).as_expr () e =", "sub-dag is already processed if z3.is_eq (exp): arg0 = exp.arg (0) arg1 =", "unused_, matrix = strip_qblock (rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1)", "rules = dict () # map from names to rules if not pp:", "(x.domain (i), ctx) for i in range (x.arity ())] sorts.append (z3_translate (x.range (),", "leaf_keys: leaf_keys.append (k) yield e if leaf_keys is None: leaf_keys = [] if", "if not z3.is_quantifier (exp): return exp return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr () #", "else: e = z3.Exists (qvars, matrix) return e pre_consts = extract_consts (exp) pre_const_keys", "t (exp).as_expr () def is_ite (exp): return z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor (exp):", "# add a new set of exist consts sub = [] for c", "a # starting point def solve_exists_forall (exp, given_insts=None, model=False): print 'Exists Forall exp:',", "appears in another equality term # (so, we can't handle \"(const==0)==b\", \"ite (const==0,", "for better result for i in range (exp.num_vars ()): e = t (e).as_expr", "side_cons is not None: for c in side_cons: s.add (c) res = s.check", "() (qvars, matrix) = strip_qblock (exp) s = z3.Solver (ctx=exp.ctx) s.add (matrix) if", "res == z3.sat: return s.model () else: return None def mk_subst_from_model (m, consts,", "return 
exp is_forall = False if exp.is_forall (): is_forall = True (qvars, matrix)", "() while True: print 'Solver for witness:' for cons in w_cons: print cons.sexpr", "z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = cofactor_term_ite (matrix) if exp.is_forall", "qvars consistent with (matrix /\\ side_cons) # the under-approx. is obtained as \"matrix", "else: assert z3.is_var (exp) for leaf in insert_and_yield (exp): yield leaf def unique_const_leaves", "= exp_key (e) if k not in leaf_keys: leaf_keys.append (k) yield e if", "validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: print 'No pre-processing' fp.set (slice=False)", "existentially quantified fml) under the # constraints 'side_cons' on the free variables; #", "(e): # just an smt problem m = check_sat (e) if m is", "const) ) new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists", "# (so, we can't handle \"(const==0)==b\", \"ite (const==0, x, y) == 3\", etc.)", "SIZE:', cex_size+1 # look for a cex of size 'cex_size' # Exists U1,U2,..U_cex_size.", "QF predicate instance # obtain the head unused_, matrix = strip_qblock (rule_exp) if", "strip_qblock (expr): if not z3.is_quantifier (expr): return ([], expr) consts = list ()", "'Exist consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix w_cons = []", "eqs = [] for const in consts: # treat arrays specially due to", "not z3.is_quantifier (exp): return exp e = exp t = z3.Tactic ('qe-light', ctx=exp.ctx)", "worry about And and Or because they can take >2 args and #", "= matrix.arg (1) else: head = matrix assert head is not None #", "z3.And (*args) def create_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx)", "Exists U1,U2,..U_cex_size. Forall E. 
Not (matrix), # where U and E are univ_consts", "= cofactor_term_ite (exp) e = elim_bool_ite (e) # Alternatively, we could have done", "= strip_qblock (exp) if not z3.is_quantifier (e): # just an smt problem m", "False if exp2 is None and exp1 is not None: return False return", "matrix) else: e = z3.Exists (qvars, matrix) return e pre_consts = extract_consts (exp)", "arguments of exp # if arguments contain de-bruijn variables, they are ignored def", "exp_key (e): return e.ast.value def match_exp (exp1, exp2): if exp1 is None and", "(matrix, *sub)).sexpr () res = s.check () if res == z3.unsat: print 'TRUE\\n',", "range (exp.num_args ()) : arg = exp.arg (i) if z3.is_app (arg): yield arg.decl", "if isinstance (x, z3.FuncDeclRef): sorts = [z3_translate (x.domain (i), ctx) for i in", "e.eq (exp): return # sub-dag is already processed if z3.is_eq (exp): arg0 =", "(exp) or z3.is_false (exp): return exp assert z3.is_app (exp) decl = exp.decl ()", "witnesses = list () while True: print 'Solver for witness:' for cons in", "post_consts = extract_consts (e) post_const_keys = map (exp_key, post_consts) exist_consts = [] for", "# Some utility routines for Z3 # ############################################ import z3 I = z3.IntSort", "e = t (e).as_expr () if not z3.is_quantifier (e): return e if z3.is_quantifier", "def mk_fresh_args (decl, startswith=''): args = [] for i in range (decl.arity ()):", "return None def mk_subst_from_model (m, consts, model_completion=False): sub = [] for const in", "to accommodate for alternative representations of the same equality (e.g. const==1 # vs.", "matrix) t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr () def elim_term_ite (exp):", "= exp t = z3.Tactic ('qe-light', ctx=exp.ctx) # invoke qe_lite once per quantified", "elim_term_ite (e) return e # sampling based method for quant. 
alternation; # given_insts", "sub = mk_subst_from_model (m, exist_consts, model_completion=True) return (sub, None, None) else: assert e.is_forall", "(exp): if not z3.is_quantifier (exp): return exp e = exp t = z3.Tactic", "iterator for all equality terms on const; # # each pair (t,eq) is", "z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_bool_ite (matrix) if exp.is_forall", "(*args) elif z3.is_or (exp): return z3.Or (*args) elif is_ite (exp): impl1 = z3.Implies", "def qe (exp): if not z3.is_quantifier (exp): return exp return z3.Tactic ('qe', ctx=exp.ctx)", "insert_and_yield (e): k = exp_key (e) if k not in sel_keys: sel_keys.append (k)", "pp: fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False)", "caveat that # elim_term_ite introduces new existentially quantified variables which can # be", "leaf_keys=None): def insert_and_yield (e): k = exp_key (e) if k not in leaf_keys:", "(exp).as_expr () def is_ite (exp): return z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor (exp): return", "() res = s.check () if res == z3.unsat: print 'TRUE\\n', sub if", "else: yield (None, exp) elif z3.is_app (exp): for i in range (exp.num_args ()):", "for (a,b) in l: new_p = (z3_translate (a, ctx), z3_translate (b, ctx)) res.append", "(t, eq) done_exp.append (exp) def unique_leaves (exp, leaf_keys=None): def insert_and_yield (e): k =", "t (z3.Exists ([v], matrix)).as_expr () e = matrix return e def qe (exp):", "that const only appears in simple arithmetic terms and the coefficient # of", "duplicates (in the above example, we yield (1,const==1) and (None,1==const)); # # assume", "(ctx=ctx) if z3.is_arith_sort (x): if x.is_int (): return z3.IntSort (ctx=ctx) else : assert", "arrays specially due to the else_value sort = const.sort () if isinstance (sort,", "for i in range (exp.num_args ()) : arg = exp.arg (i) if z3.is_app", "model m of qvars 
consistent with (matrix /\\ side_cons) # the under-approx. is", "cex of size 'cex_size' # Exists U1,U2,..U_cex_size. Forall E. Not (matrix), # where", "is not None) and isinstance (val_interp, z3.FuncInterp): idx_sort = sort.domain () val_sort =", "/\\ side_cons) # the under-approx. is obtained as \"matrix [m/qvars]\" # # this", "= decl.domain (i) args.append (z3.Const (name, sort)) return args # check if fml", "problem m = check_sat (e) if m is None: return (None, None, None)", "curr_matrix_disjs = [] for i in range (cex_size): sub = [] for c", "z3.substitute_vars (expr.body (), *consts) return (consts, matrix) def get_preds (fp): preds = dict", "e = z3.Exists (exist_consts, e) return qe_lite (e) # obtain an under-approx of", "fp.get_num_levels (pred) for i in range (n): print '{} : {}'.format (i, fp.get_cover_delta", "(z3_translate (a, ctx), z3_translate (b, ctx)) res.append (new_p) return res def mk_true (ctx=None):", "because they can take >2 args and # decl(*args) doesn't seem to work", "const): t = process_eq (arg0, arg1) if t is not None: if insert", "in unique_leaves (exp): if z3.is_var (l): yield l def exp_has_const_leaf (exp, l): for", "(const, val) ) return sub def mk_eqs_from_model (m, consts, model_completion=False): eqs = []", "ret_val = z3.simplify (arg0-e2) else: if is_add: ret_val = process_eq (arg0, e2-arg1) else:", "variable, separately (qvars, matrix) = strip_qblock (e) for v in qvars: if exp.is_forall", "(exp): if z3.eq (l, const): found = True break return found # iterator", "None: sel_keys = [] # post-order if z3.is_app (exp): for i in range", "# args are the array and the idx for sel in unique_selects (exp.arg", "res = z3.Not (matrix) else: res = qf_exp return res def qe_lite (exp):", "size; # in other words, we solve the negation of the given problem,", "as a # starting point def solve_exists_forall (exp, given_insts=None, model=False): print 'Exists Forall", "alternation; # given_insts is a list of instances for the universals given by", 
"v in qvars: if exp.is_forall (): matrix = t (z3.ForAll ([v], matrix)).as_expr ()", "of exist consts sub = [] for c in univ_consts: name = '{}_{}'.format", "exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix w_cons = [] # constraints", "(a,b) in l: new_p = (z3_translate (a, ctx), z3_translate (b, ctx)) res.append (new_p)", "(cex_size)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append ( (c, const)", "entry = val_interp.entry (i) val = z3.Store (val, entry.arg_value (0), entry.value ()) else:", "'CURRENT SIZE:', cex_size+1 # look for a cex of size 'cex_size' # Exists", "py interface if z3.is_and (exp): return z3.And (*args) elif z3.is_or (exp): return z3.Or", "else: return decl (*args) def elim_ite (exp): e = cofactor_term_ite (exp) e =", ") return sub def mk_fresh_args (decl, startswith=''): args = [] for i in", "# like above, but looks for counterexamples of increasing size; # in other", "is not None: print 'FALSE\\n', cex_model print 'Size:', cex_size+1 # TODO: split cex_model", "ctx)) return z3.Function (x.name (), *sorts) if ctx is None: return x.translate (ctx=z3.main_ctx", "mk_true (ctx=None): return z3.BoolVal (True, ctx=ctx) def mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx)", "expr) consts = list () for i in reversed (range (expr.num_vars ())): v_name", "in leaf_keys: leaf_keys.append (k) yield e if leaf_keys is None: leaf_keys = []", "correctly\"\"\" if x.ctx == ctx: return x if isinstance (x, z3.BoolSortRef): return z3.BoolSort", "matrix) = strip_qblock (exp) matrix = elim_bool_ite (matrix) if exp.is_forall (): e =", "ctx=None): if len (args) == 0: return mk_true (ctx=ctx) else: return z3.And (*args)", "preds.itervalues (): print 'Lemmas for predicate: ', pred n = fp.get_num_levels (pred) for", "invoke qe_lite for each variable, separately (qvars, matrix) = strip_qblock (e) for v", "(e) print 'Exist consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix w_cons", "(e) # 
obtain an under-approx of exp (an existentially quantified fml) under the", "False return exp_key (exp1) == exp_key (exp2) # iterator for declarations of arguments", "else: if is_add: ret_val = process_eq (arg0, e2-arg1) else: ret_val = process_eq (arg0,", "return (consts, matrix) def get_preds (fp): preds = dict () pred_keys = []", "is None: return True if exp1 is None and exp2 is not None:", "(const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) eqs.append (const == val) return", "range (cex_size): sub = [] for c in univ_consts: name = '{}_{}'.format (c.decl", "s.add (cons) res = s.check () if res == z3.sat: return s.model ()", "def cofactor_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix =", "= z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp,", "z3.ForAll (qvars, z3.Not (matrix)) else: res = z3.Not (matrix) else: res = qf_exp", "x.translate (ctx) def translate_pair_list (l, ctx): res = [] for (a,b) in l:", "s.check () if res == z3.sat: return s.model () else: return None def", "has_const (arg1, const): arg0,arg1 = arg1,arg0 # swap if has_const (arg0, const): t", "(ctx=exp.ctx) m = s.model () sub = mk_subst_from_model (m, qvars, model_completion=True) return z3.substitute", "(exp): t = z3.Tactic ('qe-sat', ctx=exp.ctx) return t (exp).as_expr () def cofactor_term_ite (exp):", "print 'CURRENT SIZE:', cex_size+1 # look for a cex of size 'cex_size' #", "of qvars consistent with (matrix /\\ side_cons) # the under-approx. is obtained as", "mk_subst_from_model (m, consts, model_completion=False): sub = [] for const in consts: # treat", "exp if z3.is_true (exp) or z3.is_false (exp): return exp assert z3.is_app (exp) decl", "# vs. 
1==const) and to avoid repetitions of terms, we return (None,eq) for", "c in univ_consts: name = '{}_{}'.format (c.decl ().name (), str (i)) const =", "= matrix return e def qe (exp): if not z3.is_quantifier (exp): return exp", "res = s.check () if res == z3.unsat: print 'TRUE\\n', sub if model:", "pre_consts = extract_consts (exp) pre_const_keys = map (exp_key, pre_consts) t = z3.Tactic ('elim-term-ite',", "(exp.arg (i), leaf_keys): yield leaf else: assert z3.is_var (exp) for leaf in insert_and_yield", "name = '{}_{}'.format (const.decl ().name (), variant) return z3.Const (name, const.sort ()) def", "(new_disj) while True: print 'CURRENT SIZE:', cex_size+1 # look for a cex of", "mk_const_variant (const, variant) sub.append ( (const, const_variant) ) return sub def mk_fresh_args (decl,", "post_const_keys [i] if post_key not in pre_const_keys: exist_consts.append (post_consts [i]) if len (exist_consts)", "*sub) def nnf (exp): t = z3.Tactic ('nnf', ctx=exp.ctx) return t (exp).as_expr ()", "given_preds = get_preds (given_fp) fp = z3.Fixedpoint (ctx=ctx) rules = dict () #", "= elim_bool_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix) else: e", "for cons in w_cons: print cons.sexpr () w = z3.Solver (ctx=exp.ctx) for cons", "(pred) + 1): lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta (-1, pred)) return z3.simplify", "expr.var_sort (i) consts.append (z3.Const (v_name, v_sort)) matrix = z3.substitute_vars (expr.body (), *consts) return", "and (None,1==const)); # # assume that const only appears in simple arithmetic terms", "None) else: assert e.is_forall () (univ_consts, matrix) = strip_qblock (e) print 'Exist consts:',", "', z3.substitute (matrix, *sub) s.add (z3.Not (z3.substitute (matrix, *sub))) print 'Solver for validity:',", "(exp.num_args ()): for leaf in unique_leaves (exp.arg (i), leaf_keys): yield leaf else: assert", "t = process_eq (arg0, arg1) if t is not None: if insert (t):", "yield (t, eq) done_exp.append 
(exp) def unique_leaves (exp, leaf_keys=None): def insert_and_yield (e): k", "(exp): yield leaf def unique_const_leaves (exp): for l in unique_leaves (exp): if not", "and exp1 is not None: return False return exp_key (exp1) == exp_key (exp2)", "where U and E are univ_consts and exist_consts # add a new set", "z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True) if cex_model", "TODO: split cex_model into list of models for the original set of #", "# iterator for declarations of arguments of exp # if arguments contain de-bruijn", "z3.Solver (ctx=exp.ctx) print 'checking validity of ', z3.substitute (matrix, *sub) s.add (z3.Not (z3.substitute", "z3.substitute (matrix, *sub) def nnf (exp): t = z3.Tactic ('nnf', ctx=exp.ctx) return t", "new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) while True: print 'CURRENT SIZE:',", "> 0: e = z3.Exists (exist_consts, e) return qe_lite (e) # obtain an", "val) ) return sub def mk_eqs_from_model (m, consts, model_completion=False): eqs = [] for", "ctx=exp.ctx) # invoke qe_lite once per quantified variable, for better result for i", "(exp): for l in unique_leaves (exp): if not z3.is_var (l): yield l def", "l def exp_has_const_leaf (exp, l): for m in unique_const_leaves (exp): if l.eq (m):", "(q, fp) def create_named_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): given_fp = z3.Fixedpoint (ctx=ctx)", "print 'checking validity of ', z3.substitute (matrix, *sub) s.add (z3.Not (z3.substitute (matrix, *sub)))", "is_add = z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 = e1.arg (0) arg1 = e1.arg (1)", "E. 
Not (matrix), # where U and E are univ_consts and exist_consts #", "(0) arg1 = e1.arg (1) if z3.eq (arg1, const): if is_add: ret_val =", "(exp, given_insts=None, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp) and not", "obtained as \"matrix [m/qvars]\" # # this is the weakest under-approx. if side_cons", "in consts: # treat arrays specially due to the else_value sort = const.sort", "*sub)) new_insts = list () witnesses = list () while True: print 'Solver", "(decl.arity ()): name = startswith + str (i) sort = decl.domain (i) args.append", "\"matrix [m/qvars]\" # # this is the weakest under-approx. if side_cons is a", "return (None, cex_model, witnesses) else: # no cex of current size # check", "consts sub = [] for c in univ_consts: name = '{}_{}'.format (c.decl ().name", "unique_leaves (exp): if z3.is_var (l): yield l def exp_has_const_leaf (exp, l): for m", "model=True) if cex_model is not None: print 'FALSE\\n', cex_model print 'Size:', cex_size+1 #", "is_forall: (qvars, matrix) = strip_qblock (qf_exp) if len (qvars) > 0: res =", "for alternative representations of the same equality (e.g. const==1 # vs. 
1==const) and", "None) return (sub, None, None) inst = s.model () new_insts.append (inst) print 'New", "arg1) if t is not None: if insert (t): yield (t, exp) else:", "equivalent to (const==t) # appearing in exp; # # to accommodate for alternative", "(t,eq) is such that eq is an equality logically equivalent to (const==t) #", "mk_exp_variant_sub (exp, variant): sub = [] for const in unique_const_leaves (exp): const_variant =", "= [z3_translate (x.domain (i), ctx) for i in range (x.arity ())] sorts.append (z3_translate", "is a list of instances for the universals given by the user as", "(e1, z3.Z3_OP_SUB)): return None is_add = z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 = e1.arg (0)", "(qvars, matrix) = strip_qblock (exp) s = z3.Solver (ctx=exp.ctx) s.add (matrix) if side_cons", "mk_subst_from_model (m, exist_consts, model_completion=True) print 'New witness:', sub # check if the witness", "handles sorts and function declarations correctly\"\"\" if x.ctx == ctx: return x if", "z3.Exists (qvars, z3.Not (matrix)) qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr () if is_forall:", "(fp) print for pred in preds.itervalues (): print 'Lemmas for predicate: ', pred", "()) return x.translate (ctx) def translate_pair_list (l, ctx): res = [] for (a,b)", "if sel_keys is None: sel_keys = [] # post-order if z3.is_app (exp): for", "= m.eval (const, model_completion=model_completion) sub.append ( (const, val) ) return sub def mk_eqs_from_model", "sort = const.sort () if isinstance (sort, z3.ArraySortRef): val_interp = m [const] if", "z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True) if cex_model is not", "not z3.is_bool (exp): return exp if z3.is_true (exp) or z3.is_false (exp): return exp", "# look for a cex of size 'cex_size' # Exists U1,U2,..U_cex_size. 
Forall E.", "declarations correctly\"\"\" if x.ctx == ctx: return x if isinstance (x, z3.BoolSortRef): return", "like above, but looks for counterexamples of increasing size; # in other words,", "z3.is_quantifier (exp) assert not exp.is_forall () (qvars, matrix) = strip_qblock (exp) s =", "'Matrix:', matrix print 'Solving by negating the problem' cex_size = 0 curr_exist_consts =", "head unused_, matrix = strip_qblock (rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg", "(exp)): for leaf in insert_and_yield (exp): yield leaf elif z3.is_app (exp): for i", "impl1 = z3.Implies (args[0], args[1]) impl2 = z3.Implies (z3.Not (args[0]), args[2]) return z3.And", "= [] for rule_exp in fp.get_rules (): # rule_exp is a quantified formula", "assert e.is_forall () (univ_consts, matrix) = strip_qblock (e) print 'Exist consts:', exist_consts print", "(post_consts [i]) if len (exist_consts) > 0: e = z3.Exists (exist_consts, e) return", "if x.ctx == ctx: return x if isinstance (x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx)", "= True break return found # iterator for all equality terms on const;", "() def is_ite (exp): return z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor (exp): return z3.is_app_of", "(0) arg1 = exp.arg (1) if has_const (arg1, const): arg0,arg1 = arg1,arg0 #", "(args[0], args[1]) impl2 = z3.Implies (z3.Not (args[0]), args[2]) return z3.And (impl1, impl2) else:", "negation of the given problem, looking for # counterexamples of increasing size, similar", "pred)) lemmas.append (fp.get_cover_delta (-1, pred)) return z3.simplify (z3.And (*lemmas)) # doesn't seem to", "c.sort ()) curr_exist_consts.append (const) sub.append ( (c, const) ) new_disj = z3.substitute (z3.Not", "strip_qblock (rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1) else: head =", "(), variant) return z3.Const (name, const.sort ()) def mk_exp_variant_sub (exp, variant): sub =", "z3_translate (x, ctx): \"\"\" A version of z3.AstRef.translate that handles 
sorts and function", "by negating the problem' cex_size = 0 curr_exist_consts = [] curr_matrix_disjs = []", "in given_insts: sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) new_insts", "expr.var_name (i) v_sort = expr.var_sort (i) consts.append (z3.Const (v_name, v_sort)) matrix = z3.substitute_vars", "# obtain a model m of qvars consistent with (matrix /\\ side_cons) #", "t = z3.Tactic ('nnf', ctx=exp.ctx) return t (exp).as_expr () def is_ite (exp): return", "(exp).as_expr () def elim_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp)", "cex_size+1 # look for a cex of size 'cex_size' # Exists U1,U2,..U_cex_size. Forall", "head_decl = head.decl () # ensure head_decl is in preds head_key = exp_key", "exist_consts, model_completion=True) return (sub, None, None) else: assert e.is_forall () (univ_consts, matrix) =", "print 'Matrix:', matrix w_cons = [] # constraints for witness # initialize w", "quantified variables which can # be hard to eliminate by qe_lite #e =", "(z3.ForAll ([v], matrix)).as_expr () else: matrix = t (z3.Exists ([v], matrix)).as_expr () e", "enumerate (given_fp.get_rules ()): name = 'r'+str(i) fp.add_rule (rule, name=name) rules [name] = rule", "'{} : {}'.format ('oo', fp.get_cover_delta (-1, pred)) print def get_level_lemmas (fp, lvl, pred):", "# TODO: split cex_model into list of models for the original set of", "elif z3.is_or (exp): return z3.Or (*args) elif is_ite (exp): impl1 = z3.Implies (args[0],", "k = exp_key (e) if k not in sel_keys: sel_keys.append (k) yield e", "reversed (range (expr.num_vars ())): v_name = expr.var_name (i) v_sort = expr.var_sort (i) consts.append", "(name, const.sort ()) def mk_exp_variant_sub (exp, variant): sub = [] for const in", "def unique_const_leaves (exp): for l in unique_leaves (exp): if not z3.is_var (l): yield", "not z3.is_quantifier (exp): return exp return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr () # 
qe_lite", "(exp) and not (z3.is_int_value (exp) or z3.is_rational_value (exp)): for leaf in insert_and_yield (exp):", "(exp): e = cofactor_term_ite (exp) e = elim_bool_ite (e) # Alternatively, we could", "qe_lite #e = elim_bool_ite (exp) #e = elim_term_ite (e) return e # sampling", "e2): if z3.eq (e1, const): ret_val = z3.simplify (e2) else: assert z3.is_app (e1)", "conjunction of implications when it appears as a boolean atom # (i.e. an", "(arg0, e2+arg1) return ret_val if eq_terms is None: eq_terms = [] if done_exp", "= z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr () def elim_term_ite (exp): if z3.is_quantifier", "z3.Not (matrix) else: res = qf_exp return res def qe_lite (exp): if not", "e = z3.Exists (qvars, matrix) return e if not z3.is_bool (exp): return exp", "(x, z3.FuncDeclRef): sorts = [z3_translate (x.domain (i), ctx) for i in range (x.arity", "= strip_qblock (exp) matrix = elim_bool_ite (matrix) if exp.is_forall (): e = z3.ForAll", "if len (args) == 0: return mk_true (ctx=ctx) else: return z3.And (*args) def", "const): found = False for l in unique_const_leaves (exp): if z3.eq (l, const):", "lvl, pred): lemmas = [] for l in range (lvl, fp.get_num_levels (pred) +", "(i), ctx) for i in range (x.arity ())] sorts.append (z3_translate (x.range (), ctx))", "w.check () if res == z3.unsat: print 'FALSE\\n', new_insts return (None, new_insts, witnesses)", "by qe def full_qe (exp): temp = qe_lite (exp) return qe (temp) def", "it appears as a boolean atom # (i.e. 
an atom in the boolean", "check if the witness is sufficient s = z3.Solver (ctx=exp.ctx) print 'checking validity", "problem, looking for # counterexamples of increasing size, similar to BMC def solve_exists_forall_incremental", "in range (lvl, fp.get_num_levels (pred) + 1): lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta", "+ str (i) sort = decl.domain (i) args.append (z3.Const (name, sort)) return args", "into list of models for the original set of # universal variables return", "(an existentially quantified fml) under the # constraints 'side_cons' on the free variables;", "('qe', ctx=exp.ctx) (exp).as_expr () # qe_lite followed by qe def full_qe (exp): temp", "matrix = z3.substitute_vars (expr.body (), *consts) return (consts, matrix) def get_preds (fp): preds", "() == z3.unsat: print 'TRUE\\n', sub if model: return (m, None, None) return", "arg1 = e1.arg (1) if z3.eq (arg1, const): if is_add: ret_val = z3.simplify", "cons.sexpr () w = z3.Solver (ctx=exp.ctx) for cons in w_cons: w.add (cons) #", "lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta (-1, pred)) return z3.simplify (z3.And (*lemmas)) #", "# swap if has_const (arg0, const): t = process_eq (arg0, arg1) if t", "(exp): arg0 = exp.arg (0) arg1 = exp.arg (1) if has_const (arg1, const):", "return e # sampling based method for quant. 
alternation; # given_insts is a", "w = z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model (m, exist_consts, model_completion=True) w.add (z3.substitute (z3.Not", "yield l def exp_has_const_leaf (exp, l): for m in unique_const_leaves (exp): if l.eq", "= z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr () if is_forall: (qvars, matrix) = strip_qblock (qf_exp)", "return args # check if fml is sat with given side constraints def", "print 'New instance:\\n', inst sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix,", "generate_proof_trace=False) for pred in given_preds.itervalues (): fp.register_relation (pred) for i,rule in enumerate (given_fp.get_rules", "ctx=ctx) def mk_and (args, ctx=None): if len (args) == 0: return mk_true (ctx=ctx)", "get_preds (given_fp) fp = z3.Fixedpoint (ctx=ctx) rules = dict () # map from", "(v_name, v_sort)) matrix = z3.substitute_vars (expr.body (), *consts) return (consts, matrix) def get_preds", "= sort.domain () val_sort = sort.range () val = z3.K(val_sort, val_interp.else_value ()) for", "'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True,", "eq_terms.append (e) return True return False def process_eq (e1, e2): if z3.eq (e1,", "created -- use z3.eq instead def exp_key (e): return e.ast.value def match_exp (exp1,", "once per quantified variable, for better result for i in range (exp.num_vars ()):", "body => head where head is a QF predicate instance # obtain the", "cofactor_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = cofactor_term_ite", "equality term # (so, we can't handle \"(const==0)==b\", \"ite (const==0, x, y) ==", "in range (len (post_consts)): post_key = post_const_keys [i] if post_key not in pre_const_keys:", "[] for const in consts: # treat arrays specially due to the else_value", "the following with the caveat 
that # elim_term_ite introduces new existentially quantified variables", "if done_exp is None: done_exp = [] for e in done_exp: if e.eq", "(exp): return z3.Or (*args) elif is_ite (exp): impl1 = z3.Implies (args[0], args[1]) impl2", "in the boolean structure of exp) def elim_bool_ite (exp): if z3.is_quantifier (exp): (qvars,", "pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set", "return z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor (exp): return z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite", "= strip_qblock (qf_exp) if len (qvars) > 0: res = z3.ForAll (qvars, z3.Not", "for l in unique_leaves (exp): if not z3.is_var (l): yield l def unique_var_leaves", "rewrite ite as conjunction of implications when it appears as a boolean atom", "() for i in reversed (range (expr.num_vars ())): v_name = expr.var_name (i) v_sort", "we could have done the following with the caveat that # elim_term_ite introduces", "for i in range (exp.num_args ()): for leaf in unique_leaves (exp.arg (i), leaf_keys):", "print 'FALSE\\n', new_insts return (None, new_insts, witnesses) m = w.model () witnesses.append (m)", "args[2]) return z3.And (impl1, impl2) else: return decl (*args) def elim_ite (exp): e", "# sub-dag is already processed if z3.is_eq (exp): arg0 = exp.arg (0) arg1", "return x.translate (ctx) def translate_pair_list (l, ctx): res = [] for (a,b) in", "exp_key (exp1) == exp_key (exp2) # iterator for declarations of arguments of exp", "the negation of the given problem, looking for # counterexamples of increasing size,", "in another equality term # (so, we can't handle \"(const==0)==b\", \"ite (const==0, x,", "if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_bool_ite (matrix) if", "consts.append (z3.Const (v_name, v_sort)) matrix = z3.substitute_vars (expr.body (), *consts) return (consts, matrix)", "(qvars, matrix) = strip_qblock (e) for v in qvars: if exp.is_forall 
(): matrix", "accommodate for alternative representations of the same equality (e.g. const==1 # vs. 1==const)", "sub if model: return (m, None, None) return (sub, None, None) inst =", "'{}_{}'.format (c.decl ().name (), str (cex_size)) const = z3.Const (name, c.sort ()) curr_exist_consts.append", "head is a QF predicate instance # obtain the head unused_, matrix =", "return True if exp1 is None and exp2 is not None: return False", "(ctx=fml.ctx) s.add (fml) if side_cons is not None: for cons in side_cons: s.add", "l.eq (m): return True return False def unique_selects (exp, sel_keys=None): def insert_and_yield (e):", "return z3.BoolVal (False, ctx=ctx) def mk_int (val, ctx=None): return z3.IntVal (val, ctx=ctx) def", "= get_preds (given_fp) fp = z3.Fixedpoint (ctx=ctx) rules = dict () # map", "= s.check () if res == z3.unsat: print 'TRUE\\n', sub if model: return", "[] for c in univ_consts: name = '{}_{}'.format (c.decl ().name (), str (cex_size))", "doesn't seem to quite work when new expressions are created -- use z3.eq", "== exp_key (exp2) # iterator for declarations of arguments of exp # if", "(arg1, const): arg0,arg1 = arg1,arg0 # swap if has_const (arg0, const): t =", "in other words, we solve the negation of the given problem, looking for", "*sub) curr_matrix_disjs.append (new_disj) while True: print 'CURRENT SIZE:', cex_size+1 # look for a", "return z3.IntSort (ctx=ctx) else : assert x.is_real () return z3.RealSort (ctx=ctx) if isinstance", "for i,rule in enumerate (given_fp.get_rules ()): name = 'r'+str(i) fp.add_rule (rule, name=name) rules", "matrix) = strip_qblock (exp) s = z3.Solver (ctx=exp.ctx) s.add (matrix) if side_cons is", "= z3.K(val_sort, val_interp.else_value ()) for i in range (val_interp.num_entries ()): entry = val_interp.entry", "(expr): if not z3.is_quantifier (expr): return ([], expr) consts = list () for", "isinstance (sort, z3.ArraySortRef): val_interp = m [const] if (val_interp is not None) and", "if model: return (m, None, None) 
return (sub, None, None) # increment size", "(i), sel_keys): yield sel if z3.is_select (exp): for sel in insert_and_yield (exp): yield", "(qvars, matrix) = strip_qblock (exp) matrix = elim_bool_ite (matrix) if exp.is_forall (): e", "(None, exp) elif z3.is_app (exp): for i in range (exp.num_args ()): for (t,eq)", "()] = head_decl return preds def print_lemmas (fp): preds = get_preds (fp) print", "= z3.ForAll (qvars, matrix) else: e = z3.Exists (qvars, matrix) return e pre_consts", "eq_terms: if z3.eq (t,e): found = True break if not found: eq_terms.append (e)", "have done the following with the caveat that # elim_term_ite introduces new existentially", "utility routines for Z3 # ############################################ import z3 I = z3.IntSort () B", "(m, None, None) return (sub, None, None) inst = s.model () new_insts.append (inst)", "> 0: res = z3.ForAll (qvars, z3.Not (matrix)) else: res = z3.Not (matrix)", "for cons in w_cons: w.add (cons) # obtain witness for instances res =", "treat arrays specially due to the else_value sort = const.sort () if isinstance", "univ_consts: name = '{}_{}'.format (c.decl ().name (), str (cex_size)) const = z3.Const (name,", "done_exp is None: done_exp = [] for e in done_exp: if e.eq (exp):", "return True return False def process_eq (e1, e2): if z3.eq (e1, const): ret_val", "exp_key (e) if k not in leaf_keys: leaf_keys.append (k) yield e if leaf_keys", "s.add (matrix) if side_cons is not None: for c in side_cons: s.add (c)", "if exp.is_forall (): return z3.ForAll (qvars, matrix) else: return z3.Exists (qvars, matrix) t", "if post_key not in pre_const_keys: exist_consts.append (post_consts [i]) if len (exist_consts) > 0:", "with given side constraints def check_sat (fml, side_cons=None): s = z3.Solver (ctx=fml.ctx) s.add", "obtain a model m of qvars consistent with (matrix /\\ side_cons) # the", "if exp.is_forall (): matrix = t (z3.ForAll ([v], matrix)).as_expr () else: matrix =", "Forall vars. 
body => head where head is a QF predicate instance #", ": {}'.format ('oo', fp.get_cover_delta (-1, pred)) print def get_level_lemmas (fp, lvl, pred): lemmas", "= z3.Solver (ctx=exp.ctx) print 'checking validity of ', z3.substitute (matrix, *sub) s.add (z3.Not", "z3.And (impl1, impl2) else: return decl (*args) def elim_ite (exp): e = cofactor_term_ite", "= z3.IntSort () B = z3.BoolSort () def z3_translate (x, ctx): \"\"\" A", "(exp.num_args ()) : arg = exp.arg (i) if z3.is_app (arg): yield arg.decl ()", "already processed if z3.is_eq (exp): arg0 = exp.arg (0) arg1 = exp.arg (1)", "return z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x): if x.is_int (): return z3.IntSort (ctx=ctx) else", "preds head_key = exp_key (head_decl) if head_key in pred_keys: continue pred_keys.append (head_key) preds", "term # (so, we can't handle \"(const==0)==b\", \"ite (const==0, x, y) == 3\",", "# check if any of the witnesses already works for m in witnesses:", "str (i) sort = decl.domain (i) args.append (z3.Const (name, sort)) return args #", "yield arg.decl () def has_const (exp, const): found = False for l in", "can take >2 args and # decl(*args) doesn't seem to work with the", "else: return z3.Exists (qvars, matrix) t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr", "z3.Fixedpoint (ctx=ctx) if not pp: fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate)", "# just an smt problem m = check_sat (e) if m is None:", "extract_consts (e) post_const_keys = map (exp_key, post_consts) exist_consts = [] for i in", "if res == z3.unsat: return mk_false (ctx=exp.ctx) m = s.model () sub =", "(exp, leaf_keys=None): def insert_and_yield (e): k = exp_key (e) if k not in", "sat with given side constraints def check_sat (fml, side_cons=None): s = z3.Solver (ctx=fml.ctx)", "()): for (t,eq) in unique_eq_terms_on_const (const, exp.arg (i), eq_terms, done_exp): yield (t, eq)", "z3.simplify (z3.And (*lemmas)) # doesn't seem to 
quite work when new expressions are", "= [] if done_exp is None: done_exp = [] for e in done_exp:", "(z3.substitute (matrix, *sub)) # like above, but looks for counterexamples of increasing size;", "(validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q = fp.parse_file (smt2file) return (q, fp) def", "exp; # # to accommodate for alternative representations of the same equality (e.g.", "(exp): return exp e = exp t = z3.Tactic ('qe-light', ctx=exp.ctx) # invoke", "\"ite (const==0, x, y) == 3\", etc.) def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None):", "of const is 1; # # assume that no equality term appears in", "# rule_exp is a quantified formula representing the rule of the form: #", "= z3.Not (matrix) else: res = qf_exp return res def qe_lite (exp): if", "z3.eq (e1, const): ret_val = z3.simplify (e2) else: assert z3.is_app (e1) if not", "yield e if leaf_keys is None: leaf_keys = [] if z3.is_const (exp) and", "= strip_qblock (e) print 'Exist consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:',", "(exp) e = elim_bool_ite (e) # Alternatively, we could have done the following", "(t): yield (t, exp) else: yield (None, exp) elif z3.is_app (exp): for i", "consistent with (matrix /\\ side_cons) # the under-approx. 
is obtained as \"matrix [m/qvars]\"", "e = t (exp).as_expr () # tactic introduces new constants which need to", "w.add (cons) # obtain witness for instances res = w.check () if res", "an smt problem m = check_sat (e) if m is None: return (None,", "# in other words, we solve the negation of the given problem, looking", "is already processed if z3.is_eq (exp): arg0 = exp.arg (0) arg1 = exp.arg", "= z3.ForAll (qvars, matrix) else: e = z3.Exists (qvars, matrix) return e if", "unique_const_leaves (exp): if l.eq (m): return True return False def unique_selects (exp, sel_keys=None):", "z3.is_rational_value (exp)): for leaf in insert_and_yield (exp): yield leaf elif z3.is_app (exp): for", "# invoke qe_lite for each variable, separately (qvars, matrix) = strip_qblock (e) for", "tactic introduces new constants which need to be existentially quantified post_consts = extract_consts", "side_cons) # the under-approx. is obtained as \"matrix [m/qvars]\" # # this is", "eq) done_exp.append (exp) def unique_leaves (exp, leaf_keys=None): def insert_and_yield (e): k = exp_key", "# # let exp = Exists (qvars, matrix) # obtain a model m", "if given_insts is not None: for inst in given_insts: sub = mk_subst_from_model (inst,", "fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q = fp.parse_file (smt2file) return", "qe_lite (exp): if not z3.is_quantifier (exp): return exp e = exp t =", "return e.ast.value def match_exp (exp1, exp2): if exp1 is None and exp2 is", "[z3_translate (x.domain (i), ctx) for i in range (x.arity ())] sorts.append (z3_translate (x.range", "(x.arity ())] sorts.append (z3_translate (x.range (), ctx)) return z3.Function (x.name (), *sorts) if", "False if exp.is_forall (): is_forall = True (qvars, matrix) = strip_qblock (exp) exp", "(l, ctx): res = [] for (a,b) in l: new_p = (z3_translate (a,", "exp (an existentially quantified fml) under the # constraints 'side_cons' on the free", "e = 
z3.Exists (qvars, matrix) return e pre_consts = extract_consts (exp) pre_const_keys =", "z3.Solver (ctx=exp.ctx) s.add (matrix) if side_cons is not None: for c in side_cons:", "return z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite ite as conjunction of implications when it", "check if any of the witnesses already works for m in witnesses: w", "def get_level_lemmas (fp, lvl, pred): lemmas = [] for l in range (lvl,", "(c.decl ().name (), str (i)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const)", "# assume that const only appears in simple arithmetic terms and the coefficient", "head is not None # obtain head_decl head_decl = head.decl () # ensure", "e.is_forall () (univ_consts, matrix) = strip_qblock (e) print 'Exist consts:', exist_consts print 'Univ", "given_fp.parse_file (smt2file) given_preds = get_preds (given_fp) fp = z3.Fixedpoint (ctx=ctx) rules = dict", "the witness is sufficient s = z3.Solver (ctx=exp.ctx) print 'checking validity of ',", "else: val = m.eval (const, model_completion=model_completion) sub.append ( (const, val) ) return sub", "in side_cons: s.add (cons) res = s.check () if res == z3.sat: return", "representing the rule of the form: # Forall vars. 
body => head where", "is None: sel_keys = [] # post-order if z3.is_app (exp): for i in", "s.model () sub = mk_subst_from_model (m, qvars, model_completion=True) return z3.substitute (matrix, *sub) def", "(e1, const): ret_val = z3.simplify (e2) else: assert z3.is_app (e1) if not (z3.is_app_of", "else: ret_val = z3.simplify (arg0-e2) else: if is_add: ret_val = process_eq (arg0, e2-arg1)", "under_approx_qe (exp, side_cons=None): assert z3.is_quantifier (exp) assert not exp.is_forall () (qvars, matrix) =", "return z3.Or (*args) elif is_ite (exp): impl1 = z3.Implies (args[0], args[1]) impl2 =", "strip_qblock (qf_exp) if len (qvars) > 0: res = z3.ForAll (qvars, z3.Not (matrix))", "(a, ctx), z3_translate (b, ctx)) res.append (new_p) return res def mk_true (ctx=None): return", "# Alternatively, we could have done the following with the caveat that #", "(m, None, None) sub = mk_subst_from_model (m, exist_consts, model_completion=True) return (sub, None, None)", "const only appears in simple arithmetic terms and the coefficient # of const", "pred): lemmas = [] for l in range (lvl, fp.get_num_levels (pred) + 1):", "= [] for const in consts: # treat arrays specially due to the", "cex_model into list of models for the original set of # universal variables", "() else: return None def mk_subst_from_model (m, consts, model_completion=False): sub = [] for", "matrix = elim_term_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix) else:", "variables, they are ignored def arg_decls (exp): for i in range (exp.num_args ())", "(0), entry.value ()) else: val = m.eval (const, model_completion=model_completion) else: val = m.eval", "(exp.num_args ()): # args are the array and the idx for sel in", "elif is_ite (exp): impl1 = z3.Implies (args[0], args[1]) impl2 = z3.Implies (z3.Not (args[0]),", "in unique_selects (exp.arg (i), sel_keys): yield sel if z3.is_select (exp): for sel in", "is_forall = False if exp.is_forall (): is_forall = True (qvars, matrix) = strip_qblock", 
"sort.domain () val_sort = sort.range () val = z3.K(val_sort, val_interp.else_value ()) for i", "ctx=exp.ctx) (exp).as_expr () if is_forall: (qvars, matrix) = strip_qblock (qf_exp) if len (qvars)", "given_insts=None, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp) and not exp.is_forall", "for m in witnesses: w = z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model (m, exist_consts,", "k not in sel_keys: sel_keys.append (k) yield e if sel_keys is None: sel_keys", "z3.IntSort () B = z3.BoolSort () def z3_translate (x, ctx): \"\"\" A version", "= qe_lite (exp) return qe (temp) def qe_sat (exp): t = z3.Tactic ('qe-sat',", "model_completion=True) return (sub, None, None) else: assert e.is_forall () (univ_consts, matrix) = strip_qblock", "(z3.And (*lemmas)) # doesn't seem to quite work when new expressions are created", "m [const] if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp): idx_sort =", "'New witness:', sub # check if the witness is sufficient s = z3.Solver", "negating the problem' cex_size = 0 curr_exist_consts = [] curr_matrix_disjs = [] for", "looks for counterexamples of increasing size; # in other words, we solve the", "= [] # constraints for witness # initialize w with given_insts if given_insts", "processed if z3.is_eq (exp): arg0 = exp.arg (0) arg1 = exp.arg (1) if", "equality terms on const; # # each pair (t,eq) is such that eq", "of terms, we return (None,eq) for the # duplicates (in the above example,", "exp.arg (0) arg1 = exp.arg (1) if has_const (arg1, const): arg0,arg1 = arg1,arg0", "= m [const] if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp) :", "const in consts: # treat arrays specially due to the else_value sort =", "() def elim_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix", "by the user as a # starting point def solve_exists_forall (exp, given_insts=None, model=False):", "mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx) def mk_int 
(val, ctx=None): return z3.IntVal (val,", "terms, we return (None,eq) for the # duplicates (in the above example, we", "(pred) for i,rule in enumerate (given_fp.get_rules ()): name = 'r'+str(i) fp.add_rule (rule, name=name)", "(exp_key, pre_consts) t = z3.Tactic ('elim-term-ite', ctx=exp.ctx) e = t (exp).as_expr () #", "(exp) matrix = cofactor_term_ite (matrix) if exp.is_forall (): return z3.ForAll (qvars, matrix) else:", "of increasing size; # in other words, we solve the negation of the", "predicate: ', pred n = fp.get_num_levels (pred) for i in range (n): print", "in range (cex_size): sub = [] for c in univ_consts: name = '{}_{}'.format", "sort)) return args # check if fml is sat with given side constraints", "found = True break return found # iterator for all equality terms on", "return t (exp).as_expr () def cofactor_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) =", "for new witness:\\n', m sub = mk_subst_from_model (m, exist_consts, model_completion=True) print 'New witness:',", "(e) post_const_keys = map (exp_key, post_consts) exist_consts = [] for i in range", "U and E are univ_consts and exist_consts # add a new set of", "if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1) else: head = matrix assert", "is such that eq is an equality logically equivalent to (const==t) # appearing", "return exp e = exp t = z3.Tactic ('qe-light', ctx=exp.ctx) # invoke qe_lite", "(c) res = s.check () if res == z3.unsat: return mk_false (ctx=exp.ctx) m", "z3.is_quantifier (exp): return exp e = exp t = z3.Tactic ('qe-light', ctx=exp.ctx) #", "= extract_consts (exp) pre_const_keys = map (exp_key, pre_consts) t = z3.Tactic ('elim-term-ite', ctx=exp.ctx)", "t = z3.Tactic ('qe-light', ctx=exp.ctx) # invoke qe_lite once per quantified variable, for", "for i in range (len (post_consts)): post_key = post_const_keys [i] if post_key not", "z3.ArraySortRef): val_interp = m [const] if (val_interp is not None) and isinstance (val_interp,", "z3.Z3_OP_ITE) def 
is_xor (exp): return z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite ite as conjunction", "that handles sorts and function declarations correctly\"\"\" if x.ctx == ctx: return x", "e if leaf_keys is None: leaf_keys = [] if z3.is_const (exp) and not", "(ctx=ctx) rules = dict () # map from names to rules if not", "True: print 'Solver for witness:' for cons in w_cons: print cons.sexpr () w", "as conjunction of implications when it appears as a boolean atom # (i.e.", "new constants which need to be existentially quantified post_consts = extract_consts (e) post_const_keys", "of ', z3.substitute (matrix, *sub) s.add (z3.Not (z3.substitute (matrix, *sub))) print 'Solver for", "# initialize w with given_insts if given_insts is not None: for inst in", "fp.register_relation (pred) for i,rule in enumerate (given_fp.get_rules ()): name = 'r'+str(i) fp.add_rule (rule,", "exp.arg (1) if has_const (arg1, const): arg0,arg1 = arg1,arg0 # swap if has_const", "with given_insts if given_insts is not None: for inst in given_insts: sub =", "range (exp.num_args ()): # args are the array and the idx for sel", "l): for m in unique_const_leaves (exp): if l.eq (m): return True return False", "def z3_translate (x, ctx): \"\"\" A version of z3.AstRef.translate that handles sorts and", "(k) yield e if sel_keys is None: sel_keys = [] # post-order if", "len (qvars) > 0: res = z3.ForAll (qvars, z3.Not (matrix)) else: res =", "preds = get_preds (fp) print for pred in preds.itervalues (): print 'Lemmas for", "instance:\\n', inst sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) #", "[i] if post_key not in pre_const_keys: exist_consts.append (post_consts [i]) if len (exist_consts) >", "*sub) s.add (z3.Not (z3.substitute (matrix, *sub))) print 'Solver for validity:', z3.Not (z3.substitute (matrix,", "name = '{}_{}'.format (c.decl ().name (), str (cex_size)) const = z3.Const (name, c.sort", "().name (), variant) return z3.Const (name, 
const.sort ()) def mk_exp_variant_sub (exp, variant): sub", "exp:', exp assert z3.is_quantifier (exp) and not exp.is_forall () (exist_consts, e) = strip_qblock", "(exp, side_cons=None): assert z3.is_quantifier (exp) assert not exp.is_forall () (qvars, matrix) = strip_qblock", "z3.FuncInterp) : idx_sort = sort.domain () val_sort = sort.range () val = z3.K(val_sort,", "def match_exp (exp1, exp2): if exp1 is None and exp2 is None: return", "same equality (e.g. const==1 # vs. 1==const) and to avoid repetitions of terms,", "exp_key (head_decl) if head_key in pred_keys: continue pred_keys.append (head_key) preds [head_decl.name ()] =", "is None and exp2 is not None: return False if exp2 is None", "l in range (lvl, fp.get_num_levels (pred) + 1): lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append", "= m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) sub.append ( (const,", "name = startswith + str (i) sort = decl.domain (i) args.append (z3.Const (name,", "None: leaf_keys = [] if z3.is_const (exp) and not (z3.is_int_value (exp) or z3.is_rational_value", "m in witnesses: w = z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model (m, exist_consts, model_completion=True)", "not z3.is_var (l): yield l def unique_var_leaves (exp): for l in unique_leaves (exp):", "= t (e).as_expr () if not z3.is_quantifier (e): return e if z3.is_quantifier (e):", "1): lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta (-1, pred)) return z3.simplify (z3.And (*lemmas))", "= t (z3.ForAll ([v], matrix)).as_expr () else: matrix = t (z3.Exists ([v], matrix)).as_expr", "in exp; # # to accommodate for alternative representations of the same equality", "exp) else: yield (None, exp) elif z3.is_app (exp): for i in range (exp.num_args", "elif z3.is_app (exp): for i in range (exp.num_args ()): for leaf in unique_leaves", "in range (exp.num_args ()): for (t,eq) in unique_eq_terms_on_const (const, exp.arg (i), eq_terms, 
done_exp):", "= exp_key (e) if k not in sel_keys: sel_keys.append (k) yield e if", "return z3.Exists (qvars, matrix) t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr ()", "for const in consts: # treat arrays specially due to the else_value sort", "quantified post_consts = extract_consts (e) post_const_keys = map (exp_key, post_consts) exist_consts = []", "introduces new existentially quantified variables which can # be hard to eliminate by", "increasing size, similar to BMC def solve_exists_forall_incremental (exp, model=False): print 'Exists Forall exp:',", "(ctx=ctx) if not pp: fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set", "in insert_and_yield (exp): yield leaf def unique_const_leaves (exp): for l in unique_leaves (exp):", "return e def qe (exp): if not z3.is_quantifier (exp): return exp return z3.Tactic", "mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) new_insts = list () witnesses", "(arg1, const): if is_add: ret_val = z3.simplify (e2-arg0) else: ret_val = z3.simplify (arg0-e2)", "(e2) else: assert z3.is_app (e1) if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1,", "a new set of exist consts sub = [] for c in univ_consts:", "def mk_int (val, ctx=None): return z3.IntVal (val, ctx=ctx) def mk_and (args, ctx=None): if", "unique_const_leaves (exp): res.append (c) return res def mk_const_variant (const, variant): name = '{}_{}'.format", "(z3.Exists ([v], matrix)).as_expr () e = matrix return e def qe (exp): if", "(m, consts, model_completion=False): eqs = [] for const in consts: # treat arrays", "None def mk_subst_from_model (m, consts, model_completion=False): sub = [] for const in consts:", "in w_cons: w.add (cons) # obtain witness for instances res = w.check ()", "(in the above example, we yield (1,const==1) and (None,1==const)); # # assume that", "e) return qe_lite (e) # obtain an under-approx of 
exp (an existentially quantified", "*sub))) print 'Solver for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr () res = s.check", "are created -- use z3.eq instead def exp_key (e): return e.ast.value def match_exp", "z3.Z3_OP_IMPLIES): head = matrix.arg (1) else: head = matrix assert head is not", "res = w.check () if res == z3.unsat: print 'FALSE\\n', new_insts return (None,", "[] if done_exp is None: done_exp = [] for e in done_exp: if", "break if not found: eq_terms.append (e) return True return False def process_eq (e1,", "args = [] for i in range (decl.arity ()): name = startswith +", "'Exist consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix print 'Solving by", "= mk_subst_from_model (m, exist_consts, model_completion=True) print 'New witness:', sub # check if the", "# tactic introduces new constants which need to be existentially quantified post_consts =", "exp2 is None: return True if exp1 is None and exp2 is not", "z3.Function (x.name (), *sorts) if ctx is None: return x.translate (ctx=z3.main_ctx ()) return", "True break if not found: eq_terms.append (e) return True return False def process_eq", "matrix) = strip_qblock (e) for v in qvars: if exp.is_forall (): matrix =", "const.sort () if isinstance (sort, z3.ArraySortRef): val_interp = m [const] if (val_interp is", "fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for pred in given_preds.itervalues (): fp.register_relation (pred)", "followed by qe def full_qe (exp): temp = qe_lite (exp) return qe (temp)", "Alternatively, we could have done the following with the caveat that # elim_term_ite", "[] for const in unique_const_leaves (exp): const_variant = mk_const_variant (const, variant) sub.append (", "etc.) 
def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None): def insert (e): found = False", "res = qf_exp return res def qe_lite (exp): if not z3.is_quantifier (exp): return", "univ_consts print 'Matrix:', matrix print 'Solving by negating the problem' cex_size = 0", "def print_lemmas (fp): preds = get_preds (fp) print for pred in preds.itervalues ():", "exp.is_forall (): is_forall = True (qvars, matrix) = strip_qblock (exp) exp = z3.Exists", "z3.FuncInterp): idx_sort = sort.domain () val_sort = sort.range () val = z3.K(val_sort, val_interp.else_value", "# obtain head_decl head_decl = head.decl () # ensure head_decl is in preds", "('nnf', ctx=exp.ctx) return t (exp).as_expr () def is_ite (exp): return z3.is_app_of (exp, z3.Z3_OP_ITE)", "-- use z3.eq instead def exp_key (e): return e.ast.value def match_exp (exp1, exp2):", "quantified formula representing the rule of the form: # Forall vars. body =>", "def under_approx_qe (exp, side_cons=None): assert z3.is_quantifier (exp) assert not exp.is_forall () (qvars, matrix)", "entry.value ()) else: val = m.eval (const, model_completion=model_completion) else: val = m.eval (const,", "(m, consts, model_completion=False): sub = [] for const in consts: # treat arrays", "not None: return False return exp_key (exp1) == exp_key (exp2) # iterator for", "(arg): yield arg.decl () def has_const (exp, const): found = False for l", "# invoke qe_lite once per quantified variable, for better result for i in", "e.ast.value def match_exp (exp1, exp2): if exp1 is None and exp2 is None:", "return z3.And (impl1, impl2) else: return decl (*args) def elim_ite (exp): e =", "(), str (cex_size)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append (", "as \"matrix [m/qvars]\" # # this is the weakest under-approx. 
if side_cons is", "work when new expressions are created -- use z3.eq instead def exp_key (e):", "res == z3.unsat: return mk_false (ctx=exp.ctx) m = s.model () sub = mk_subst_from_model", "formula representing the rule of the form: # Forall vars. body => head", "i in range (exp.num_args ()): for leaf in unique_leaves (exp.arg (i), leaf_keys): yield", "insert_and_yield (e): k = exp_key (e) if k not in leaf_keys: leaf_keys.append (k)", "to rules if not pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set", "# # to accommodate for alternative representations of the same equality (e.g. const==1", "from names to rules if not pp: print 'No pre-processing' fp.set (slice=False) fp.set", "z3.Exists (qvars, matrix) t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr () def", "= '{}_{}'.format (c.decl ().name (), str (i)) const = z3.Const (name, c.sort ())", "############################################ # # Some utility routines for Z3 # ############################################ import z3 I", "print 'Exists Forall exp:', exp assert z3.is_quantifier (exp) and not exp.is_forall () (exist_consts,", "sel_keys = [] # post-order if z3.is_app (exp): for i in range (exp.num_args", "while True: print 'Solver for witness:' for cons in w_cons: print cons.sexpr ()", "pred)) print def get_level_lemmas (fp, lvl, pred): lemmas = [] for l in", "exp2): if exp1 is None and exp2 is None: return True if exp1", "Or because they can take >2 args and # decl(*args) doesn't seem to", "simple arithmetic terms and the coefficient # of const is 1; # #", "[m/qvars]\" # # this is the weakest under-approx. 
if side_cons is a point", "def solve_exists_forall (exp, given_insts=None, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp)", "print 'TRUE\\n', sub if model: return (m, None, None) return (sub, None, None)", "routines for Z3 # ############################################ import z3 I = z3.IntSort () B =", "if e.eq (exp): return # sub-dag is already processed if z3.is_eq (exp): arg0", "pred in given_preds.itervalues (): fp.register_relation (pred) for i,rule in enumerate (given_fp.get_rules ()): name", "of the witnesses already works for m in witnesses: w = z3.Solver (ctx=exp.ctx)", "() # ensure head_decl is in preds head_key = exp_key (head_decl) if head_key", "(exp): for l in unique_leaves (exp): if z3.is_var (l): yield l def exp_has_const_leaf", "ret_val = z3.simplify (e2) else: assert z3.is_app (e1) if not (z3.is_app_of (e1, z3.Z3_OP_ADD)", "eqs.append (const == val) return eqs def qe_array (exp): if not z3.is_quantifier (exp):", "(exp).as_expr () # tactic introduces new constants which need to be existentially quantified", "0: return mk_true (ctx=ctx) else: return z3.And (*args) def create_fp (smt2file, ctx=None, pp=False,", "match_exp (exp1, exp2): if exp1 is None and exp2 is None: return True", "e1.arg (1) if z3.eq (arg1, const): if is_add: ret_val = z3.simplify (e2-arg0) else:", "fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for pred in given_preds.itervalues (): fp.register_relation (pred) for i,rule", "insert_and_yield (exp): yield leaf def unique_const_leaves (exp): for l in unique_leaves (exp): if", "False def process_eq (e1, e2): if z3.eq (e1, const): ret_val = z3.simplify (e2)", "new expressions are created -- use z3.eq instead def exp_key (e): return e.ast.value", "not pp: fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True,", "(exp): temp = qe_lite (exp) return qe (temp) def qe_sat (exp): t =", "else: res 
= qf_exp return res def qe_lite (exp): if not z3.is_quantifier (exp):", "cex_model print 'Size:', cex_size+1 # TODO: split cex_model into list of models for", "matrix assert head is not None # obtain head_decl head_decl = head.decl ()", "def nnf (exp): t = z3.Tactic ('nnf', ctx=exp.ctx) return t (exp).as_expr () def", "not None: return False if exp2 is None and exp1 is not None:", "need to worry about And and Or because they can take >2 args", "l in unique_leaves (exp): if not z3.is_var (l): yield l def unique_var_leaves (exp):", "if len (qvars) > 0: res = z3.ForAll (qvars, z3.Not (matrix)) else: res", "engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: fp.set (slice=False) fp.set (inline_linear=False)", "(exp): if not z3.is_var (l): yield l def unique_var_leaves (exp): for l in", "witness for instances res = w.check () if res == z3.unsat: print 'FALSE\\n',", "m is None: return (None, None, None) else: if model: return (m, None,", "print 'Univ consts:', univ_consts print 'Matrix:', matrix w_cons = [] # constraints for", "only appears in simple arithmetic terms and the coefficient # of const is", "get_preds (fp): preds = dict () pred_keys = [] for rule_exp in fp.get_rules", "in insert_and_yield (exp): yield sel def extract_consts (exp): res = [] for c", "s.add (fml) if side_cons is not None: for cons in side_cons: s.add (cons)", "return z3.ForAll (qvars, matrix) else: return z3.Exists (qvars, matrix) t = z3.Tactic ('cofactor-term-ite',", "cex of current size # check if any of the witnesses already works", "range (n): print '{} : {}'.format (i, fp.get_cover_delta (i, pred)) print '{} :", "variant): name = '{}_{}'.format (const.decl ().name (), variant) return z3.Const (name, const.sort ())", "[] for rule_exp in fp.get_rules (): # rule_exp is a quantified formula representing", "for the universals given by the user as a # starting point def", "i in range (n): print '{} : {}'.format (i, fp.get_cover_delta (i, pred)) print", "const is 1; # # assume 
that no equality term appears in another", "s.check () if res == z3.unsat: print 'TRUE\\n', sub if model: return (m,", "to eliminate by qe_lite #e = elim_bool_ite (exp) #e = elim_term_ite (e) return", "# where U and E are univ_consts and exist_consts # add a new", "exp e = exp t = z3.Tactic ('qe-light', ctx=exp.ctx) # invoke qe_lite once", "z3 I = z3.IntSort () B = z3.BoolSort () def z3_translate (x, ctx):", "= mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) new_insts = list ()", "name=name) rules [name] = rule return (q, fp, rules) def create_empty_fp (ctx=None, pp=False,", "('elim-term-ite', ctx=exp.ctx) e = t (exp).as_expr () # tactic introduces new constants which", "range (exp.num_args ()): for leaf in unique_leaves (exp.arg (i), leaf_keys): yield leaf else:", "terms and the coefficient # of const is 1; # # assume that", "{}'.format (i, fp.get_cover_delta (i, pred)) print '{} : {}'.format ('oo', fp.get_cover_delta (-1, pred))", "could have done the following with the caveat that # elim_term_ite introduces new", "# # Some utility routines for Z3 # ############################################ import z3 I =", "not found: eq_terms.append (e) return True return False def process_eq (e1, e2): if", "= head.decl () # ensure head_decl is in preds head_key = exp_key (head_decl)", "True (qvars, matrix) = strip_qblock (exp) exp = z3.Exists (qvars, z3.Not (matrix)) qf_exp", "elim_bool_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix) else: e =", "def elim_ite (exp): e = cofactor_term_ite (exp) e = elim_bool_ite (e) # Alternatively,", "for a cex of size 'cex_size' # Exists U1,U2,..U_cex_size. Forall E. 
Not (matrix),", "pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: print 'No pre-processing'", "for all equality terms on const; # # each pair (t,eq) is such", "() if res == z3.unsat: print 'TRUE\\n', sub if model: return (m, None,", "'side_cons' on the free variables; # # let exp = Exists (qvars, matrix)", "z3.Store (val, entry.arg_value (0), entry.value ()) else: val = m.eval (const, model_completion=model_completion) else:", "x if isinstance (x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x): if x.is_int", "None) else: if model: return (m, None, None) sub = mk_subst_from_model (m, exist_consts,", "exp.children ()) # need to worry about And and Or because they can", "(): print 'Lemmas for predicate: ', pred n = fp.get_num_levels (pred) for i", "strip_qblock (exp) if not z3.is_quantifier (e): # just an smt problem m =", "in unique_eq_terms_on_const (const, exp.arg (i), eq_terms, done_exp): yield (t, eq) done_exp.append (exp) def", "another equality term # (so, we can't handle \"(const==0)==b\", \"ite (const==0, x, y)", "print '{} : {}'.format (i, fp.get_cover_delta (i, pred)) print '{} : {}'.format ('oo',", "= '{}_{}'.format (c.decl ().name (), str (cex_size)) const = z3.Const (name, c.sort ())", "just an smt problem m = check_sat (e) if m is None: return", "cofactor_term_ite (exp) e = elim_bool_ite (e) # Alternatively, we could have done the", "under-approx. 
if side_cons is a point def under_approx_qe (exp, side_cons=None): assert z3.is_quantifier (exp)", "sub = [] for c in univ_consts: name = '{}_{}'.format (c.decl ().name (),", "if z3.eq (arg1, const): if is_add: ret_val = z3.simplify (e2-arg0) else: ret_val =", "matrix = t (z3.Exists ([v], matrix)).as_expr () e = matrix return e def", "pre_const_keys = map (exp_key, pre_consts) t = z3.Tactic ('elim-term-ite', ctx=exp.ctx) e = t", "counterexamples of increasing size, similar to BMC def solve_exists_forall_incremental (exp, model=False): print 'Exists", "(range (expr.num_vars ())): v_name = expr.var_name (i) v_sort = expr.var_sort (i) consts.append (z3.Const", "'FALSE\\n', cex_model print 'Size:', cex_size+1 # TODO: split cex_model into list of models", "if isinstance (sort, z3.ArraySortRef): val_interp = m [const] if (val_interp is not None)", "z3.is_quantifier (e): # invoke qe_lite for each variable, separately (qvars, matrix) = strip_qblock", "assume that const only appears in simple arithmetic terms and the coefficient #", "None: return x.translate (ctx=z3.main_ctx ()) return x.translate (ctx) def translate_pair_list (l, ctx): res", "i in range (x.arity ())] sorts.append (z3_translate (x.range (), ctx)) return z3.Function (x.name", "is_add: ret_val = z3.simplify (e2-arg0) else: ret_val = z3.simplify (arg0-e2) else: if is_add:", "decl = exp.decl () args = map (elim_bool_ite, exp.children ()) # need to", "# iterator for all equality terms on const; # # each pair (t,eq)", "None: if insert (t): yield (t, exp) else: yield (None, exp) elif z3.is_app", "*sub)).sexpr () res = s.check () if res == z3.unsat: print 'TRUE\\n', sub", "sub if model: return (m, None, None) return (sub, None, None) # increment", "for i in range (cex_size): sub = [] for c in univ_consts: name", "(m): return True return False def unique_selects (exp, sel_keys=None): def insert_and_yield (e): k", "we return (None,eq) for the # duplicates (in the above example, we yield", "if isinstance (x, 
z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x): if x.is_int ():", "i in range (val_interp.num_entries ()): entry = val_interp.entry (i) val = z3.Store (val,", "print_lemmas (fp): preds = get_preds (fp) print for pred in preds.itervalues (): print", "= expr.var_name (i) v_sort = expr.var_sort (i) consts.append (z3.Const (v_name, v_sort)) matrix =", "validity of ', z3.substitute (matrix, *sub) s.add (z3.Not (z3.substitute (matrix, *sub))) print 'Solver", "()): name = 'r'+str(i) fp.add_rule (rule, name=name) rules [name] = rule return (q,", "val = m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) eqs.append (const", "if model: return (m, None, None) return (sub, None, None) inst = s.model", "idx for sel in unique_selects (exp.arg (i), sel_keys): yield sel if z3.is_select (exp):", "= list () for i in reversed (range (expr.num_vars ())): v_name = expr.var_name", "(z3.Const (v_name, v_sort)) matrix = z3.substitute_vars (expr.body (), *consts) return (consts, matrix) def", "def is_ite (exp): return z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor (exp): return z3.is_app_of (exp,", "rule return (q, fp, rules) def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False): fp =", ">2 args and # decl(*args) doesn't seem to work with the py interface", "'Solver for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr () res = s.check () if", "ctx=exp.ctx) return t (exp).as_expr () def elim_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix)", "z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True)", "(exp.num_args ()): for (t,eq) in unique_eq_terms_on_const (const, exp.arg (i), eq_terms, done_exp): yield (t,", "# universal variables return (None, cex_model, witnesses) else: # no cex of current", "qvars, model_completion=True) return z3.substitute (matrix, *sub) def nnf 
(exp): t = z3.Tactic ('nnf',", "e = z3.ForAll (qvars, matrix) else: e = z3.Exists (qvars, matrix) return e", "if has_const (arg0, const): t = process_eq (arg0, arg1) if t is not", "(exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_bool_ite (matrix)", "return e if not z3.is_bool (exp): return exp if z3.is_true (exp) or z3.is_false", "curr_exist_consts = [] curr_matrix_disjs = [] for i in range (cex_size): sub =", "model: return (m, None, None) return (sub, None, None) # increment size cex_size", "the py interface if z3.is_and (exp): return z3.And (*args) elif z3.is_or (exp): return", "pred_keys = [] for rule_exp in fp.get_rules (): # rule_exp is a quantified", "exp is_forall = False if exp.is_forall (): is_forall = True (qvars, matrix) =", "v_sort)) matrix = z3.substitute_vars (expr.body (), *consts) return (consts, matrix) def get_preds (fp):", "arguments contain de-bruijn variables, they are ignored def arg_decls (exp): for i in", "[] for l in range (lvl, fp.get_num_levels (pred) + 1): lemmas.append (fp.get_cover_delta (l,", "process_eq (e1, e2): if z3.eq (e1, const): ret_val = z3.simplify (e2) else: assert", "done_exp: if e.eq (exp): return # sub-dag is already processed if z3.is_eq (exp):", "has_const (arg0, const): t = process_eq (arg0, arg1) if t is not None:", "(exist_consts, e) return qe_lite (e) # obtain an under-approx of exp (an existentially", "for i in range (x.arity ())] sorts.append (z3_translate (x.range (), ctx)) return z3.Function", "instead def exp_key (e): return e.ast.value def match_exp (exp1, exp2): if exp1 is", "None: for c in side_cons: s.add (c) res = s.check () if res", "(e): k = exp_key (e) if k not in leaf_keys: leaf_keys.append (k) yield", "([], expr) consts = list () for i in reversed (range (expr.num_vars ())):", "z3.is_quantifier (exp) and not exp.is_forall () (exist_consts, e) = strip_qblock (exp) if not", "= z3.Exists (qvars, z3.Not (matrix)) qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) 
(exp).as_expr () if", "if t is not None: if insert (t): yield (t, exp) else: yield", "(rule, name=name) rules [name] = rule return (q, fp, rules) def create_empty_fp (ctx=None,", "x.is_int (): return z3.IntSort (ctx=ctx) else : assert x.is_real () return z3.RealSort (ctx=ctx)", "# check if fml is sat with given side constraints def check_sat (fml,", "return found # iterator for all equality terms on const; # # each", "def mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx) def mk_int (val, ctx=None): return z3.IntVal", "z3.is_const (exp) and not (z3.is_int_value (exp) or z3.is_rational_value (exp)): for leaf in insert_and_yield", "not (z3.is_int_value (exp) or z3.is_rational_value (exp)): for leaf in insert_and_yield (exp): yield leaf", "# # each pair (t,eq) is such that eq is an equality logically", "(exp).as_expr () def cofactor_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp)", "def insert_and_yield (e): k = exp_key (e) if k not in leaf_keys: leaf_keys.append", "== z3.unsat: print 'TRUE\\n', sub if model: return (m, None, None) return (sub,", "yield leaf def unique_const_leaves (exp): for l in unique_leaves (exp): if not z3.is_var", "(matrix)) else: res = z3.Not (matrix) else: res = qf_exp return res def", "= cofactor_term_ite (matrix) if exp.is_forall (): return z3.ForAll (qvars, matrix) else: return z3.Exists", "cex_size = 0 curr_exist_consts = [] curr_matrix_disjs = [] for i in range", "None) and isinstance (val_interp, z3.FuncInterp): idx_sort = sort.domain () val_sort = sort.range ()", "pred)) print '{} : {}'.format ('oo', fp.get_cover_delta (-1, pred)) print def get_level_lemmas (fp,", "qvars: if exp.is_forall (): matrix = t (z3.ForAll ([v], matrix)).as_expr () else: matrix", "True return False def process_eq (e1, e2): if z3.eq (e1, const): ret_val =", "ctx), z3_translate (b, ctx)) res.append (new_p) return res def mk_true (ctx=None): return z3.BoolVal", "# map from names to rules if not pp: print 'No pre-processing' 
fp.set", "print 'Exist consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix w_cons =", "for i in reversed (range (expr.num_vars ())): v_name = expr.var_name (i) v_sort =", "res == z3.unsat: print 'FALSE\\n', new_insts return (None, new_insts, witnesses) m = w.model", "z3.is_app (exp) decl = exp.decl () args = map (elim_bool_ite, exp.children ()) #", "is_ite (exp): return z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor (exp): return z3.is_app_of (exp, z3.Z3_OP_XOR)", "repetitions of terms, we return (None,eq) for the # duplicates (in the above", "(exp): t = z3.Tactic ('nnf', ctx=exp.ctx) return t (exp).as_expr () def is_ite (exp):", "if not pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set", "pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False)", "z3.is_and (exp): return z3.And (*args) elif z3.is_or (exp): return z3.Or (*args) elif is_ite", "insert_and_yield (exp): yield sel def extract_consts (exp): res = [] for c in", "= m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) eqs.append (const ==", "z3.unsat: return mk_false (ctx=exp.ctx) m = s.model () sub = mk_subst_from_model (m, qvars,", "() return z3.RealSort (ctx=ctx) if isinstance (x, z3.FuncDeclRef): sorts = [z3_translate (x.domain (i),", "(): e = z3.ForAll (qvars, matrix) else: e = z3.Exists (qvars, matrix) return", "if res == z3.unsat: print 'TRUE\\n', sub if model: return (m, None, None)", "== z3.sat: return s.model () else: return None def mk_subst_from_model (m, consts, model_completion=False):", "(inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q = fp.parse_file (smt2file) return (q,", "(t,e): found = True break if not found: eq_terms.append (e) 
return True return", "= e1.arg (1) if z3.eq (arg1, const): if is_add: ret_val = z3.simplify (e2-arg0)", "z3.is_app (exp): for i in range (exp.num_args ()): for leaf in unique_leaves (exp.arg", "create_named_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): given_fp = z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file", "= [] if z3.is_const (exp) and not (z3.is_int_value (exp) or z3.is_rational_value (exp)): for", "return t (exp).as_expr () def elim_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) =", "a QF predicate instance # obtain the head unused_, matrix = strip_qblock (rule_exp)", "in range (n): print '{} : {}'.format (i, fp.get_cover_delta (i, pred)) print '{}", "('oo', fp.get_cover_delta (-1, pred)) print def get_level_lemmas (fp, lvl, pred): lemmas = []", "this is the weakest under-approx. if side_cons is a point def under_approx_qe (exp,", "matrix) = strip_qblock (exp) matrix = cofactor_term_ite (matrix) if exp.is_forall (): return z3.ForAll", "(z3.Not (matrix), *sub)) if w.check () == z3.unsat: print 'TRUE\\n', sub if model:", "in univ_consts: name = '{}_{}'.format (c.decl ().name (), str (cex_size)) const = z3.Const", "use_farkas=True, generate_proof_trace=False) for pred in given_preds.itervalues (): fp.register_relation (pred) for i,rule in enumerate", "(ctx=exp.ctx) s.add (matrix) if side_cons is not None: for c in side_cons: s.add", "(i.e. 
an atom in the boolean structure of exp) def elim_bool_ite (exp): if", "return decl (*args) def elim_ite (exp): e = cofactor_term_ite (exp) e = elim_bool_ite", "(True, ctx=ctx) def mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx) def mk_int (val, ctx=None):", "= [] for i in range (cex_size): sub = [] for c in", "(exp): for i in range (exp.num_args ()) : arg = exp.arg (i) if", "(post_consts)): post_key = post_const_keys [i] if post_key not in pre_const_keys: exist_consts.append (post_consts [i])", "(exp, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp) and not exp.is_forall", "to (const==t) # appearing in exp; # # to accommodate for alternative representations", "(x): if x.is_int (): return z3.IntSort (ctx=ctx) else : assert x.is_real () return", "(ctx=ctx) q = given_fp.parse_file (smt2file) given_preds = get_preds (given_fp) fp = z3.Fixedpoint (ctx=ctx)", "sub = [] for const in consts: # treat arrays specially due to", "(val, ctx=None): return z3.IntVal (val, ctx=ctx) def mk_and (args, ctx=None): if len (args)", "coefficient # of const is 1; # # assume that no equality term", "handle \"(const==0)==b\", \"ite (const==0, x, y) == 3\", etc.) def unique_eq_terms_on_const (const, exp,", "matrix) = strip_qblock (exp) matrix = elim_term_ite (matrix) if exp.is_forall (): e =", "solve_exists_forall (exp, given_insts=None, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp) and", "in unique_const_leaves (exp): const_variant = mk_const_variant (const, variant) sub.append ( (const, const_variant) )", "a boolean atom # (i.e. 
an atom in the boolean structure of exp)", "q = fp.parse_file (smt2file) return (q, fp) def create_named_fp (smt2file, ctx=None, pp=False, engine='pdr',", "z3.is_app (exp): for i in range (exp.num_args ()): # args are the array", "not None) and isinstance (val_interp, z3.FuncInterp): idx_sort = sort.domain () val_sort = sort.range", "in side_cons: s.add (c) res = s.check () if res == z3.unsat: return", "= w.model () witnesses.append (m) print 'Model for new witness:\\n', m sub =", "given side constraints def check_sat (fml, side_cons=None): s = z3.Solver (ctx=fml.ctx) s.add (fml)", "(const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) sub.append ( (const, val) )", "def process_eq (e1, e2): if z3.eq (e1, const): ret_val = z3.simplify (e2) else:", "ctx): res = [] for (a,b) in l: new_p = (z3_translate (a, ctx),", "# check if the witness is sufficient s = z3.Solver (ctx=exp.ctx) print 'checking", "# duplicates (in the above example, we yield (1,const==1) and (None,1==const)); # #", "w_cons.append (z3.substitute (matrix, *sub)) # like above, but looks for counterexamples of increasing", "the # duplicates (in the above example, we yield (1,const==1) and (None,1==const)); #", "seem to work with the py interface if z3.is_and (exp): return z3.And (*args)", "for i in range (exp.num_args ()): # args are the array and the", "(e) # Alternatively, we could have done the following with the caveat that", "in done_exp: if e.eq (exp): return # sub-dag is already processed if z3.is_eq", "the array and the idx for sel in unique_selects (exp.arg (i), sel_keys): yield", "True break return found # iterator for all equality terms on const; #", "given_fp = z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file (smt2file) given_preds = get_preds (given_fp) fp", "else: assert e.is_forall () (univ_consts, matrix) = strip_qblock (e) print 'Exist consts:', exist_consts", "= z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file (smt2file) given_preds = 
get_preds (given_fp) fp =", "pred)) return z3.simplify (z3.And (*lemmas)) # doesn't seem to quite work when new", "== 3\", etc.) def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None): def insert (e): found", "(z3.substitute (matrix, *sub)).sexpr () res = s.check () if res == z3.unsat: print", "witnesses already works for m in witnesses: w = z3.Solver (ctx=exp.ctx) sub =", "return (q, fp) def create_named_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): given_fp = z3.Fixedpoint", "# let exp = Exists (qvars, matrix) # obtain a model m of", "if res == z3.sat: return s.model () else: return None def mk_subst_from_model (m,", "in l: new_p = (z3_translate (a, ctx), z3_translate (b, ctx)) res.append (new_p) return", "sub = mk_subst_from_model (m, exist_consts, model_completion=True) print 'New witness:', sub # check if", "return (sub, None, None) inst = s.model () new_insts.append (inst) print 'New instance:\\n',", "(1) if z3.eq (arg1, const): if is_add: ret_val = z3.simplify (e2-arg0) else: ret_val", "= [] for i in range (len (post_consts)): post_key = post_const_keys [i] if", "in range (exp.num_vars ()): e = t (e).as_expr () if not z3.is_quantifier (e):", "matrix) = strip_qblock (e) print 'Exist consts:', exist_consts print 'Univ consts:', univ_consts print", "in simple arithmetic terms and the coefficient # of const is 1; #", "take >2 args and # decl(*args) doesn't seem to work with the py", "not None: if insert (t): yield (t, exp) else: yield (None, exp) elif", "the above example, we yield (1,const==1) and (None,1==const)); # # assume that const", "= z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) while True: print 'CURRENT SIZE:', cex_size+1", "for Z3 # ############################################ import z3 I = z3.IntSort () B = z3.BoolSort", ": arg = exp.arg (i) if z3.is_app (arg): yield arg.decl () def has_const", "(t,eq) in unique_eq_terms_on_const (const, exp.arg (i), eq_terms, done_exp): yield (t, eq) 
done_exp.append (exp)", "and isinstance (val_interp, z3.FuncInterp): idx_sort = sort.domain () val_sort = sort.range () val", "if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp) : idx_sort = sort.domain", "(-1, pred)) return z3.simplify (z3.And (*lemmas)) # doesn't seem to quite work when", "() w = z3.Solver (ctx=exp.ctx) for cons in w_cons: w.add (cons) # obtain", "(smt2file) return (q, fp) def create_named_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): given_fp =", "not None: for cons in side_cons: s.add (cons) res = s.check () if", "we solve the negation of the given problem, looking for # counterexamples of", "ctx=exp.ctx) (exp).as_expr () # qe_lite followed by qe def full_qe (exp): temp =", "if not found: eq_terms.append (e) return True return False def process_eq (e1, e2):", "(exp) if not z3.is_quantifier (e): # just an smt problem m = check_sat", "done_exp): yield (t, eq) done_exp.append (exp) def unique_leaves (exp, leaf_keys=None): def insert_and_yield (e):", "() val_sort = sort.range () val = z3.K(val_sort, val_interp.else_value ()) for i in", "no equality term appears in another equality term # (so, we can't handle", "instance # obtain the head unused_, matrix = strip_qblock (rule_exp) if z3.is_app_of (matrix,", "and isinstance (val_interp, z3.FuncInterp) : idx_sort = sort.domain () val_sort = sort.range ()", "def arg_decls (exp): for i in range (exp.num_args ()) : arg = exp.arg", "const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append ( (c, const) )", "post-order if z3.is_app (exp): for i in range (exp.num_args ()): # args are", "= z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append ( (c, const) ) new_disj", "each variable, separately (qvars, matrix) = strip_qblock (e) for v in qvars: if", "= solve_exists_forall (curr_exp, model=True) if cex_model is not None: print 'FALSE\\n', cex_model print", "which need to be existentially quantified post_consts = extract_consts (e) 
post_const_keys = map", "for i in range (n): print '{} : {}'.format (i, fp.get_cover_delta (i, pred))", "pre_const_keys: exist_consts.append (post_consts [i]) if len (exist_consts) > 0: e = z3.Exists (exist_consts,", "exp # if arguments contain de-bruijn variables, they are ignored def arg_decls (exp):", "m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) eqs.append (const == val)", "return z3.simplify (z3.And (*lemmas)) # doesn't seem to quite work when new expressions", "def check_sat (fml, side_cons=None): s = z3.Solver (ctx=fml.ctx) s.add (fml) if side_cons is", "m = s.model () sub = mk_subst_from_model (m, qvars, model_completion=True) return z3.substitute (matrix,", "matrix print 'Solving by negating the problem' cex_size = 0 curr_exist_consts = []", "elim_ite (exp): e = cofactor_term_ite (exp) e = elim_bool_ite (e) # Alternatively, we", "(smt2file) given_preds = get_preds (given_fp) fp = z3.Fixedpoint (ctx=ctx) rules = dict ()", "and Or because they can take >2 args and # decl(*args) doesn't seem", "to work with the py interface if z3.is_and (exp): return z3.And (*args) elif", "(i, pred)) print '{} : {}'.format ('oo', fp.get_cover_delta (-1, pred)) print def get_level_lemmas", "unique_const_leaves (exp): const_variant = mk_const_variant (const, variant) sub.append ( (const, const_variant) ) return", "print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine,", "unique_const_leaves (exp): for l in unique_leaves (exp): if not z3.is_var (l): yield l", ": assert x.is_real () return z3.RealSort (ctx=ctx) if isinstance (x, z3.FuncDeclRef): sorts =", "(sort, z3.ArraySortRef): val_interp = m [const] if (val_interp is not None) and isinstance", "return (m, None, None) return (sub, None, None) # increment size cex_size +=", "(exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_bool_ite (matrix) if 
exp.is_forall ():", "exp = z3.Exists (qvars, z3.Not (matrix)) qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr ()", "(val_interp, z3.FuncInterp): idx_sort = sort.domain () val_sort = sort.range () val = z3.K(val_sort,", "curr_matrix_disjs.append (new_disj) while True: print 'CURRENT SIZE:', cex_size+1 # look for a cex", "works for m in witnesses: w = z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model (m,", "(exist_consts) > 0: e = z3.Exists (exist_consts, e) return qe_lite (e) # obtain", "e in done_exp: if e.eq (exp): return # sub-dag is already processed if", "None, None) inst = s.model () new_insts.append (inst) print 'New instance:\\n', inst sub", "() def has_const (exp, const): found = False for l in unique_const_leaves (exp):", "strip_qblock (exp) matrix = elim_bool_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars,", "for the original set of # universal variables return (None, cex_model, witnesses) else:", "return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr () # qe_lite followed by qe def full_qe", "if z3.is_select (exp): for sel in insert_and_yield (exp): yield sel def extract_consts (exp):", "fp.get_rules (): # rule_exp is a quantified formula representing the rule of the", "= z3.Tactic ('qe-light', ctx=exp.ctx) # invoke qe_lite once per quantified variable, for better", "head_key in pred_keys: continue pred_keys.append (head_key) preds [head_decl.name ()] = head_decl return preds", "version of z3.AstRef.translate that handles sorts and function declarations correctly\"\"\" if x.ctx ==", "is sat with given side constraints def check_sat (fml, side_cons=None): s = z3.Solver", "(inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return fp def strip_qblock (expr): if", "return exp assert z3.is_app (exp) decl = exp.decl () args = map (elim_bool_ite,", "def insert_and_yield (e): k = exp_key (e) if k not in sel_keys: sel_keys.append", "(l, pred)) lemmas.append (fp.get_cover_delta 
(-1, pred)) return z3.simplify (z3.And (*lemmas)) # doesn't seem", "to be existentially quantified post_consts = extract_consts (e) post_const_keys = map (exp_key, post_consts)", "preds = dict () pred_keys = [] for rule_exp in fp.get_rules (): #", "return (None,eq) for the # duplicates (in the above example, we yield (1,const==1)", "the boolean structure of exp) def elim_bool_ite (exp): if z3.is_quantifier (exp): (qvars, matrix)", "l in unique_leaves (exp): if z3.is_var (l): yield l def exp_has_const_leaf (exp, l):", "for sel in unique_selects (exp.arg (i), sel_keys): yield sel if z3.is_select (exp): for", "const_variant) ) return sub def mk_fresh_args (decl, startswith=''): args = [] for i", "str (i)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append ( (c,", "side_cons: s.add (c) res = s.check () if res == z3.unsat: return mk_false", "fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q = fp.parse_file (smt2file) return (q, fp)", "exp.is_forall (): matrix = t (z3.ForAll ([v], matrix)).as_expr () else: matrix = t", "elim_term_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix) else: e =", "(smt2file, ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: print", "lemmas.append (fp.get_cover_delta (-1, pred)) return z3.simplify (z3.And (*lemmas)) # doesn't seem to quite", "that # elim_term_ite introduces new existentially quantified variables which can # be hard", "(smt2file, ctx=None, pp=False, engine='pdr', validate=False): given_fp = z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file (smt2file)", "[] for i in range (len (post_consts)): post_key = post_const_keys [i] if post_key", "def mk_const_variant (const, variant): name = '{}_{}'.format (const.decl ().name (), variant) return z3.Const", "(expr): return ([], expr) consts = list () for i in reversed (range", "1==const) and to avoid repetitions of terms, we return (None,eq) for the #", 
"\"\"\" A version of z3.AstRef.translate that handles sorts and function declarations correctly\"\"\" if", "following with the caveat that # elim_term_ite introduces new existentially quantified variables which", "full_qe (exp): temp = qe_lite (exp) return qe (temp) def qe_sat (exp): t", "z3.And (*args) elif z3.is_or (exp): return z3.Or (*args) elif is_ite (exp): impl1 =", "(expr.num_vars ())): v_name = expr.var_name (i) v_sort = expr.var_sort (i) consts.append (z3.Const (v_name,", "exp.is_forall () (exist_consts, e) = strip_qblock (exp) if not z3.is_quantifier (e): # just", "assert z3.is_quantifier (exp) assert not exp.is_forall () (qvars, matrix) = strip_qblock (exp) s", "(sub, None, None) inst = s.model () new_insts.append (inst) print 'New instance:\\n', inst", "(arg0-e2) else: if is_add: ret_val = process_eq (arg0, e2-arg1) else: ret_val = process_eq", "for c in unique_const_leaves (exp): res.append (c) return res def mk_const_variant (const, variant):", "(x, ctx): \"\"\" A version of z3.AstRef.translate that handles sorts and function declarations", "'Model for new witness:\\n', m sub = mk_subst_from_model (m, exist_consts, model_completion=True) print 'New", "exp_key (exp2) # iterator for declarations of arguments of exp # if arguments", "return mk_false (ctx=exp.ctx) m = s.model () sub = mk_subst_from_model (m, qvars, model_completion=True)", "= mk_subst_from_model (m, exist_consts, model_completion=True) return (sub, None, None) else: assert e.is_forall ()", "z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr () def elim_term_ite (exp): if z3.is_quantifier (exp):", "const_variant = mk_const_variant (const, variant) sub.append ( (const, const_variant) ) return sub def", "if is_add: ret_val = z3.simplify (e2-arg0) else: ret_val = z3.simplify (arg0-e2) else: if", "if z3.eq (t,e): found = True break if not found: eq_terms.append (e) return", "preds def print_lemmas (fp): preds = get_preds (fp) print for pred in preds.itervalues", 
"(z3.substitute (matrix, *sub))) print 'Solver for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr () res", "res def mk_const_variant (const, variant): name = '{}_{}'.format (const.decl ().name (), variant) return", "is not None: for inst in given_insts: sub = mk_subst_from_model (inst, univ_consts, model_completion=True)", "leaf in insert_and_yield (exp): yield leaf elif z3.is_app (exp): for i in range", "def extract_consts (exp): res = [] for c in unique_const_leaves (exp): res.append (c)", "are the array and the idx for sel in unique_selects (exp.arg (i), sel_keys):", "= 'r'+str(i) fp.add_rule (rule, name=name) rules [name] = rule return (q, fp, rules)", "False def unique_selects (exp, sel_keys=None): def insert_and_yield (e): k = exp_key (e) if", "()) for i in range (val_interp.num_entries ()): entry = val_interp.entry (i) val =", "for l in unique_leaves (exp): if z3.is_var (l): yield l def exp_has_const_leaf (exp,", "of exp) def elim_bool_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp)", "side_cons=None): s = z3.Solver (ctx=fml.ctx) s.add (fml) if side_cons is not None: for", "(given_fp) fp = z3.Fixedpoint (ctx=ctx) rules = dict () # map from names", "if cex_model is not None: print 'FALSE\\n', cex_model print 'Size:', cex_size+1 # TODO:", "z3.Exists (exist_consts, e) return qe_lite (e) # obtain an under-approx of exp (an", "args = map (elim_bool_ite, exp.children ()) # need to worry about And and", "arg1,arg0 # swap if has_const (arg0, const): t = process_eq (arg0, arg1) if", "def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None): def insert (e): found = False for", "a model m of qvars consistent with (matrix /\\ side_cons) # the under-approx.", "= val_interp.entry (i) val = z3.Store (val, entry.arg_value (0), entry.value ()) else: val", "if ctx is None: return x.translate (ctx=z3.main_ctx ()) return x.translate (ctx) def translate_pair_list", "of the same equality (e.g. const==1 # vs. 
1==const) and to avoid repetitions", "not None: for c in side_cons: s.add (c) res = s.check () if", "z3.eq (l, const): found = True break return found # iterator for all", "and E are univ_consts and exist_consts # add a new set of exist", "model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp) and not exp.is_forall ()", "counterexamples of increasing size; # in other words, we solve the negation of", "( (c, const) ) new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp", "'Size:', cex_size+1 # TODO: split cex_model into list of models for the original", "fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return fp def strip_qblock (expr):", "matrix)).as_expr () else: matrix = t (z3.Exists ([v], matrix)).as_expr () e = matrix", "c in side_cons: s.add (c) res = s.check () if res == z3.unsat:", "not z3.is_quantifier (e): # just an smt problem m = check_sat (e) if", "decl(*args) doesn't seem to work with the py interface if z3.is_and (exp): return", "appears in simple arithmetic terms and the coefficient # of const is 1;", "look for a cex of size 'cex_size' # Exists U1,U2,..U_cex_size. Forall E. 
Not", "(ctx=exp.ctx) sub = mk_subst_from_model (m, exist_consts, model_completion=True) w.add (z3.substitute (z3.Not (matrix), *sub)) if", "and the idx for sel in unique_selects (exp.arg (i), sel_keys): yield sel if", "use z3.eq instead def exp_key (e): return e.ast.value def match_exp (exp1, exp2): if", "(val_interp, z3.FuncInterp) : idx_sort = sort.domain () val_sort = sort.range () val =", "i in range (exp.num_vars ()): e = t (e).as_expr () if not z3.is_quantifier", "(): is_forall = True (qvars, matrix) = strip_qblock (exp) exp = z3.Exists (qvars,", "True return False def unique_selects (exp, sel_keys=None): def insert_and_yield (e): k = exp_key", "(ctx=ctx) else : assert x.is_real () return z3.RealSort (ctx=ctx) if isinstance (x, z3.FuncDeclRef):", "qe (temp) def qe_sat (exp): t = z3.Tactic ('qe-sat', ctx=exp.ctx) return t (exp).as_expr", "given_insts is a list of instances for the universals given by the user", "(curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True) if", "get_preds (fp) print for pred in preds.itervalues (): print 'Lemmas for predicate: ',", "(e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)): return None is_add = z3.is_app_of (e1, z3.Z3_OP_ADD)", "the problem' cex_size = 0 curr_exist_consts = [] curr_matrix_disjs = [] for i", "(val_interp is not None) and isinstance (val_interp, z3.FuncInterp): idx_sort = sort.domain () val_sort", "= z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model (m, exist_consts, model_completion=True) w.add (z3.substitute (z3.Not (matrix),", "qe (exp): if not z3.is_quantifier (exp): return exp return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr", "*sorts) if ctx is None: return x.translate (ctx=z3.main_ctx ()) return x.translate (ctx) def", "(qvars, matrix) = strip_qblock (qf_exp) if len (qvars) > 0: res = z3.ForAll", "(None, new_insts, witnesses) m = w.model () witnesses.append (m) print 'Model for new", "(): return 
z3.ForAll (qvars, matrix) else: return z3.Exists (qvars, matrix) t = z3.Tactic", "introduces new constants which need to be existentially quantified post_consts = extract_consts (e)", "# # assume that const only appears in simple arithmetic terms and the", "i in range (cex_size): sub = [] for c in univ_consts: name =", "if z3.is_eq (exp): arg0 = exp.arg (0) arg1 = exp.arg (1) if has_const", "print 'Solver for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr () res = s.check ()", "(e).as_expr () if not z3.is_quantifier (e): return e if z3.is_quantifier (e): # invoke", "m [const] if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp) : idx_sort", "but looks for counterexamples of increasing size; # in other words, we solve", "return t (exp).as_expr () def is_ite (exp): return z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor", "sampling based method for quant. alternation; # given_insts is a list of instances", "is not None) and isinstance (val_interp, z3.FuncInterp) : idx_sort = sort.domain () val_sort", "True: print 'CURRENT SIZE:', cex_size+1 # look for a cex of size 'cex_size'", "== ctx: return x if isinstance (x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if z3.is_arith_sort", "matrix = elim_bool_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix) else:", "exp, eq_terms=None, done_exp=None): def insert (e): found = False for t in eq_terms:", "pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: fp.set (slice=False) fp.set", "all equality terms on const; # # each pair (t,eq) is such that", "() B = z3.BoolSort () def z3_translate (x, ctx): \"\"\" A version of", "set of # universal variables return (None, cex_model, witnesses) else: # no cex", "(arg0, arg1) if t is not None: if insert (t): yield (t, exp)", "None: done_exp = [] for e in done_exp: if e.eq (exp): return #", "done_exp = [] for e in done_exp: if e.eq (exp): return # sub-dag", "(exp): const_variant = mk_const_variant (const, variant) sub.append 
( (const, const_variant) ) return sub", "not in pre_const_keys: exist_consts.append (post_consts [i]) if len (exist_consts) > 0: e =", "qe_lite (exp) return qe (temp) def qe_sat (exp): t = z3.Tactic ('qe-sat', ctx=exp.ctx)", "(*args) elif is_ite (exp): impl1 = z3.Implies (args[0], args[1]) impl2 = z3.Implies (z3.Not", "generate_proof_trace=False) q = fp.parse_file (smt2file) return (q, fp) def create_named_fp (smt2file, ctx=None, pp=False,", "z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file (smt2file) given_preds = get_preds (given_fp) fp = z3.Fixedpoint", "y) == 3\", etc.) def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None): def insert (e):", "if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp): idx_sort = sort.domain ()", "= s.check () if res == z3.sat: return s.model () else: return None", "'FALSE\\n', new_insts return (None, new_insts, witnesses) m = w.model () witnesses.append (m) print", "def mk_eqs_from_model (m, consts, model_completion=False): eqs = [] for const in consts: #", "boolean atom # (i.e. 
an atom in the boolean structure of exp) def", "return None is_add = z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 = e1.arg (0) arg1 =", "new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists (curr_exist_consts, z3.ForAll", "that no equality term appears in another equality term # (so, we can't", "engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: print 'No pre-processing' fp.set", "de-bruijn variables, they are ignored def arg_decls (exp): for i in range (exp.num_args", "for c in univ_consts: name = '{}_{}'.format (c.decl ().name (), str (cex_size)) const", "if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)): return None is_add =", "s.add (z3.Not (z3.substitute (matrix, *sub))) print 'Solver for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr", "range (val_interp.num_entries ()): entry = val_interp.entry (i) val = z3.Store (val, entry.arg_value (0),", "x, y) == 3\", etc.) def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None): def insert", "or z3.is_rational_value (exp)): for leaf in insert_and_yield (exp): yield leaf elif z3.is_app (exp):", "(qvars, matrix) = strip_qblock (exp) matrix = elim_term_ite (matrix) if exp.is_forall (): e", "and not exp.is_forall () (exist_consts, e) = strip_qblock (exp) if not z3.is_quantifier (e):", "qe_lite followed by qe def full_qe (exp): temp = qe_lite (exp) return qe", "and # decl(*args) doesn't seem to work with the py interface if z3.is_and", "z3.is_bool (exp): return exp if z3.is_true (exp) or z3.is_false (exp): return exp assert", "not z3.is_quantifier (e): return e if z3.is_quantifier (e): # invoke qe_lite for each", "to avoid repetitions of terms, we return (None,eq) for the # duplicates (in", "const): if is_add: ret_val = z3.simplify (e2-arg0) else: ret_val = z3.simplify (arg0-e2) else:", "witness:' for cons in w_cons: print cons.sexpr () w = z3.Solver (ctx=exp.ctx) for", "function declarations correctly\"\"\" 
if x.ctx == ctx: return x if isinstance (x, z3.BoolSortRef):", "lemmas = [] for l in range (lvl, fp.get_num_levels (pred) + 1): lemmas.append", "res = s.check () if res == z3.sat: return s.model () else: return", "elim_bool_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_bool_ite", "= z3.Implies (z3.Not (args[0]), args[2]) return z3.And (impl1, impl2) else: return decl (*args)", "z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append ( (c, const) ) new_disj =", "() def cofactor_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix", "sel if z3.is_select (exp): for sel in insert_and_yield (exp): yield sel def extract_consts", "if z3.is_app (arg): yield arg.decl () def has_const (exp, const): found = False", "free variables; # # let exp = Exists (qvars, matrix) # obtain a", "the under-approx. is obtained as \"matrix [m/qvars]\" # # this is the weakest", "strip_qblock (e) print 'Exist consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix", "return e pre_consts = extract_consts (exp) pre_const_keys = map (exp_key, pre_consts) t =", "w.add (z3.substitute (z3.Not (matrix), *sub)) if w.check () == z3.unsat: print 'TRUE\\n', sub", "return exp_key (exp1) == exp_key (exp2) # iterator for declarations of arguments of", "(qvars, z3.Not (matrix)) qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr () if is_forall: (qvars,", "(exp): if not z3.is_quantifier (exp): return exp return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr ()", "is not None: if insert (t): yield (t, exp) else: yield (None, exp)", "return e if z3.is_quantifier (e): # invoke qe_lite for each variable, separately (qvars,", "when it appears as a boolean atom # (i.e. 
an atom in the", "s.model () new_insts.append (inst) print 'New instance:\\n', inst sub = mk_subst_from_model (inst, univ_consts,", "(exp1, exp2): if exp1 is None and exp2 is None: return True if", "('qe-light', ctx=exp.ctx) # invoke qe_lite once per quantified variable, for better result for", "model_completion=model_completion) eqs.append (const == val) return eqs def qe_array (exp): if not z3.is_quantifier", "for i in range (val_interp.num_entries ()): entry = val_interp.entry (i) val = z3.Store", "matrix) else: e = z3.Exists (qvars, matrix) return e if not z3.is_bool (exp):", "new_insts.append (inst) print 'New instance:\\n', inst sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append", "(exp) for leaf in insert_and_yield (exp): yield leaf def unique_const_leaves (exp): for l", "(matrix)) qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr () if is_forall: (qvars, matrix) =", "matrix return e def qe (exp): if not z3.is_quantifier (exp): return exp return", "head_decl head_decl = head.decl () # ensure head_decl is in preds head_key =", "def exp_key (e): return e.ast.value def match_exp (exp1, exp2): if exp1 is None", "swap if has_const (arg0, const): t = process_eq (arg0, arg1) if t is", "if not z3.is_quantifier (exp): return exp e = exp t = z3.Tactic ('qe-light',", "fp def strip_qblock (expr): if not z3.is_quantifier (expr): return ([], expr) consts =", "print 'FALSE\\n', cex_model print 'Size:', cex_size+1 # TODO: split cex_model into list of", "post_consts) exist_consts = [] for i in range (len (post_consts)): post_key = post_const_keys", "is None: return x.translate (ctx=z3.main_ctx ()) return x.translate (ctx) def translate_pair_list (l, ctx):", "fp.get_cover_delta (i, pred)) print '{} : {}'.format ('oo', fp.get_cover_delta (-1, pred)) print def", "to quite work when new expressions are created -- use z3.eq instead def", "mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, 
*sub)) # like above, but looks", "to BMC def solve_exists_forall_incremental (exp, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier", "qe_lite for each variable, separately (qvars, matrix) = strip_qblock (e) for v in", "we can't handle \"(const==0)==b\", \"ite (const==0, x, y) == 3\", etc.) def unique_eq_terms_on_const", "for witness:' for cons in w_cons: print cons.sexpr () w = z3.Solver (ctx=exp.ctx)", "(inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return fp def strip_qblock", "ignored def arg_decls (exp): for i in range (exp.num_args ()) : arg =", "m in unique_const_leaves (exp): if l.eq (m): return True return False def unique_selects", "0: e = z3.Exists (exist_consts, e) return qe_lite (e) # obtain an under-approx", "= Exists (qvars, matrix) # obtain a model m of qvars consistent with", "(exp): (qvars, matrix) = strip_qblock (exp) matrix = cofactor_term_ite (matrix) if exp.is_forall ():", "True if exp1 is None and exp2 is not None: return False if", "(matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model,", "(ctx=ctx) if isinstance (x, z3.FuncDeclRef): sorts = [z3_translate (x.domain (i), ctx) for i", "e1.arg (0) arg1 = e1.arg (1) if z3.eq (arg1, const): if is_add: ret_val", "(m, exist_consts, model_completion=True) print 'New witness:', sub # check if the witness is", "they can take >2 args and # decl(*args) doesn't seem to work with", "const): found = True break return found # iterator for all equality terms", "elim_bool_ite (e) # Alternatively, we could have done the following with the caveat", "= [] for c in unique_const_leaves (exp): res.append (c) return res def mk_const_variant", "matrix) # obtain a model m of qvars consistent with (matrix /\\ side_cons)", "eq_terms, done_exp): yield (t, eq) done_exp.append (exp) def 
unique_leaves (exp, leaf_keys=None): def insert_and_yield", "z3.Fixedpoint (ctx=ctx) if not pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set", "(head_decl) if head_key in pred_keys: continue pred_keys.append (head_key) preds [head_decl.name ()] = head_decl", "mk_subst_from_model (m, exist_consts, model_completion=True) return (sub, None, None) else: assert e.is_forall () (univ_consts,", "(matrix, *sub)) # like above, but looks for counterexamples of increasing size; #", "qe_lite once per quantified variable, for better result for i in range (exp.num_vars", "def strip_qblock (expr): if not z3.is_quantifier (expr): return ([], expr) consts = list", "() args = map (elim_bool_ite, exp.children ()) # need to worry about And", "= const.sort () if isinstance (sort, z3.ArraySortRef): val_interp = m [const] if (val_interp", "def full_qe (exp): temp = qe_lite (exp) return qe (temp) def qe_sat (exp):", "unique_var_leaves (exp): for l in unique_leaves (exp): if z3.is_var (l): yield l def", "if any of the witnesses already works for m in witnesses: w =", "for i in range (exp.num_vars ()): e = t (e).as_expr () if not", "matrix = cofactor_term_ite (matrix) if exp.is_forall (): return z3.ForAll (qvars, matrix) else: return", "model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) # like above, but looks for counterexamples of", "for declarations of arguments of exp # if arguments contain de-bruijn variables, they", "if len (exist_consts) > 0: e = z3.Exists (exist_consts, e) return qe_lite (e)", "(exp): yield sel def extract_consts (exp): res = [] for c in unique_const_leaves", "on the free variables; # # let exp = Exists (qvars, matrix) #", "given by the user as a # starting point def solve_exists_forall (exp, given_insts=None,", "in sel_keys: sel_keys.append (k) yield e if sel_keys is None: sel_keys = []", "'New instance:\\n', inst sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute 
(matrix, *sub))", "z3.Not (matrix)) qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr () if is_forall: (qvars, matrix)", "head where head is a QF predicate instance # obtain the head unused_,", "yield leaf else: assert z3.is_var (exp) for leaf in insert_and_yield (exp): yield leaf", "consts, model_completion=False): eqs = [] for const in consts: # treat arrays specially", "= exp.arg (i) if z3.is_app (arg): yield arg.decl () def has_const (exp, const):", "in reversed (range (expr.num_vars ())): v_name = expr.var_name (i) v_sort = expr.var_sort (i)", "leaf in insert_and_yield (exp): yield leaf def unique_const_leaves (exp): for l in unique_leaves", "witnesses: w = z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model (m, exist_consts, model_completion=True) w.add (z3.substitute", "is an equality logically equivalent to (const==t) # appearing in exp; # #", "(matrix) if exp.is_forall (): return z3.ForAll (qvars, matrix) else: return z3.Exists (qvars, matrix)", "process_eq (arg0, arg1) if t is not None: if insert (t): yield (t,", "unique_selects (exp.arg (i), sel_keys): yield sel if z3.is_select (exp): for sel in insert_and_yield", "s = z3.Solver (ctx=exp.ctx) print 'checking validity of ', z3.substitute (matrix, *sub) s.add", "(curr_exp, model=True) if cex_model is not None: print 'FALSE\\n', cex_model print 'Size:', cex_size+1", "z3.is_app (arg): yield arg.decl () def has_const (exp, const): found = False for", "map from names to rules if not pp: print 'No pre-processing' fp.set (slice=False)", "eq_terms = [] if done_exp is None: done_exp = [] for e in", "for leaf in unique_leaves (exp.arg (i), leaf_keys): yield leaf else: assert z3.is_var (exp)", "I = z3.IntSort () B = z3.BoolSort () def z3_translate (x, ctx): \"\"\"", "(z3.Not (z3.substitute (matrix, *sub))) print 'Solver for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr ()", "e pre_consts = extract_consts (exp) pre_const_keys = map (exp_key, pre_consts) t = z3.Tactic", "(const, variant): name = 
'{}_{}'.format (const.decl ().name (), variant) return z3.Const (name, const.sort", "(q, fp, rules) def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx)", "else: val = m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) eqs.append", "starting point def solve_exists_forall (exp, given_insts=None, model=False): print 'Exists Forall exp:', exp assert", "exp return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr () # qe_lite followed by qe def", "if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_term_ite (matrix) if", "rules) def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not", "= s.model () new_insts.append (inst) print 'New instance:\\n', inst sub = mk_subst_from_model (inst,", "head = matrix.arg (1) else: head = matrix assert head is not None", "vars. body => head where head is a QF predicate instance # obtain", "z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 = e1.arg (0) arg1 = e1.arg (1) if z3.eq", "is None: done_exp = [] for e in done_exp: if e.eq (exp): return", "z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or", "yield e if sel_keys is None: sel_keys = [] # post-order if z3.is_app", "in unique_leaves (exp): if not z3.is_var (l): yield l def unique_var_leaves (exp): for", "else: return z3.And (*args) def create_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): fp =", "z3.Z3_OP_ADD) arg0 = e1.arg (0) arg1 = e1.arg (1) if z3.eq (arg1, const):", "(t, exp) else: yield (None, exp) elif z3.is_app (exp): for i in range", "matrix) = strip_qblock (exp) exp = z3.Exists (qvars, z3.Not (matrix)) qf_exp = z3.Tactic", "fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return fp def strip_qblock (expr): if not z3.is_quantifier (expr):", "obtain an under-approx of exp (an 
existentially quantified fml) under the # constraints", "(z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) while True: print 'CURRENT SIZE:', cex_size+1 # look", "size # check if any of the witnesses already works for m in", "model_completion=True) w.add (z3.substitute (z3.Not (matrix), *sub)) if w.check () == z3.unsat: print 'TRUE\\n',", "exp1 is not None: return False return exp_key (exp1) == exp_key (exp2) #", "(qvars, matrix) = strip_qblock (exp) matrix = cofactor_term_ite (matrix) if exp.is_forall (): return", "# decl(*args) doesn't seem to work with the py interface if z3.is_and (exp):", "interface if z3.is_and (exp): return z3.And (*args) elif z3.is_or (exp): return z3.Or (*args)", "= mk_const_variant (const, variant) sub.append ( (const, const_variant) ) return sub def mk_fresh_args", "qe def full_qe (exp): temp = qe_lite (exp) return qe (temp) def qe_sat", "given_preds.itervalues (): fp.register_relation (pred) for i,rule in enumerate (given_fp.get_rules ()): name = 'r'+str(i)", "is not None: return False if exp2 is None and exp1 is not", "in range (exp.num_args ()): for leaf in unique_leaves (exp.arg (i), leaf_keys): yield leaf", "<filename>stats/scripts/z3_utils.py #!/usr/bin/env python ############################################ # # Some utility routines for Z3 # ############################################", "t (exp).as_expr () # tactic introduces new constants which need to be existentially", "sel_keys is None: sel_keys = [] # post-order if z3.is_app (exp): for i", "z3.Solver (ctx=fml.ctx) s.add (fml) if side_cons is not None: for cons in side_cons:", "univ_consts print 'Matrix:', matrix w_cons = [] # constraints for witness # initialize", "leaf else: assert z3.is_var (exp) for leaf in insert_and_yield (exp): yield leaf def", "use_farkas=True, generate_proof_trace=False) return fp def strip_qblock (expr): if not z3.is_quantifier (expr): return ([],", "consts:', univ_consts print 'Matrix:', matrix w_cons = [] # constraints for witness #", 
"variant) return z3.Const (name, const.sort ()) def mk_exp_variant_sub (exp, variant): sub = []", "= process_eq (arg0, arg1) if t is not None: if insert (t): yield", "(qvars, z3.Not (matrix)) else: res = z3.Not (matrix) else: res = qf_exp return", "the original set of # universal variables return (None, cex_model, witnesses) else: #", "model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) eqs.append (const == val) return eqs", "() if res == z3.unsat: return mk_false (ctx=exp.ctx) m = s.model () sub", "(exp): if l.eq (m): return True return False def unique_selects (exp, sel_keys=None): def", "e if sel_keys is None: sel_keys = [] # post-order if z3.is_app (exp):", "of instances for the universals given by the user as a # starting", "arg0 = e1.arg (0) arg1 = e1.arg (1) if z3.eq (arg1, const): if", "= z3.Tactic ('elim-term-ite', ctx=exp.ctx) e = t (exp).as_expr () # tactic introduces new", "def create_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not", "other words, we solve the negation of the given problem, looking for #", "check if fml is sat with given side constraints def check_sat (fml, side_cons=None):", "# each pair (t,eq) is such that eq is an equality logically equivalent", "the weakest under-approx. 
if side_cons is a point def under_approx_qe (exp, side_cons=None): assert", "matrix w_cons = [] # constraints for witness # initialize w with given_insts", "of arguments of exp # if arguments contain de-bruijn variables, they are ignored", "(z3_translate (x.range (), ctx)) return z3.Function (x.name (), *sorts) if ctx is None:", "isinstance (x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x): if x.is_int (): return", "val = m.eval (const, model_completion=model_completion) eqs.append (const == val) return eqs def qe_array", "sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) new_insts = list", "sub # check if the witness is sufficient s = z3.Solver (ctx=exp.ctx) print", "elim_bool_ite (exp) #e = elim_term_ite (e) return e # sampling based method for", "z3.is_app (exp): for i in range (exp.num_args ()): for (t,eq) in unique_eq_terms_on_const (const,", "('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr () def elim_term_ite (exp): if z3.is_quantifier (exp): (qvars,", "= [] for const in unique_const_leaves (exp): const_variant = mk_const_variant (const, variant) sub.append", "= z3.Exists (exist_consts, e) return qe_lite (e) # obtain an under-approx of exp", "if is_add: ret_val = process_eq (arg0, e2-arg1) else: ret_val = process_eq (arg0, e2+arg1)", "e = matrix return e def qe (exp): if not z3.is_quantifier (exp): return", "(exp, const): found = False for l in unique_const_leaves (exp): if z3.eq (l,", "a point def under_approx_qe (exp, side_cons=None): assert z3.is_quantifier (exp) assert not exp.is_forall ()", "if leaf_keys is None: leaf_keys = [] if z3.is_const (exp) and not (z3.is_int_value", "(exp, l): for m in unique_const_leaves (exp): if l.eq (m): return True return", "# constraints 'side_cons' on the free variables; # # let exp = Exists", "None, None) else: assert e.is_forall () (univ_consts, matrix) = strip_qblock (e) print 'Exist", "# no cex of current size # check if 
any of the witnesses", "of the given problem, looking for # counterexamples of increasing size, similar to", "z3.Implies (args[0], args[1]) impl2 = z3.Implies (z3.Not (args[0]), args[2]) return z3.And (impl1, impl2)", "(x.range (), ctx)) return z3.Function (x.name (), *sorts) if ctx is None: return", "t = z3.Tactic ('elim-term-ite', ctx=exp.ctx) e = t (exp).as_expr () # tactic introduces", "constraints 'side_cons' on the free variables; # # let exp = Exists (qvars,", "(), *sorts) if ctx is None: return x.translate (ctx=z3.main_ctx ()) return x.translate (ctx)", "return preds def print_lemmas (fp): preds = get_preds (fp) print for pred in", "= z3.ForAll (qvars, z3.Not (matrix)) else: res = z3.Not (matrix) else: res =", "(validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for pred in given_preds.itervalues (): fp.register_relation (pred) for", "mk_eqs_from_model (m, consts, model_completion=False): eqs = [] for const in consts: # treat", "(exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_term_ite (matrix)", "(impl1, impl2) else: return decl (*args) def elim_ite (exp): e = cofactor_term_ite (exp)", "return z3.IntVal (val, ctx=ctx) def mk_and (args, ctx=None): if len (args) == 0:", "ctx) for i in range (x.arity ())] sorts.append (z3_translate (x.range (), ctx)) return", "above example, we yield (1,const==1) and (None,1==const)); # # assume that const only", "leaf_keys.append (k) yield e if leaf_keys is None: leaf_keys = [] if z3.is_const", "ctx: return x if isinstance (x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x):", "n = fp.get_num_levels (pred) for i in range (n): print '{} : {}'.format", "(slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return fp", "+ 1): lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta (-1, 
pred)) return z3.simplify (z3.And", "() # tactic introduces new constants which need to be existentially quantified post_consts", "Some utility routines for Z3 # ############################################ import z3 I = z3.IntSort ()", "obtain witness for instances res = w.check () if res == z3.unsat: print", "= t (z3.Exists ([v], matrix)).as_expr () e = matrix return e def qe", "of exp # if arguments contain de-bruijn variables, they are ignored def arg_decls", "return s.model () else: return None def mk_subst_from_model (m, consts, model_completion=False): sub =", "variable, for better result for i in range (exp.num_vars ()): e = t", "t is not None: if insert (t): yield (t, exp) else: yield (None,", "qe_lite (e) # obtain an under-approx of exp (an existentially quantified fml) under", "= mk_subst_from_model (m, exist_consts, model_completion=True) w.add (z3.substitute (z3.Not (matrix), *sub)) if w.check ()", "are univ_consts and exist_consts # add a new set of exist consts sub", "= [] curr_matrix_disjs = [] for i in range (cex_size): sub = []", "matrix = strip_qblock (rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1) else:", "() (univ_consts, matrix) = strip_qblock (e) print 'Exist consts:', exist_consts print 'Univ consts:',", "fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for", "if model: return (m, None, None) sub = mk_subst_from_model (m, exist_consts, model_completion=True) return", "(len (post_consts)): post_key = post_const_keys [i] if post_key not in pre_const_keys: exist_consts.append (post_consts", "(const, model_completion=model_completion) sub.append ( (const, val) ) return sub def mk_eqs_from_model (m, consts,", "= post_const_keys [i] if post_key not in pre_const_keys: exist_consts.append (post_consts [i]) if len", "= z3.Implies (args[0], args[1]) impl2 = z3.Implies (z3.Not (args[0]), 
args[2]) return z3.And (impl1,", "if l.eq (m): return True return False def unique_selects (exp, sel_keys=None): def insert_and_yield", "args[1]) impl2 = z3.Implies (z3.Not (args[0]), args[2]) return z3.And (impl1, impl2) else: return", "obtain head_decl head_decl = head.decl () # ensure head_decl is in preds head_key", "for t in eq_terms: if z3.eq (t,e): found = True break if not", "(matrix, *sub) def nnf (exp): t = z3.Tactic ('nnf', ctx=exp.ctx) return t (exp).as_expr", "[] for c in univ_consts: name = '{}_{}'.format (c.decl ().name (), str (i))", "range (x.arity ())] sorts.append (z3_translate (x.range (), ctx)) return z3.Function (x.name (), *sorts)", "() e = matrix return e def qe (exp): if not z3.is_quantifier (exp):", "z3.Const (name, const.sort ()) def mk_exp_variant_sub (exp, variant): sub = [] for const", "matrix)).as_expr () e = matrix return e def qe (exp): if not z3.is_quantifier", "for predicate: ', pred n = fp.get_num_levels (pred) for i in range (n):", "( (c, const) ) new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) while", "sub.append ( (c, const) ) new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj)", "const==1 # vs. 1==const) and to avoid repetitions of terms, we return (None,eq)", "in w_cons: print cons.sexpr () w = z3.Solver (ctx=exp.ctx) for cons in w_cons:", "e def qe (exp): if not z3.is_quantifier (exp): return exp return z3.Tactic ('qe',", "(x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x): if x.is_int (): return z3.IntSort", "z3.unsat: print 'TRUE\\n', sub if model: return (m, None, None) return (sub, None,", "*sub)) # like above, but looks for counterexamples of increasing size; # in", "# Exists U1,U2,..U_cex_size. Forall E. 
Not (matrix), # where U and E are", "is_forall = True (qvars, matrix) = strip_qblock (exp) exp = z3.Exists (qvars, z3.Not", "name = '{}_{}'.format (c.decl ().name (), str (i)) const = z3.Const (name, c.sort", "return exp return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr () # qe_lite followed by qe", "(inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q = fp.parse_file (smt2file)", "is obtained as \"matrix [m/qvars]\" # # this is the weakest under-approx. if", "new_insts = list () witnesses = list () while True: print 'Solver for", "form: # Forall vars. body => head where head is a QF predicate", "cex_size+1 # TODO: split cex_model into list of models for the original set", "matrix) def get_preds (fp): preds = dict () pred_keys = [] for rule_exp", "the caveat that # elim_term_ite introduces new existentially quantified variables which can #", "head_key = exp_key (head_decl) if head_key in pred_keys: continue pred_keys.append (head_key) preds [head_decl.name", "(univ_consts, matrix) = strip_qblock (e) print 'Exist consts:', exist_consts print 'Univ consts:', univ_consts", "if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = cofactor_term_ite (matrix) if", "None and exp2 is not None: return False if exp2 is None and", "z3.Not (matrix)) else: res = z3.Not (matrix) else: res = qf_exp return res", "assert z3.is_quantifier (exp) and not exp.is_forall () (exist_consts, e) = strip_qblock (exp) if", "assert z3.is_app (e1) if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)): return", "(i) consts.append (z3.Const (v_name, v_sort)) matrix = z3.substitute_vars (expr.body (), *consts) return (consts,", "(validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return fp def strip_qblock (expr): if not z3.is_quantifier", "inst = s.model () new_insts.append (inst) print 'New instance:\\n', inst sub = 
mk_subst_from_model", "in insert_and_yield (exp): yield leaf elif z3.is_app (exp): for i in range (exp.num_args", "if exp1 is None and exp2 is not None: return False if exp2", "c in unique_const_leaves (exp): res.append (c) return res def mk_const_variant (const, variant): name", "def elim_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix =", "z3.sat: return s.model () else: return None def mk_subst_from_model (m, consts, model_completion=False): sub", "= strip_qblock (e) for v in qvars: if exp.is_forall (): matrix = t", "print 'Model for new witness:\\n', m sub = mk_subst_from_model (m, exist_consts, model_completion=True) print", "else: val = m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) sub.append", "str (cex_size)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append ( (c,", "(qvars, matrix) = strip_qblock (exp) exp = z3.Exists (qvars, z3.Not (matrix)) qf_exp =", "quant. alternation; # given_insts is a list of instances for the universals given", "new_p = (z3_translate (a, ctx), z3_translate (b, ctx)) res.append (new_p) return res def", "z3.eq instead def exp_key (e): return e.ast.value def match_exp (exp1, exp2): if exp1", "(m, exist_consts, model_completion=True) w.add (z3.substitute (z3.Not (matrix), *sub)) if w.check () == z3.unsat:", "= z3.simplify (e2-arg0) else: ret_val = z3.simplify (arg0-e2) else: if is_add: ret_val =", "()) : arg = exp.arg (i) if z3.is_app (arg): yield arg.decl () def", "user as a # starting point def solve_exists_forall (exp, given_insts=None, model=False): print 'Exists", "(inst) print 'New instance:\\n', inst sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute", "appears as a boolean atom # (i.e. an atom in the boolean structure", "pred n = fp.get_num_levels (pred) for i in range (n): print '{} :", "equality (e.g. const==1 # vs. 
1==const) and to avoid repetitions of terms, we", "(matrix) else: res = qf_exp return res def qe_lite (exp): if not z3.is_quantifier", "() if res == z3.unsat: print 'FALSE\\n', new_insts return (None, new_insts, witnesses) m", "post_key not in pre_const_keys: exist_consts.append (post_consts [i]) if len (exist_consts) > 0: e", "get_level_lemmas (fp, lvl, pred): lemmas = [] for l in range (lvl, fp.get_num_levels", "an under-approx of exp (an existentially quantified fml) under the # constraints 'side_cons'", "= z3.Fixedpoint (ctx=ctx) rules = dict () # map from names to rules", "witness:\\n', m sub = mk_subst_from_model (m, exist_consts, model_completion=True) print 'New witness:', sub #", "(ctx=exp.ctx) for cons in w_cons: w.add (cons) # obtain witness for instances res", "such that eq is an equality logically equivalent to (const==t) # appearing in", "val_sort = sort.range () val = z3.K(val_sort, val_interp.else_value ()) for i in range", "size 'cex_size' # Exists U1,U2,..U_cex_size. Forall E. 
Not (matrix), # where U and", "(i) sort = decl.domain (i) args.append (z3.Const (name, sort)) return args # check", "() if isinstance (sort, z3.ArraySortRef): val_interp = m [const] if (val_interp is not", "(qvars, matrix) return e pre_consts = extract_consts (exp) pre_const_keys = map (exp_key, pre_consts)", "for const in unique_const_leaves (exp): const_variant = mk_const_variant (const, variant) sub.append ( (const,", "not None) and isinstance (val_interp, z3.FuncInterp) : idx_sort = sort.domain () val_sort =", "(matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix) else: e = z3.Exists", "need to be existentially quantified post_consts = extract_consts (e) post_const_keys = map (exp_key,", "# ensure head_decl is in preds head_key = exp_key (head_decl) if head_key in", "(ctx=None): return z3.BoolVal (False, ctx=ctx) def mk_int (val, ctx=None): return z3.IntVal (val, ctx=ctx)", "in unique_const_leaves (exp): if l.eq (m): return True return False def unique_selects (exp,", "= map (elim_bool_ite, exp.children ()) # need to worry about And and Or", "under-approx of exp (an existentially quantified fml) under the # constraints 'side_cons' on", "sub.append ( (const, val) ) return sub def mk_eqs_from_model (m, consts, model_completion=False): eqs", "#!/usr/bin/env python ############################################ # # Some utility routines for Z3 # ############################################ import", "(lvl, fp.get_num_levels (pred) + 1): lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta (-1, pred))", "range (lvl, fp.get_num_levels (pred) + 1): lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta (-1,", "= fp.get_num_levels (pred) for i in range (n): print '{} : {}'.format (i,", "const): ret_val = z3.simplify (e2) else: assert z3.is_app (e1) if not (z3.is_app_of (e1,", "for the # duplicates (in the above example, we yield (1,const==1) and (None,1==const));", "z3.Tactic ('qe-light', ctx=exp.ctx) # invoke 
qe_lite once per quantified variable, for better result", "(qvars, matrix) else: e = z3.Exists (qvars, matrix) return e pre_consts = extract_consts", "dict () pred_keys = [] for rule_exp in fp.get_rules (): # rule_exp is", "(l, const): found = True break return found # iterator for all equality", "given_insts: sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) new_insts =", "exist consts sub = [] for c in univ_consts: name = '{}_{}'.format (c.decl", "consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix w_cons = [] #", "(rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1) else: head = matrix", "if not z3.is_quantifier (exp): return exp is_forall = False if exp.is_forall (): is_forall", "above, but looks for counterexamples of increasing size; # in other words, we", "return False return exp_key (exp1) == exp_key (exp2) # iterator for declarations of", "'{} : {}'.format (i, fp.get_cover_delta (i, pred)) print '{} : {}'.format ('oo', fp.get_cover_delta", "val = m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) sub.append (", "no cex of current size # check if any of the witnesses already", "()): e = t (e).as_expr () if not z3.is_quantifier (e): return e if", "yield sel def extract_consts (exp): res = [] for c in unique_const_leaves (exp):", "# # this is the weakest under-approx. 
if side_cons is a point def", "(exp): return z3.And (*args) elif z3.is_or (exp): return z3.Or (*args) elif is_ite (exp):", "exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix print 'Solving by negating the", "equality logically equivalent to (const==t) # appearing in exp; # # to accommodate", "z3.Z3_OP_XOR) # rewrite ite as conjunction of implications when it appears as a", "yield (t, exp) else: yield (None, exp) elif z3.is_app (exp): for i in", "z3.K(val_sort, val_interp.else_value ()) for i in range (val_interp.num_entries ()): entry = val_interp.entry (i)", "= z3.Fixedpoint (ctx=ctx) if not pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False)", "use_farkas=True, generate_proof_trace=False) q = fp.parse_file (smt2file) return (q, fp) def create_named_fp (smt2file, ctx=None,", "= z3.Exists (qvars, matrix) return e pre_consts = extract_consts (exp) pre_const_keys = map", "not exp.is_forall () (exist_consts, e) = strip_qblock (exp) if not z3.is_quantifier (e): #", "yield (None, exp) elif z3.is_app (exp): for i in range (exp.num_args ()): for", "i in range (decl.arity ()): name = startswith + str (i) sort =", "list () while True: print 'Solver for witness:' for cons in w_cons: print", "const in unique_const_leaves (exp): const_variant = mk_const_variant (const, variant) sub.append ( (const, const_variant)", "sel in unique_selects (exp.arg (i), sel_keys): yield sel if z3.is_select (exp): for sel", "model_completion=model_completion) sub.append ( (const, val) ) return sub def mk_eqs_from_model (m, consts, model_completion=False):", "(temp) def qe_sat (exp): t = z3.Tactic ('qe-sat', ctx=exp.ctx) return t (exp).as_expr ()", "exp_has_const_leaf (exp, l): for m in unique_const_leaves (exp): if l.eq (m): return True", "atom in the boolean structure of exp) def elim_bool_ite (exp): if z3.is_quantifier (exp):", "z3.RealSort (ctx=ctx) if isinstance (x, z3.FuncDeclRef): sorts = [z3_translate (x.domain (i), ctx) for", "(b, ctx)) 
res.append (new_p) return res def mk_true (ctx=None): return z3.BoolVal (True, ctx=ctx)", "z3.is_select (exp): for sel in insert_and_yield (exp): yield sel def extract_consts (exp): res", "of size 'cex_size' # Exists U1,U2,..U_cex_size. Forall E. Not (matrix), # where U", "(e1, e2): if z3.eq (e1, const): ret_val = z3.simplify (e2) else: assert z3.is_app", "return z3.And (*args) elif z3.is_or (exp): return z3.Or (*args) elif is_ite (exp): impl1", "= z3.Tactic ('qe-sat', ctx=exp.ctx) return t (exp).as_expr () def cofactor_term_ite (exp): if z3.is_quantifier", "the witnesses already works for m in witnesses: w = z3.Solver (ctx=exp.ctx) sub", "const) ) new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) while True: print", "if has_const (arg1, const): arg0,arg1 = arg1,arg0 # swap if has_const (arg0, const):", "z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr () # qe_lite followed by qe def full_qe (exp):", "with (matrix /\\ side_cons) # the under-approx. is obtained as \"matrix [m/qvars]\" #", "w.check () == z3.unsat: print 'TRUE\\n', sub if model: return (m, None, None)", "e = elim_bool_ite (e) # Alternatively, we could have done the following with", "'TRUE\\n', sub if model: return (m, None, None) return (sub, None, None) inst", "e if not z3.is_bool (exp): return exp if z3.is_true (exp) or z3.is_false (exp):", "None: return True if exp1 is None and exp2 is not None: return", "res def mk_true (ctx=None): return z3.BoolVal (True, ctx=ctx) def mk_false (ctx=None): return z3.BoolVal", "strip_qblock (exp) matrix = cofactor_term_ite (matrix) if exp.is_forall (): return z3.ForAll (qvars, matrix)", "c in univ_consts: name = '{}_{}'.format (c.decl ().name (), str (cex_size)) const =", "(e): k = exp_key (e) if k not in sel_keys: sel_keys.append (k) yield", "(e): return e if z3.is_quantifier (e): # invoke qe_lite for each variable, separately", "exp.is_forall () (qvars, matrix) = strip_qblock (exp) s = z3.Solver (ctx=exp.ctx) s.add (matrix)", "is in 
preds head_key = exp_key (head_decl) if head_key in pred_keys: continue pred_keys.append", "return qe_lite (e) # obtain an under-approx of exp (an existentially quantified fml)", "exist_consts = [] for i in range (len (post_consts)): post_key = post_const_keys [i]", "models for the original set of # universal variables return (None, cex_model, witnesses)", "for each variable, separately (qvars, matrix) = strip_qblock (e) for v in qvars:", "ret_val = process_eq (arg0, e2+arg1) return ret_val if eq_terms is None: eq_terms =", "sub = mk_subst_from_model (m, qvars, model_completion=True) return z3.substitute (matrix, *sub) def nnf (exp):", "Not (matrix), # where U and E are univ_consts and exist_consts # add", "s.add (c) res = s.check () if res == z3.unsat: return mk_false (ctx=exp.ctx)", "alternative representations of the same equality (e.g. const==1 # vs. 1==const) and to", "side_cons: s.add (cons) res = s.check () if res == z3.sat: return s.model", "model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion) sub.append ( (const, val) ) return", "found = False for t in eq_terms: if z3.eq (t,e): found = True", "= strip_qblock (exp) s = z3.Solver (ctx=exp.ctx) s.add (matrix) if side_cons is not", "for inst in given_insts: sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix,", "z3.is_quantifier (exp): return exp return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr () # qe_lite followed", "terms on const; # # each pair (t,eq) is such that eq is", "found = False for l in unique_const_leaves (exp): if z3.eq (l, const): found", "is a QF predicate instance # obtain the head unused_, matrix = strip_qblock", "(given_fp.get_rules ()): name = 'r'+str(i) fp.add_rule (rule, name=name) rules [name] = rule return", "pair (t,eq) is such that eq is an equality logically equivalent to (const==t)", "equality term appears in another equality term # (so, we can't handle \"(const==0)==b\",", "are 
ignored def arg_decls (exp): for i in range (exp.num_args ()) : arg", "witnesses) else: # no cex of current size # check if any of", "(matrix, *sub) s.add (z3.Not (z3.substitute (matrix, *sub))) print 'Solver for validity:', z3.Not (z3.substitute", "(const==t) # appearing in exp; # # to accommodate for alternative representations of", "in univ_consts: name = '{}_{}'.format (c.decl ().name (), str (i)) const = z3.Const", "res = [] for (a,b) in l: new_p = (z3_translate (a, ctx), z3_translate", "exp_key (e) if k not in sel_keys: sel_keys.append (k) yield e if sel_keys", "ensure head_decl is in preds head_key = exp_key (head_decl) if head_key in pred_keys:", "initialize w with given_insts if given_insts is not None: for inst in given_insts:", "mk_const_variant (const, variant): name = '{}_{}'.format (const.decl ().name (), variant) return z3.Const (name,", "sub.append ( (const, const_variant) ) return sub def mk_fresh_args (decl, startswith=''): args =", "specially due to the else_value sort = const.sort () if isinstance (sort, z3.ArraySortRef):", "# assume that no equality term appears in another equality term # (so,", "variables; # # let exp = Exists (qvars, matrix) # obtain a model", "startswith + str (i) sort = decl.domain (i) args.append (z3.Const (name, sort)) return", "for leaf in insert_and_yield (exp): yield leaf elif z3.is_app (exp): for i in", "B = z3.BoolSort () def z3_translate (x, ctx): \"\"\" A version of z3.AstRef.translate", "() sub = mk_subst_from_model (m, qvars, model_completion=True) return z3.substitute (matrix, *sub) def nnf", "(fml, side_cons=None): s = z3.Solver (ctx=fml.ctx) s.add (fml) if side_cons is not None:", "cofactor_term_ite (matrix) if exp.is_forall (): return z3.ForAll (qvars, matrix) else: return z3.Exists (qvars,", "exist_consts, model_completion=True) print 'New witness:', sub # check if the witness is sufficient", "())): v_name = expr.var_name (i) v_sort = expr.var_sort (i) consts.append (z3.Const (v_name, v_sort))", "None) and 
isinstance (val_interp, z3.FuncInterp) : idx_sort = sort.domain () val_sort = sort.range", "assert not exp.is_forall () (qvars, matrix) = strip_qblock (exp) s = z3.Solver (ctx=exp.ctx)", "is None and exp1 is not None: return False return exp_key (exp1) ==", "z3.IntVal (val, ctx=ctx) def mk_and (args, ctx=None): if len (args) == 0: return", "(exp) s = z3.Solver (ctx=exp.ctx) s.add (matrix) if side_cons is not None: for", "as a boolean atom # (i.e. an atom in the boolean structure of", "fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return", "res def qe_lite (exp): if not z3.is_quantifier (exp): return exp e = exp", "e = exp t = z3.Tactic ('qe-light', ctx=exp.ctx) # invoke qe_lite once per", "new_insts, witnesses) m = w.model () witnesses.append (m) print 'Model for new witness:\\n',", "() new_insts.append (inst) print 'New instance:\\n', inst sub = mk_subst_from_model (inst, univ_consts, model_completion=True)", "return ([], expr) consts = list () for i in reversed (range (expr.num_vars", "def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp:", "eq_terms is None: eq_terms = [] if done_exp is None: done_exp = []", "def mk_subst_from_model (m, consts, model_completion=False): sub = [] for const in consts: #", "# obtain an under-approx of exp (an existentially quantified fml) under the #", "=> head where head is a QF predicate instance # obtain the head", "() # qe_lite followed by qe def full_qe (exp): temp = qe_lite (exp)", "()) else: val = m.eval (const, model_completion=model_completion) else: val = m.eval (const, model_completion=model_completion)", "def mk_true (ctx=None): return z3.BoolVal (True, ctx=ctx) def mk_false (ctx=None): return z3.BoolVal (False,", "ctx=ctx) def mk_int (val, ctx=None): return z3.IntVal (val, ctx=ctx) def mk_and (args, ctx=None):", "None: for cons in 
side_cons: s.add (cons) res = s.check () if res", "() witnesses.append (m) print 'Model for new witness:\\n', m sub = mk_subst_from_model (m,", "(z3.is_int_value (exp) or z3.is_rational_value (exp)): for leaf in insert_and_yield (exp): yield leaf elif", "s = z3.Solver (ctx=fml.ctx) s.add (fml) if side_cons is not None: for cons", "of exp (an existentially quantified fml) under the # constraints 'side_cons' on the", "assert z3.is_app (exp) decl = exp.decl () args = map (elim_bool_ite, exp.children ())", "curr_exp = z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) = solve_exists_forall", "args # check if fml is sat with given side constraints def check_sat", "(exp) exp = z3.Exists (qvars, z3.Not (matrix)) qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr", "point def under_approx_qe (exp, side_cons=None): assert z3.is_quantifier (exp) assert not exp.is_forall () (qvars,", "declarations of arguments of exp # if arguments contain de-bruijn variables, they are", "(exp, variant): sub = [] for const in unique_const_leaves (exp): const_variant = mk_const_variant", "e if z3.is_quantifier (e): # invoke qe_lite for each variable, separately (qvars, matrix)", "elim_term_ite introduces new existentially quantified variables which can # be hard to eliminate", "in witnesses: w = z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model (m, exist_consts, model_completion=True) w.add", "z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x): if x.is_int (): return z3.IntSort (ctx=ctx)", "(z3.Not (args[0]), args[2]) return z3.And (impl1, impl2) else: return decl (*args) def elim_ite", "size, similar to BMC def solve_exists_forall_incremental (exp, model=False): print 'Exists Forall exp:', exp", "the form: # Forall vars. 
body => head where head is a QF", "None: print 'FALSE\\n', cex_model print 'Size:', cex_size+1 # TODO: split cex_model into list", "fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return fp def strip_qblock (expr): if not", "insert_and_yield (exp): yield leaf elif z3.is_app (exp): for i in range (exp.num_args ()):", "mk_subst_from_model (m, qvars, model_completion=True) return z3.substitute (matrix, *sub) def nnf (exp): t =", "qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr () if is_forall: (qvars, matrix) = strip_qblock", "z3.simplify (arg0-e2) else: if is_add: ret_val = process_eq (arg0, e2-arg1) else: ret_val =", "E are univ_consts and exist_consts # add a new set of exist consts", "(exp): return exp is_forall = False if exp.is_forall (): is_forall = True (qvars,", "(exp): for i in range (exp.num_args ()): for leaf in unique_leaves (exp.arg (i),", "z3.IntSort (ctx=ctx) else : assert x.is_real () return z3.RealSort (ctx=ctx) if isinstance (x,", "yield leaf elif z3.is_app (exp): for i in range (exp.num_args ()): for leaf", "= exp.decl () args = map (elim_bool_ite, exp.children ()) # need to worry", "# counterexamples of increasing size, similar to BMC def solve_exists_forall_incremental (exp, model=False): print", "is 1; # # assume that no equality term appears in another equality", "return x.translate (ctx=z3.main_ctx ()) return x.translate (ctx) def translate_pair_list (l, ctx): res =", "found = True break if not found: eq_terms.append (e) return True return False", "= process_eq (arg0, e2-arg1) else: ret_val = process_eq (arg0, e2+arg1) return ret_val if", "for l in range (lvl, fp.get_num_levels (pred) + 1): lemmas.append (fp.get_cover_delta (l, pred))", "an equality logically equivalent to (const==t) # appearing in exp; # # to", "= list () while True: print 'Solver for witness:' for cons in w_cons:", "preds [head_decl.name ()] = head_decl return preds def print_lemmas (fp): preds = get_preds", 
"(i) v_sort = expr.var_sort (i) consts.append (z3.Const (v_name, v_sort)) matrix = z3.substitute_vars (expr.body", "[] for i in range (cex_size): sub = [] for c in univ_consts:", "return fp def strip_qblock (expr): if not z3.is_quantifier (expr): return ([], expr) consts", "witness is sufficient s = z3.Solver (ctx=exp.ctx) print 'checking validity of ', z3.substitute", "and exp2 is None: return True if exp1 is None and exp2 is", "they are ignored def arg_decls (exp): for i in range (exp.num_args ()) :", "unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None): def insert (e): found = False for t", "(exp): return exp if z3.is_true (exp) or z3.is_false (exp): return exp assert z3.is_app", "(): fp.register_relation (pred) for i,rule in enumerate (given_fp.get_rules ()): name = 'r'+str(i) fp.add_rule", "(const==0, x, y) == 3\", etc.) def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None): def", "()): for leaf in unique_leaves (exp.arg (i), leaf_keys): yield leaf else: assert z3.is_var", "eq_terms=None, done_exp=None): def insert (e): found = False for t in eq_terms: if", "z3.Tactic ('elim-term-ite', ctx=exp.ctx) e = t (exp).as_expr () # tactic introduces new constants", "z3.Exists (qvars, matrix) return e if not z3.is_bool (exp): return exp if z3.is_true", "similar to BMC def solve_exists_forall_incremental (exp, model=False): print 'Exists Forall exp:', exp assert", "quantified fml) under the # constraints 'side_cons' on the free variables; # #", "when new expressions are created -- use z3.eq instead def exp_key (e): return", "def exp_has_const_leaf (exp, l): for m in unique_const_leaves (exp): if l.eq (m): return", "(fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta (-1, pred)) return z3.simplify (z3.And (*lemmas)) # doesn't", "= expr.var_sort (i) consts.append (z3.Const (v_name, v_sort)) matrix = z3.substitute_vars (expr.body (), *consts)", "#e = elim_bool_ite (exp) #e = elim_term_ite (e) return e # sampling based", "(exp): 
return z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor (exp): return z3.is_app_of (exp, z3.Z3_OP_XOR) #", "v_name = expr.var_name (i) v_sort = expr.var_sort (i) consts.append (z3.Const (v_name, v_sort)) matrix", "(ctx=z3.main_ctx ()) return x.translate (ctx) def translate_pair_list (l, ctx): res = [] for", "(), *consts) return (consts, matrix) def get_preds (fp): preds = dict () pred_keys", "[] for (a,b) in l: new_p = (z3_translate (a, ctx), z3_translate (b, ctx))", "= extract_consts (e) post_const_keys = map (exp_key, post_consts) exist_consts = [] for i", "(c, const) ) new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) while True:", "model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) new_insts = list () witnesses = list ()", "unique_leaves (exp): if not z3.is_var (l): yield l def unique_var_leaves (exp): for l", "is the weakest under-approx. if side_cons is a point def under_approx_qe (exp, side_cons=None):", "arg.decl () def has_const (exp, const): found = False for l in unique_const_leaves", "(m, None, None) return (sub, None, None) # increment size cex_size += 1", "matrix = t (z3.ForAll ([v], matrix)).as_expr () else: matrix = t (z3.Exists ([v],", "the coefficient # of const is 1; # # assume that no equality", "(i) if z3.is_app (arg): yield arg.decl () def has_const (exp, const): found =", "# post-order if z3.is_app (exp): for i in range (exp.num_args ()): # args", "is None and exp2 is None: return True if exp1 is None and", "(e) if k not in leaf_keys: leaf_keys.append (k) yield e if leaf_keys is", "= e1.arg (0) arg1 = e1.arg (1) if z3.eq (arg1, const): if is_add:", "[] if z3.is_const (exp) and not (z3.is_int_value (exp) or z3.is_rational_value (exp)): for leaf", "()): name = startswith + str (i) sort = decl.domain (i) args.append (z3.Const", "= strip_qblock (exp) exp = z3.Exists (qvars, z3.Not (matrix)) qf_exp = z3.Tactic ('qe-array',", "ite as conjunction of implications when it appears as a boolean atom #", 
"w_cons: w.add (cons) # obtain witness for instances res = w.check () if", "decl.domain (i) args.append (z3.Const (name, sort)) return args # check if fml is", "(), str (i)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append (", "z3.Implies (z3.Not (args[0]), args[2]) return z3.And (impl1, impl2) else: return decl (*args) def", "(args) == 0: return mk_true (ctx=ctx) else: return z3.And (*args) def create_fp (smt2file,", "(exp, sel_keys=None): def insert_and_yield (e): k = exp_key (e) if k not in", "# of const is 1; # # assume that no equality term appears", "ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: print 'No", "weakest under-approx. if side_cons is a point def under_approx_qe (exp, side_cons=None): assert z3.is_quantifier", "under the # constraints 'side_cons' on the free variables; # # let exp", "and not (z3.is_int_value (exp) or z3.is_rational_value (exp)): for leaf in insert_and_yield (exp): yield", "head_decl return preds def print_lemmas (fp): preds = get_preds (fp) print for pred", "(const, variant) sub.append ( (const, const_variant) ) return sub def mk_fresh_args (decl, startswith=''):", "solve the negation of the given problem, looking for # counterexamples of increasing", "# this is the weakest under-approx. 
if side_cons is a point def under_approx_qe", "sel def extract_consts (exp): res = [] for c in unique_const_leaves (exp): res.append", "'{}_{}'.format (const.decl ().name (), variant) return z3.Const (name, const.sort ()) def mk_exp_variant_sub (exp,", "'Univ consts:', univ_consts print 'Matrix:', matrix print 'Solving by negating the problem' cex_size", "rule_exp is a quantified formula representing the rule of the form: # Forall", "model_completion=False): sub = [] for const in consts: # treat arrays specially due", "exp.is_forall (): return z3.ForAll (qvars, matrix) else: return z3.Exists (qvars, matrix) t =", "return (sub, None, None) else: assert e.is_forall () (univ_consts, matrix) = strip_qblock (e)", "s = z3.Solver (ctx=exp.ctx) s.add (matrix) if side_cons is not None: for c", "(exp): return z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite ite as conjunction of implications when", "post_key = post_const_keys [i] if post_key not in pre_const_keys: exist_consts.append (post_consts [i]) if", "continue pred_keys.append (head_key) preds [head_decl.name ()] = head_decl return preds def print_lemmas (fp):", "z3.Not (z3.substitute (matrix, *sub)).sexpr () res = s.check () if res == z3.unsat:", "pp=False, engine='pdr', validate=False): given_fp = z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file (smt2file) given_preds =", "import z3 I = z3.IntSort () B = z3.BoolSort () def z3_translate (x,", "already works for m in witnesses: w = z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model", "return res def qe_lite (exp): if not z3.is_quantifier (exp): return exp e =", "given problem, looking for # counterexamples of increasing size, similar to BMC def", "arithmetic terms and the coefficient # of const is 1; # # assume", "(exist_consts, e) = strip_qblock (exp) if not z3.is_quantifier (e): # just an smt", "( (const, const_variant) ) return sub def mk_fresh_args (decl, startswith=''): args = []", "z3.is_true (exp) or z3.is_false (exp): return exp assert z3.is_app (exp) decl = exp.decl", 
"engine='pdr', validate=False): given_fp = z3.Fixedpoint (ctx=ctx) q = given_fp.parse_file (smt2file) given_preds = get_preds", "print 'Lemmas for predicate: ', pred n = fp.get_num_levels (pred) for i in", "constraints for witness # initialize w with given_insts if given_insts is not None:", "ret_val = z3.simplify (e2-arg0) else: ret_val = z3.simplify (arg0-e2) else: if is_add: ret_val", "= (z3_translate (a, ctx), z3_translate (b, ctx)) res.append (new_p) return res def mk_true", "leaf_keys is None: leaf_keys = [] if z3.is_const (exp) and not (z3.is_int_value (exp)", "= t (exp).as_expr () # tactic introduces new constants which need to be", "else: res = z3.Not (matrix) else: res = qf_exp return res def qe_lite", "= z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists (curr_exist_consts, z3.ForAll (exist_consts,", "the free variables; # # let exp = Exists (qvars, matrix) # obtain", "(exp.arg (i), sel_keys): yield sel if z3.is_select (exp): for sel in insert_and_yield (exp):", "(matrix, *sub))) print 'Solver for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr () res =", "in qvars: if exp.is_forall (): matrix = t (z3.ForAll ([v], matrix)).as_expr () else:", "range (len (post_consts)): post_key = post_const_keys [i] if post_key not in pre_const_keys: exist_consts.append", "isinstance (x, z3.FuncDeclRef): sorts = [z3_translate (x.domain (i), ctx) for i in range", "python ############################################ # # Some utility routines for Z3 # ############################################ import z3", "# starting point def solve_exists_forall (exp, given_insts=None, model=False): print 'Exists Forall exp:', exp", "val_interp.else_value ()) for i in range (val_interp.num_entries ()): entry = val_interp.entry (i) val", "ctx=exp.ctx) return t (exp).as_expr () def is_ite (exp): return z3.is_app_of (exp, z3.Z3_OP_ITE) def", "univ_consts: name = '{}_{}'.format (c.decl ().name (), str (i)) const = z3.Const (name,", "(c) 
return res def mk_const_variant (const, variant): name = '{}_{}'.format (const.decl ().name (),", "(None, cex_model, witnesses) else: # no cex of current size # check if", "z3.is_quantifier (exp): return exp is_forall = False if exp.is_forall (): is_forall = True", "with the py interface if z3.is_and (exp): return z3.And (*args) elif z3.is_or (exp):", "avoid repetitions of terms, we return (None,eq) for the # duplicates (in the", "strip_qblock (exp) s = z3.Solver (ctx=exp.ctx) s.add (matrix) if side_cons is not None:", "exp2 is not None: return False if exp2 is None and exp1 is", "i in range (exp.num_args ()): for (t,eq) in unique_eq_terms_on_const (const, exp.arg (i), eq_terms,", "(exp) and not exp.is_forall () (exist_consts, e) = strip_qblock (exp) if not z3.is_quantifier", "not z3.is_quantifier (exp): return exp is_forall = False if exp.is_forall (): is_forall =", "(e): return e.ast.value def match_exp (exp1, exp2): if exp1 is None and exp2", "z3.BoolVal (False, ctx=ctx) def mk_int (val, ctx=None): return z3.IntVal (val, ctx=ctx) def mk_and", "= [] for e in done_exp: if e.eq (exp): return # sub-dag is", "# the under-approx. 
is obtained as \"matrix [m/qvars]\" # # this is the", "if the witness is sufficient s = z3.Solver (ctx=exp.ctx) print 'checking validity of", "== z3.unsat: return mk_false (ctx=exp.ctx) m = s.model () sub = mk_subst_from_model (m,", "return sub def mk_fresh_args (decl, startswith=''): args = [] for i in range", "*consts) return (consts, matrix) def get_preds (fp): preds = dict () pred_keys =", "x.translate (ctx=z3.main_ctx ()) return x.translate (ctx) def translate_pair_list (l, ctx): res = []", "(i), leaf_keys): yield leaf else: assert z3.is_var (exp) for leaf in insert_and_yield (exp):", "s.model () else: return None def mk_subst_from_model (m, consts, model_completion=False): sub = []", "qe_sat (exp): t = z3.Tactic ('qe-sat', ctx=exp.ctx) return t (exp).as_expr () def cofactor_term_ite", "of implications when it appears as a boolean atom # (i.e. an atom", "unique_const_leaves (exp): if z3.eq (l, const): found = True break return found #", "(e2-arg0) else: ret_val = z3.simplify (arg0-e2) else: if is_add: ret_val = process_eq (arg0,", "ctx=None): return z3.IntVal (val, ctx=ctx) def mk_and (args, ctx=None): if len (args) ==", "'Solver for witness:' for cons in w_cons: print cons.sexpr () w = z3.Solver", "w.model () witnesses.append (m) print 'Model for new witness:\\n', m sub = mk_subst_from_model", "solve_exists_forall_incremental (exp, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp) and not", "is not None: return False return exp_key (exp1) == exp_key (exp2) # iterator", "(fp.get_cover_delta (-1, pred)) return z3.simplify (z3.And (*lemmas)) # doesn't seem to quite work", "exp.is_forall (): e = z3.ForAll (qvars, matrix) else: e = z3.Exists (qvars, matrix)", "t (exp).as_expr () def elim_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock", "break return found # iterator for all equality terms on const; # #", "(ctx) def translate_pair_list (l, ctx): res = [] for (a,b) in l: new_p", "[] for i in range (decl.arity ()): 
name = startswith + str (i)", "= z3.Fixedpoint (ctx=ctx) if not pp: fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set", "return False def unique_selects (exp, sel_keys=None): def insert_and_yield (e): k = exp_key (e)", "map (elim_bool_ite, exp.children ()) # need to worry about And and Or because", "(engine=engine, use_farkas=True, generate_proof_trace=False) q = fp.parse_file (smt2file) return (q, fp) def create_named_fp (smt2file,", "e2-arg1) else: ret_val = process_eq (arg0, e2+arg1) return ret_val if eq_terms is None:", "for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr () res = s.check () if res", "head_decl is in preds head_key = exp_key (head_decl) if head_key in pred_keys: continue", "(fp, lvl, pred): lemmas = [] for l in range (lvl, fp.get_num_levels (pred)", "(l): yield l def unique_var_leaves (exp): for l in unique_leaves (exp): if z3.is_var", "()): entry = val_interp.entry (i) val = z3.Store (val, entry.arg_value (0), entry.value ())", "e = cofactor_term_ite (exp) e = elim_bool_ite (e) # Alternatively, we could have", "existentially quantified variables which can # be hard to eliminate by qe_lite #e", "split cex_model into list of models for the original set of # universal", "'Matrix:', matrix w_cons = [] # constraints for witness # initialize w with", "sub def mk_fresh_args (decl, startswith=''): args = [] for i in range (decl.arity", "solve_exists_forall (curr_exp, model=True) if cex_model is not None: print 'FALSE\\n', cex_model print 'Size:',", "for i in range (exp.num_args ()): for (t,eq) in unique_eq_terms_on_const (const, exp.arg (i),", "sufficient s = z3.Solver (ctx=exp.ctx) print 'checking validity of ', z3.substitute (matrix, *sub)", "A version of z3.AstRef.translate that handles sorts and function declarations correctly\"\"\" if x.ctx", "name = 'r'+str(i) fp.add_rule (rule, name=name) rules [name] = rule return (q, fp,", "if is_forall: (qvars, matrix) = strip_qblock (qf_exp) if len (qvars) > 0: 
res", "()) curr_exist_consts.append (const) sub.append ( (c, const) ) new_disj = z3.substitute (z3.Not (matrix),", "ctx)) res.append (new_p) return res def mk_true (ctx=None): return z3.BoolVal (True, ctx=ctx) def", "(l): yield l def exp_has_const_leaf (exp, l): for m in unique_const_leaves (exp): if", "increasing size; # in other words, we solve the negation of the given", "rules [name] = rule return (q, fp, rules) def create_empty_fp (ctx=None, pp=False, engine='pdr',", "z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1) else: head = matrix assert head", "new set of exist consts sub = [] for c in univ_consts: name", "(args, ctx=None): if len (args) == 0: return mk_true (ctx=ctx) else: return z3.And", "if arguments contain de-bruijn variables, they are ignored def arg_decls (exp): for i", "consts:', univ_consts print 'Matrix:', matrix print 'Solving by negating the problem' cex_size =", "= z3.Solver (ctx=fml.ctx) s.add (fml) if side_cons is not None: for cons in", "(m, exist_consts, model_completion=True) return (sub, None, None) else: assert e.is_forall () (univ_consts, matrix)", "sort = decl.domain (i) args.append (z3.Const (name, sort)) return args # check if", "(*args) def elim_ite (exp): e = cofactor_term_ite (exp) e = elim_bool_ite (e) #", "x.ctx == ctx: return x if isinstance (x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if", "(exp): if not z3.is_quantifier (exp): return exp is_forall = False if exp.is_forall ():", "(qvars) > 0: res = z3.ForAll (qvars, z3.Not (matrix)) else: res = z3.Not", "t (z3.ForAll ([v], matrix)).as_expr () else: matrix = t (z3.Exists ([v], matrix)).as_expr ()", "arg_decls (exp): for i in range (exp.num_args ()) : arg = exp.arg (i)", "head.decl () # ensure head_decl is in preds head_key = exp_key (head_decl) if", "#e = elim_term_ite (e) return e # sampling based method for quant. 
alternation;", "= elim_bool_ite (e) # Alternatively, we could have done the following with the", "for instances res = w.check () if res == z3.unsat: print 'FALSE\\n', new_insts", "= z3.Tactic ('nnf', ctx=exp.ctx) return t (exp).as_expr () def is_ite (exp): return z3.is_app_of", "side_cons=None): assert z3.is_quantifier (exp) assert not exp.is_forall () (qvars, matrix) = strip_qblock (exp)", "return z3.Const (name, const.sort ()) def mk_exp_variant_sub (exp, variant): sub = [] for", "[const] if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp) : idx_sort =", "leaf elif z3.is_app (exp): for i in range (exp.num_args ()): for leaf in", "i in reversed (range (expr.num_vars ())): v_name = expr.var_name (i) v_sort = expr.var_sort", "(name, sort)) return args # check if fml is sat with given side", "(val, ctx=ctx) def mk_and (args, ctx=None): if len (args) == 0: return mk_true", "('qe-array', ctx=exp.ctx) (exp).as_expr () if is_forall: (qvars, matrix) = strip_qblock (qf_exp) if len", "temp = qe_lite (exp) return qe (temp) def qe_sat (exp): t = z3.Tactic", "z3.BoolVal (True, ctx=ctx) def mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx) def mk_int (val,", "i in range (len (post_consts)): post_key = post_const_keys [i] if post_key not in", "if z3.is_true (exp) or z3.is_false (exp): return exp assert z3.is_app (exp) decl =", "side_cons is a point def under_approx_qe (exp, side_cons=None): assert z3.is_quantifier (exp) assert not", "witness:', sub # check if the witness is sufficient s = z3.Solver (ctx=exp.ctx)", "consts = list () for i in reversed (range (expr.num_vars ())): v_name =", "val) return eqs def qe_array (exp): if not z3.is_quantifier (exp): return exp is_forall", "words, we solve the negation of the given problem, looking for # counterexamples", "iterator for declarations of arguments of exp # if arguments contain de-bruijn variables,", "exp assert z3.is_app (exp) decl = exp.decl () args = map (elim_bool_ite, exp.children", "in 
unique_const_leaves (exp): if z3.eq (l, const): found = True break return found", "smt problem m = check_sat (e) if m is None: return (None, None,", "() if not z3.is_quantifier (e): return e if z3.is_quantifier (e): # invoke qe_lite", "############################################ import z3 I = z3.IntSort () B = z3.BoolSort () def z3_translate", "(cons) res = s.check () if res == z3.sat: return s.model () else:", "problem' cex_size = 0 curr_exist_consts = [] curr_matrix_disjs = [] for i in", "(qf_exp) if len (qvars) > 0: res = z3.ForAll (qvars, z3.Not (matrix)) else:", "const; # # each pair (t,eq) is such that eq is an equality", "validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr () res = s.check () if res ==", "val = z3.K(val_sort, val_interp.else_value ()) for i in range (val_interp.num_entries ()): entry =", "( (const, val) ) return sub def mk_eqs_from_model (m, consts, model_completion=False): eqs =", "== val) return eqs def qe_array (exp): if not z3.is_quantifier (exp): return exp", "side_cons is not None: for cons in side_cons: s.add (cons) res = s.check", "Forall E. 
Not (matrix), # where U and E are univ_consts and exist_consts", "z3.is_app_of (exp, z3.Z3_OP_ITE) def is_xor (exp): return z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite ite", "z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)): return None is_add = z3.is_app_of (e1, z3.Z3_OP_ADD) arg0", "(i), eq_terms, done_exp): yield (t, eq) done_exp.append (exp) def unique_leaves (exp, leaf_keys=None): def", "(exp): yield leaf elif z3.is_app (exp): for i in range (exp.num_args ()): for", "[] # constraints for witness # initialize w with given_insts if given_insts is", "'Univ consts:', univ_consts print 'Matrix:', matrix w_cons = [] # constraints for witness", "cons in w_cons: print cons.sexpr () w = z3.Solver (ctx=exp.ctx) for cons in", "# elim_term_ite introduces new existentially quantified variables which can # be hard to", "[head_decl.name ()] = head_decl return preds def print_lemmas (fp): preds = get_preds (fp)", "= exp.arg (1) if has_const (arg1, const): arg0,arg1 = arg1,arg0 # swap if", "if k not in sel_keys: sel_keys.append (k) yield e if sel_keys is None:", "z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite ite as conjunction of implications when it appears", "[] # post-order if z3.is_app (exp): for i in range (exp.num_args ()): #", "res = [] for c in unique_const_leaves (exp): res.append (c) return res def", "# (i.e. 
an atom in the boolean structure of exp) def elim_bool_ite (exp):", "(*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True) if cex_model is not None:", "if not z3.is_var (l): yield l def unique_var_leaves (exp): for l in unique_leaves", "(i) args.append (z3.Const (name, sort)) return args # check if fml is sat", "return (q, fp, rules) def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint", "arg0 = exp.arg (0) arg1 = exp.arg (1) if has_const (arg1, const): arg0,arg1", "mk_true (ctx=ctx) else: return z3.And (*args) def create_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False):", "if m is None: return (None, None, None) else: if model: return (m,", "(x.name (), *sorts) if ctx is None: return x.translate (ctx=z3.main_ctx ()) return x.translate", "for v in qvars: if exp.is_forall (): matrix = t (z3.ForAll ([v], matrix)).as_expr", "mk_and (args, ctx=None): if len (args) == 0: return mk_true (ctx=ctx) else: return", "inst sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) # like", "'TRUE\\n', sub if model: return (m, None, None) return (sub, None, None) #", "().name (), str (cex_size)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append", "(e) return e # sampling based method for quant. 
alternation; # given_insts is", "= startswith + str (i) sort = decl.domain (i) args.append (z3.Const (name, sort))", "return qe (temp) def qe_sat (exp): t = z3.Tactic ('qe-sat', ctx=exp.ctx) return t", "ret_val if eq_terms is None: eq_terms = [] if done_exp is None: done_exp", "val = m.eval (const, model_completion=model_completion) sub.append ( (const, val) ) return sub def", "False for t in eq_terms: if z3.eq (t,e): found = True break if", "assert x.is_real () return z3.RealSort (ctx=ctx) if isinstance (x, z3.FuncDeclRef): sorts = [z3_translate", "seem to quite work when new expressions are created -- use z3.eq instead", "pred in preds.itervalues (): print 'Lemmas for predicate: ', pred n = fp.get_num_levels", "can't handle \"(const==0)==b\", \"ite (const==0, x, y) == 3\", etc.) def unique_eq_terms_on_const (const,", "for l in unique_const_leaves (exp): if z3.eq (l, const): found = True break", "extract_consts (exp): res = [] for c in unique_const_leaves (exp): res.append (c) return", "def unique_leaves (exp, leaf_keys=None): def insert_and_yield (e): k = exp_key (e) if k", "None: return False return exp_key (exp1) == exp_key (exp2) # iterator for declarations", "z3.simplify (e2-arg0) else: ret_val = z3.simplify (arg0-e2) else: if is_add: ret_val = process_eq", "def is_xor (exp): return z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite ite as conjunction of", "in enumerate (given_fp.get_rules ()): name = 'r'+str(i) fp.add_rule (rule, name=name) rules [name] =", "fp.parse_file (smt2file) return (q, fp) def create_named_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): given_fp", "(-1, pred)) print def get_level_lemmas (fp, lvl, pred): lemmas = [] for l", "(e): # invoke qe_lite for each variable, separately (qvars, matrix) = strip_qblock (e)", "z3.Or (*args) elif is_ite (exp): impl1 = z3.Implies (args[0], args[1]) impl2 = z3.Implies", "(qvars, matrix) return e if not z3.is_bool (exp): return exp if z3.is_true (exp)", "('qe-sat', ctx=exp.ctx) return t 
(exp).as_expr () def cofactor_term_ite (exp): if z3.is_quantifier (exp): (qvars,", "is not None # obtain head_decl head_decl = head.decl () # ensure head_decl", "exp.arg (i), eq_terms, done_exp): yield (t, eq) done_exp.append (exp) def unique_leaves (exp, leaf_keys=None):", "= [] for i in range (decl.arity ()): name = startswith + str", "impl2) else: return decl (*args) def elim_ite (exp): e = cofactor_term_ite (exp) e", "def insert (e): found = False for t in eq_terms: if z3.eq (t,e):", "has_const (exp, const): found = False for l in unique_const_leaves (exp): if z3.eq", "of models for the original set of # universal variables return (None, cex_model,", "strip_qblock (exp) matrix = elim_term_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars,", "fml) under the # constraints 'side_cons' on the free variables; # # let", "unique_eq_terms_on_const (const, exp.arg (i), eq_terms, done_exp): yield (t, eq) done_exp.append (exp) def unique_leaves", "else: matrix = t (z3.Exists ([v], matrix)).as_expr () e = matrix return e", "fp) def create_named_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): given_fp = z3.Fixedpoint (ctx=ctx) q", "() else: matrix = t (z3.Exists ([v], matrix)).as_expr () e = matrix return", "i in range (exp.num_args ()): # args are the array and the idx", "be existentially quantified post_consts = extract_consts (e) post_const_keys = map (exp_key, post_consts) exist_consts", "sel_keys: sel_keys.append (k) yield e if sel_keys is None: sel_keys = [] #", "w_cons.append (z3.substitute (matrix, *sub)) new_insts = list () witnesses = list () while", "not in sel_keys: sel_keys.append (k) yield e if sel_keys is None: sel_keys =", "not in leaf_keys: leaf_keys.append (k) yield e if leaf_keys is None: leaf_keys =", "pre_consts) t = z3.Tactic ('elim-term-ite', ctx=exp.ctx) e = t (exp).as_expr () # tactic", "z3.Exists (qvars, matrix) return e pre_consts = extract_consts (exp) pre_const_keys = map (exp_key,", "len (exist_consts) > 0: e = z3.Exists 
(exist_consts, e) return qe_lite (e) #", "return x if isinstance (x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x): if", "process_eq (arg0, e2-arg1) else: ret_val = process_eq (arg0, e2+arg1) return ret_val if eq_terms", "(exp, z3.Z3_OP_XOR) # rewrite ite as conjunction of implications when it appears as", "().name (), str (i)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append", "variables which can # be hard to eliminate by qe_lite #e = elim_bool_ite", "z3.Solver (ctx=exp.ctx) for cons in w_cons: w.add (cons) # obtain witness for instances", "(exp): impl1 = z3.Implies (args[0], args[1]) impl2 = z3.Implies (z3.Not (args[0]), args[2]) return", "(e1) if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)): return None is_add", "= elim_bool_ite (exp) #e = elim_term_ite (e) return e # sampling based method", "else: # no cex of current size # check if any of the", "for leaf in insert_and_yield (exp): yield leaf def unique_const_leaves (exp): for l in", "(consts, matrix) def get_preds (fp): preds = dict () pred_keys = [] for", "not None: for inst in given_insts: sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append", "about And and Or because they can take >2 args and # decl(*args)", "= s.model () sub = mk_subst_from_model (m, qvars, model_completion=True) return z3.substitute (matrix, *sub)", ") new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists (curr_exist_consts,", "== 0: return mk_true (ctx=ctx) else: return z3.And (*args) def create_fp (smt2file, ctx=None,", "(*args) def create_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if", "assert z3.is_var (exp) for leaf in insert_and_yield (exp): yield leaf def unique_const_leaves (exp):", "_unused_insts) = solve_exists_forall (curr_exp, model=True) if cex_model is not None: print 'FALSE\\n', cex_model", "and the coefficient 
# of const is 1; # # assume that no", "z3.ForAll (qvars, matrix) else: return z3.Exists (qvars, matrix) t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx)", "by qe_lite #e = elim_bool_ite (exp) #e = elim_term_ite (e) return e #", "(z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)): return None is_add = z3.is_app_of (e1,", "of the form: # Forall vars. body => head where head is a", "(pred) for i in range (n): print '{} : {}'.format (i, fp.get_cover_delta (i,", "z3.is_app (e1) if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)): return None", "array and the idx for sel in unique_selects (exp.arg (i), sel_keys): yield sel", "assume that no equality term appears in another equality term # (so, we", "else: if model: return (m, None, None) sub = mk_subst_from_model (m, exist_consts, model_completion=True)", "startswith=''): args = [] for i in range (decl.arity ()): name = startswith", "if exp.is_forall (): e = z3.ForAll (qvars, matrix) else: e = z3.Exists (qvars,", "current size # check if any of the witnesses already works for m", "def mk_exp_variant_sub (exp, variant): sub = [] for const in unique_const_leaves (exp): const_variant", "add a new set of exist consts sub = [] for c in", "() pred_keys = [] for rule_exp in fp.get_rules (): # rule_exp is a", "= get_preds (fp) print for pred in preds.itervalues (): print 'Lemmas for predicate:", "is_ite (exp): impl1 = z3.Implies (args[0], args[1]) impl2 = z3.Implies (z3.Not (args[0]), args[2])", "(args[0]), args[2]) return z3.And (impl1, impl2) else: return decl (*args) def elim_ite (exp):", "return z3.RealSort (ctx=ctx) if isinstance (x, z3.FuncDeclRef): sorts = [z3_translate (x.domain (i), ctx)", "z3.is_false (exp): return exp assert z3.is_app (exp) decl = exp.decl () args =", "list of instances for the universals given by the user as a #", "fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for pred in 
given_preds.itervalues ():", "arg0,arg1 = arg1,arg0 # swap if has_const (arg0, const): t = process_eq (arg0,", "is not None: for cons in side_cons: s.add (cons) res = s.check ()", "invoke qe_lite once per quantified variable, for better result for i in range", "if not z3.is_quantifier (e): # just an smt problem m = check_sat (e)", "(val_interp.num_entries ()): entry = val_interp.entry (i) val = z3.Store (val, entry.arg_value (0), entry.value", "(1,const==1) and (None,1==const)); # # assume that const only appears in simple arithmetic", "'Lemmas for predicate: ', pred n = fp.get_num_levels (pred) for i in range", "(exp) #e = elim_term_ite (e) return e # sampling based method for quant.", "(inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) # like above, but looks for", "= matrix assert head is not None # obtain head_decl head_decl = head.decl", "l def unique_var_leaves (exp): for l in unique_leaves (exp): if z3.is_var (l): yield", "variables return (None, cex_model, witnesses) else: # no cex of current size #", "ctx): \"\"\" A version of z3.AstRef.translate that handles sorts and function declarations correctly\"\"\"", "not z3.is_quantifier (expr): return ([], expr) consts = list () for i in", "cons in w_cons: w.add (cons) # obtain witness for instances res = w.check", "if w.check () == z3.unsat: print 'TRUE\\n', sub if model: return (m, None,", "z3.is_var (l): yield l def unique_var_leaves (exp): for l in unique_leaves (exp): if", "t (e).as_expr () if not z3.is_quantifier (e): return e if z3.is_quantifier (e): #", "return (m, None, None) return (sub, None, None) inst = s.model () new_insts.append", "sorts = [z3_translate (x.domain (i), ctx) for i in range (x.arity ())] sorts.append", "if z3.is_var (l): yield l def exp_has_const_leaf (exp, l): for m in unique_const_leaves", "consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix print 'Solving by negating", "the rule of the form: # Forall vars. 
body => head where head", "s.check () if res == z3.unsat: return mk_false (ctx=exp.ctx) m = s.model ()", "rules if not pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False)", "else: ret_val = process_eq (arg0, e2+arg1) return ret_val if eq_terms is None: eq_terms", "\"(const==0)==b\", \"ite (const==0, x, y) == 3\", etc.) def unique_eq_terms_on_const (const, exp, eq_terms=None,", "of increasing size, similar to BMC def solve_exists_forall_incremental (exp, model=False): print 'Exists Forall", "and to avoid repetitions of terms, we return (None,eq) for the # duplicates", "is None: leaf_keys = [] if z3.is_const (exp) and not (z3.is_int_value (exp) or", "looking for # counterexamples of increasing size, similar to BMC def solve_exists_forall_incremental (exp,", "= elim_term_ite (e) return e # sampling based method for quant. alternation; #", "the idx for sel in unique_selects (exp.arg (i), sel_keys): yield sel if z3.is_select", "sub def mk_eqs_from_model (m, consts, model_completion=False): eqs = [] for const in consts:", "(c, const) ) new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp =", "k = exp_key (e) if k not in leaf_keys: leaf_keys.append (k) yield e", "# obtain the head unused_, matrix = strip_qblock (rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES):", "def unique_selects (exp, sel_keys=None): def insert_and_yield (e): k = exp_key (e) if k", "(arg0, e2-arg1) else: ret_val = process_eq (arg0, e2+arg1) return ret_val if eq_terms is", "return mk_true (ctx=ctx) else: return z3.And (*args) def create_fp (smt2file, ctx=None, pp=False, engine='pdr',", "(inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for pred in given_preds.itervalues (): fp.register_relation", "if not z3.is_bool (exp): return exp if z3.is_true (exp) or z3.is_false (exp): return", "constraints def check_sat (fml, side_cons=None): 
s = z3.Solver (ctx=fml.ctx) s.add (fml) if side_cons", "is sufficient s = z3.Solver (ctx=exp.ctx) print 'checking validity of ', z3.substitute (matrix,", "fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q = fp.parse_file (smt2file) return (q, fp) def create_named_fp", "process_eq (arg0, e2+arg1) return ret_val if eq_terms is None: eq_terms = [] if", "better result for i in range (exp.num_vars ()): e = t (e).as_expr ()", "z3_translate (b, ctx)) res.append (new_p) return res def mk_true (ctx=None): return z3.BoolVal (True,", "(): return z3.IntSort (ctx=ctx) else : assert x.is_real () return z3.RealSort (ctx=ctx) if", "for cons in side_cons: s.add (cons) res = s.check () if res ==", "res.append (new_p) return res def mk_true (ctx=None): return z3.BoolVal (True, ctx=ctx) def mk_false", "fml is sat with given side constraints def check_sat (fml, side_cons=None): s =", "nnf (exp): t = z3.Tactic ('nnf', ctx=exp.ctx) return t (exp).as_expr () def is_ite", "(qvars, matrix) else: return z3.Exists (qvars, matrix) t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return", "witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True) if cex_model is not None: print 'FALSE\\n',", "insert (t): yield (t, exp) else: yield (None, exp) elif z3.is_app (exp): for", "() val = z3.K(val_sort, val_interp.else_value ()) for i in range (val_interp.num_entries ()): entry", "univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) new_insts = list () witnesses = list", "= z3.Store (val, entry.arg_value (0), entry.value ()) else: val = m.eval (const, model_completion=model_completion)", "= strip_qblock (rule_exp) if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1) else: head", "= mk_subst_from_model (m, qvars, model_completion=True) return z3.substitute (matrix, *sub) def nnf (exp): t", "cex_model, witnesses) else: # no cex of current size # check if any", "(k) yield e if leaf_keys is None: leaf_keys = [] if z3.is_const (exp)", 
"for # counterexamples of increasing size, similar to BMC def solve_exists_forall_incremental (exp, model=False):", "hard to eliminate by qe_lite #e = elim_bool_ite (exp) #e = elim_term_ite (e)", "print 'Matrix:', matrix print 'Solving by negating the problem' cex_size = 0 curr_exist_consts", "check_sat (e) if m is None: return (None, None, None) else: if model:", "the universals given by the user as a # starting point def solve_exists_forall", ") return sub def mk_eqs_from_model (m, consts, model_completion=False): eqs = [] for const", "v_sort = expr.var_sort (i) consts.append (z3.Const (v_name, v_sort)) matrix = z3.substitute_vars (expr.body (),", "consts: # treat arrays specially due to the else_value sort = const.sort ()", "(n): print '{} : {}'.format (i, fp.get_cover_delta (i, pred)) print '{} : {}'.format", "t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr () def elim_term_ite (exp): if", "m.eval (const, model_completion=model_completion) eqs.append (const == val) return eqs def qe_array (exp): if", "strip_qblock (exp) exp = z3.Exists (qvars, z3.Not (matrix)) qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx)", "side constraints def check_sat (fml, side_cons=None): s = z3.Solver (ctx=fml.ctx) s.add (fml) if", "is_add: ret_val = process_eq (arg0, e2-arg1) else: ret_val = process_eq (arg0, e2+arg1) return", "consts, model_completion=False): sub = [] for const in consts: # treat arrays specially", "(qvars, matrix) else: e = z3.Exists (qvars, matrix) return e if not z3.is_bool", "[const] if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp): idx_sort = sort.domain", "predicate instance # obtain the head unused_, matrix = strip_qblock (rule_exp) if z3.is_app_of", "# ############################################ import z3 I = z3.IntSort () B = z3.BoolSort () def", "return z3.And (*args) def create_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint", "arg1 = exp.arg (1) if has_const (arg1, const): 
arg0,arg1 = arg1,arg0 # swap", "# treat arrays specially due to the else_value sort = const.sort () if", "matrix) = strip_qblock (qf_exp) if len (qvars) > 0: res = z3.ForAll (qvars,", "variant): sub = [] for const in unique_const_leaves (exp): const_variant = mk_const_variant (const,", "z3.is_quantifier (e): return e if z3.is_quantifier (e): # invoke qe_lite for each variable,", "the user as a # starting point def solve_exists_forall (exp, given_insts=None, model=False): print", "fp = z3.Fixedpoint (ctx=ctx) if not pp: fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False)", "if not z3.is_quantifier (expr): return ([], expr) consts = list () for i", "z3.simplify (e2) else: assert z3.is_app (e1) if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of", "0: res = z3.ForAll (qvars, z3.Not (matrix)) else: res = z3.Not (matrix) else:", "can # be hard to eliminate by qe_lite #e = elim_bool_ite (exp) #e", "args.append (z3.Const (name, sort)) return args # check if fml is sat with", "(i, fp.get_cover_delta (i, pred)) print '{} : {}'.format ('oo', fp.get_cover_delta (-1, pred)) print", "of z3.AstRef.translate that handles sorts and function declarations correctly\"\"\" if x.ctx == ctx:", "(val, entry.arg_value (0), entry.value ()) else: val = m.eval (const, model_completion=model_completion) else: val", "arg = exp.arg (i) if z3.is_app (arg): yield arg.decl () def has_const (exp,", "range (exp.num_vars ()): e = t (e).as_expr () if not z3.is_quantifier (e): return", "= dict () # map from names to rules if not pp: print", "# Forall vars. 
body => head where head is a QF predicate instance", "sel_keys): yield sel if z3.is_select (exp): for sel in insert_and_yield (exp): yield sel", "expressions are created -- use z3.eq instead def exp_key (e): return e.ast.value def", "= check_sat (e) if m is None: return (None, None, None) else: if", "(m, qvars, model_completion=True) return z3.substitute (matrix, *sub) def nnf (exp): t = z3.Tactic", "z3.FuncDeclRef): sorts = [z3_translate (x.domain (i), ctx) for i in range (x.arity ())]", "rule of the form: # Forall vars. body => head where head is", "def get_preds (fp): preds = dict () pred_keys = [] for rule_exp in", "None, None) return (sub, None, None) inst = s.model () new_insts.append (inst) print", "ctx=exp.ctx) return t (exp).as_expr () def cofactor_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix)", "fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) for pred in", "return res def mk_const_variant (const, variant): name = '{}_{}'.format (const.decl ().name (), variant)", "i in range (exp.num_args ()) : arg = exp.arg (i) if z3.is_app (arg):", "(exp).as_expr () if is_forall: (qvars, matrix) = strip_qblock (qf_exp) if len (qvars) >", "z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_term_ite (matrix) if exp.is_forall", "(e) print 'Exist consts:', exist_consts print 'Univ consts:', univ_consts print 'Matrix:', matrix print", "def qe_array (exp): if not z3.is_quantifier (exp): return exp is_forall = False if", "z3.BoolSort () def z3_translate (x, ctx): \"\"\" A version of z3.AstRef.translate that handles", "work with the py interface if z3.is_and (exp): return z3.And (*args) elif z3.is_or", "(matrix, *sub)) new_insts = list () witnesses = list () while True: print", "list () witnesses = list () while True: print 'Solver for witness:' for", "(*lemmas)) # doesn't seem to quite work when new expressions are created --", 
"args are the array and the idx for sel in unique_selects (exp.arg (i),", "(ctx=exp.ctx) print 'checking validity of ', z3.substitute (matrix, *sub) s.add (z3.Not (z3.substitute (matrix,", "(matrix), *sub) curr_matrix_disjs.append (new_disj) while True: print 'CURRENT SIZE:', cex_size+1 # look for", "else : assert x.is_real () return z3.RealSort (ctx=ctx) if isinstance (x, z3.FuncDeclRef): sorts", "found # iterator for all equality terms on const; # # each pair", "leaf in unique_leaves (exp.arg (i), leaf_keys): yield leaf else: assert z3.is_var (exp) for", "the same equality (e.g. const==1 # vs. 1==const) and to avoid repetitions of", "(e) if k not in sel_keys: sel_keys.append (k) yield e if sel_keys is", "if z3.is_quantifier (e): # invoke qe_lite for each variable, separately (qvars, matrix) =", "= map (exp_key, pre_consts) t = z3.Tactic ('elim-term-ite', ctx=exp.ctx) e = t (exp).as_expr", "None: eq_terms = [] if done_exp is None: done_exp = [] for e", "in range (exp.num_args ()): # args are the array and the idx for", "an atom in the boolean structure of exp) def elim_bool_ite (exp): if z3.is_quantifier", "def elim_bool_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix =", "(z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs)))", "z3.is_arith_sort (x): if x.is_int (): return z3.IntSort (ctx=ctx) else : assert x.is_real ()", "return eqs def qe_array (exp): if not z3.is_quantifier (exp): return exp is_forall =", "a quantified formula representing the rule of the form: # Forall vars. 
body", "if k not in leaf_keys: leaf_keys.append (k) yield e if leaf_keys is None:", "z3.is_quantifier (expr): return ([], expr) consts = list () for i in reversed", "witness # initialize w with given_insts if given_insts is not None: for inst", "found: eq_terms.append (e) return True return False def process_eq (e1, e2): if z3.eq", "else: assert z3.is_app (e1) if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)):", "(new_p) return res def mk_true (ctx=None): return z3.BoolVal (True, ctx=ctx) def mk_false (ctx=None):", "z3.unsat: print 'FALSE\\n', new_insts return (None, new_insts, witnesses) m = w.model () witnesses.append", "validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: fp.set (slice=False) fp.set (inline_linear=False) fp.set", "method for quant. alternation; # given_insts is a list of instances for the", "= False for t in eq_terms: if z3.eq (t,e): found = True break", "Exists (qvars, matrix) # obtain a model m of qvars consistent with (matrix", "(qvars, matrix) # obtain a model m of qvars consistent with (matrix /\\", "Forall exp:', exp assert z3.is_quantifier (exp) and not exp.is_forall () (exist_consts, e) =", "(ctx=ctx) if not pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False)", "qf_exp return res def qe_lite (exp): if not z3.is_quantifier (exp): return exp e", "(expr.body (), *consts) return (consts, matrix) def get_preds (fp): preds = dict ()", "() if res == z3.sat: return s.model () else: return None def mk_subst_from_model", "(qvars, matrix) t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx) return t (exp).as_expr () def elim_term_ite", "universals given by the user as a # starting point def solve_exists_forall (exp,", "z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) while True: print 'CURRENT SIZE:', cex_size+1 #", "(exp, z3.Z3_OP_ITE) def is_xor (exp): return z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite ite as", "# qe_lite followed by qe def 
full_qe (exp): temp = qe_lite (exp) return", "for witness # initialize w with given_insts if given_insts is not None: for", "(m) print 'Model for new witness:\\n', m sub = mk_subst_from_model (m, exist_consts, model_completion=True)", "()) # need to worry about And and Or because they can take", "m of qvars consistent with (matrix /\\ side_cons) # the under-approx. is obtained", "cex_model is not None: print 'FALSE\\n', cex_model print 'Size:', cex_size+1 # TODO: split", "if insert (t): yield (t, exp) else: yield (None, exp) elif z3.is_app (exp):", "(): # rule_exp is a quantified formula representing the rule of the form:", "# to accommodate for alternative representations of the same equality (e.g. const==1 #", "# need to worry about And and Or because they can take >2", "fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) q = fp.parse_file", "else: head = matrix assert head is not None # obtain head_decl head_decl", "and function declarations correctly\"\"\" if x.ctx == ctx: return x if isinstance (x,", "for counterexamples of increasing size; # in other words, we solve the negation", "(matrix) if side_cons is not None: for c in side_cons: s.add (c) res", "else_value sort = const.sort () if isinstance (sort, z3.ArraySortRef): val_interp = m [const]", "if head_key in pred_keys: continue pred_keys.append (head_key) preds [head_decl.name ()] = head_decl return", "fp = z3.Fixedpoint (ctx=ctx) if not pp: print 'No pre-processing' fp.set (slice=False) fp.set", "eq is an equality logically equivalent to (const==t) # appearing in exp; #", "in unique_leaves (exp.arg (i), leaf_keys): yield leaf else: assert z3.is_var (exp) for leaf", "let exp = Exists (qvars, matrix) # obtain a model m of qvars", "head = matrix assert head is not None # obtain head_decl head_decl =", "()) def mk_exp_variant_sub (exp, variant): sub = [] for const in unique_const_leaves (exp):", 
"x.is_real () return z3.RealSort (ctx=ctx) if isinstance (x, z3.FuncDeclRef): sorts = [z3_translate (x.domain", "z3.ForAll (qvars, matrix) else: e = z3.Exists (qvars, matrix) return e pre_consts =", "# obtain witness for instances res = w.check () if res == z3.unsat:", "l in unique_const_leaves (exp): if z3.eq (l, const): found = True break return", "new existentially quantified variables which can # be hard to eliminate by qe_lite", "elim_term_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_term_ite", "in preds.itervalues (): print 'Lemmas for predicate: ', pred n = fp.get_num_levels (pred)", "print 'Solving by negating the problem' cex_size = 0 curr_exist_consts = [] curr_matrix_disjs", "= map (exp_key, post_consts) exist_consts = [] for i in range (len (post_consts)):", "not pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate)", "[] for c in unique_const_leaves (exp): res.append (c) return res def mk_const_variant (const,", "is None: return (None, None, None) else: if model: return (m, None, None)", "the given problem, looking for # counterexamples of increasing size, similar to BMC", "(exp) def unique_leaves (exp, leaf_keys=None): def insert_and_yield (e): k = exp_key (e) if", "qe_array (exp): if not z3.is_quantifier (exp): return exp is_forall = False if exp.is_forall", "witnesses.append (m) print 'Model for new witness:\\n', m sub = mk_subst_from_model (m, exist_consts,", "sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) # like above,", "(1) else: head = matrix assert head is not None # obtain head_decl", "w with given_insts if given_insts is not None: for inst in given_insts: sub", "in preds head_key = exp_key (head_decl) if head_key in pred_keys: continue pred_keys.append (head_key)", "(const, const_variant) ) return sub def mk_fresh_args (decl, startswith=''): args 
= [] for", "= False if exp.is_forall (): is_forall = True (qvars, matrix) = strip_qblock (exp)", "new_insts return (None, new_insts, witnesses) m = w.model () witnesses.append (m) print 'Model", "(exp): res.append (c) return res def mk_const_variant (const, variant): name = '{}_{}'.format (const.decl", "(exp): for sel in insert_and_yield (exp): yield sel def extract_consts (exp): res =", "in pred_keys: continue pred_keys.append (head_key) preds [head_decl.name ()] = head_decl return preds def", "sel_keys.append (k) yield e if sel_keys is None: sel_keys = [] # post-order", "with the caveat that # elim_term_ite introduces new existentially quantified variables which can", "fp.add_rule (rule, name=name) rules [name] = rule return (q, fp, rules) def create_empty_fp", "z3.Tactic ('qe-sat', ctx=exp.ctx) return t (exp).as_expr () def cofactor_term_ite (exp): if z3.is_quantifier (exp):", "def solve_exists_forall_incremental (exp, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp) and", "matrix) return e if not z3.is_bool (exp): return exp if z3.is_true (exp) or", "(cex_size): sub = [] for c in univ_consts: name = '{}_{}'.format (c.decl ().name", "if z3.is_arith_sort (x): if x.is_int (): return z3.IntSort (ctx=ctx) else : assert x.is_real", "m = w.model () witnesses.append (m) print 'Model for new witness:\\n', m sub", "representations of the same equality (e.g. const==1 # vs. 
1==const) and to avoid", "1; # # assume that no equality term appears in another equality term", "check_sat (fml, side_cons=None): s = z3.Solver (ctx=fml.ctx) s.add (fml) if side_cons is not", "sub = [] for const in unique_const_leaves (exp): const_variant = mk_const_variant (const, variant)", "= fp.parse_file (smt2file) return (q, fp) def create_named_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False):", "for m in unique_const_leaves (exp): if l.eq (m): return True return False def", "eqs def qe_array (exp): if not z3.is_quantifier (exp): return exp is_forall = False", "() if is_forall: (qvars, matrix) = strip_qblock (qf_exp) if len (qvars) > 0:", "= w.check () if res == z3.unsat: print 'FALSE\\n', new_insts return (None, new_insts,", "(engine=engine, use_farkas=True, generate_proof_trace=False) for pred in given_preds.itervalues (): fp.register_relation (pred) for i,rule in", "based method for quant. alternation; # given_insts is a list of instances for", "(exp) pre_const_keys = map (exp_key, pre_consts) t = z3.Tactic ('elim-term-ite', ctx=exp.ctx) e =", "not None # obtain head_decl head_decl = head.decl () # ensure head_decl is", "ret_val = process_eq (arg0, e2-arg1) else: ret_val = process_eq (arg0, e2+arg1) return ret_val", "# # assume that no equality term appears in another equality term #", "{}'.format ('oo', fp.get_cover_delta (-1, pred)) print def get_level_lemmas (fp, lvl, pred): lemmas =", "exp = Exists (qvars, matrix) # obtain a model m of qvars consistent", "return False def process_eq (e1, e2): if z3.eq (e1, const): ret_val = z3.simplify", "model: return (m, None, None) sub = mk_subst_from_model (m, exist_consts, model_completion=True) return (sub,", "= z3.Solver (ctx=exp.ctx) for cons in w_cons: w.add (cons) # obtain witness for", "def has_const (exp, const): found = False for l in unique_const_leaves (exp): if", "(exp): return # sub-dag is already processed if z3.is_eq (exp): arg0 = exp.arg", "= z3.simplify (e2) else: assert z3.is_app 
(e1) if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or", "(e1, z3.Z3_OP_ADD) arg0 = e1.arg (0) arg1 = e1.arg (1) if z3.eq (arg1,", "(const.decl ().name (), variant) return z3.Const (name, const.sort ()) def mk_exp_variant_sub (exp, variant):", "if side_cons is a point def under_approx_qe (exp, side_cons=None): assert z3.is_quantifier (exp) assert", "is a point def under_approx_qe (exp, side_cons=None): assert z3.is_quantifier (exp) assert not exp.is_forall", "None: return (None, None, None) else: if model: return (m, None, None) sub", "res = z3.ForAll (qvars, z3.Not (matrix)) else: res = z3.Not (matrix) else: res", "return (m, None, None) sub = mk_subst_from_model (m, exist_consts, model_completion=True) return (sub, None,", "list of models for the original set of # universal variables return (None,", "*sub) curr_matrix_disjs.append (new_disj) curr_exp = z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses,", "# rewrite ite as conjunction of implications when it appears as a boolean", "= z3.substitute_vars (expr.body (), *consts) return (consts, matrix) def get_preds (fp): preds =", "(z3.substitute (matrix, *sub)) new_insts = list () witnesses = list () while True:", "t = z3.Tactic ('qe-sat', ctx=exp.ctx) return t (exp).as_expr () def cofactor_term_ite (exp): if", "is_xor (exp): return z3.is_app_of (exp, z3.Z3_OP_XOR) # rewrite ite as conjunction of implications", "if fml is sat with given side constraints def check_sat (fml, side_cons=None): s", "([v], matrix)).as_expr () e = matrix return e def qe (exp): if not", "extract_consts (exp) pre_const_keys = map (exp_key, pre_consts) t = z3.Tactic ('elim-term-ite', ctx=exp.ctx) e", "fp.get_cover_delta (-1, pred)) print def get_level_lemmas (fp, lvl, pred): lemmas = [] for", "[i]) if len (exist_consts) > 0: e = z3.Exists (exist_consts, e) return qe_lite", "is not None: for c in side_cons: s.add (c) res = s.check ()", "under-approx. 
is obtained as \"matrix [m/qvars]\" # # this is the weakest under-approx.", "exp2 is None and exp1 is not None: return False return exp_key (exp1)", "= exp.arg (0) arg1 = exp.arg (1) if has_const (arg1, const): arg0,arg1 =", "= 0 curr_exist_consts = [] curr_matrix_disjs = [] for i in range (cex_size):", "if res == z3.unsat: print 'FALSE\\n', new_insts return (None, new_insts, witnesses) m =", "(None,1==const)); # # assume that const only appears in simple arithmetic terms and", "point def solve_exists_forall (exp, given_insts=None, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier", "= list () witnesses = list () while True: print 'Solver for witness:'", "each pair (t,eq) is such that eq is an equality logically equivalent to", "(exp) assert not exp.is_forall () (qvars, matrix) = strip_qblock (exp) s = z3.Solver", "(const == val) return eqs def qe_array (exp): if not z3.is_quantifier (exp): return", "None and exp2 is None: return True if exp1 is None and exp2", "= arg1,arg0 # swap if has_const (arg0, const): t = process_eq (arg0, arg1)", "res == z3.unsat: print 'TRUE\\n', sub if model: return (m, None, None) return", "(exp1) == exp_key (exp2) # iterator for declarations of arguments of exp #", "(elim_bool_ite, exp.children ()) # need to worry about And and Or because they", "BMC def solve_exists_forall_incremental (exp, model=False): print 'Exists Forall exp:', exp assert z3.is_quantifier (exp)", "in range (decl.arity ()): name = startswith + str (i) sort = decl.domain", "= qf_exp return res def qe_lite (exp): if not z3.is_quantifier (exp): return exp", "map (exp_key, pre_consts) t = z3.Tactic ('elim-term-ite', ctx=exp.ctx) e = t (exp).as_expr ()", "(sub, None, None) else: assert e.is_forall () (univ_consts, matrix) = strip_qblock (e) print", "(): matrix = t (z3.ForAll ([v], matrix)).as_expr () else: matrix = t (z3.Exists", "of # universal variables return (None, cex_model, witnesses) else: # no cex of", "is a quantified formula representing the 
rule of the form: # Forall vars.", "if not z3.is_quantifier (e): return e if z3.is_quantifier (e): # invoke qe_lite for", "in given_preds.itervalues (): fp.register_relation (pred) for i,rule in enumerate (given_fp.get_rules ()): name =", "# if arguments contain de-bruijn variables, they are ignored def arg_decls (exp): for", "term appears in another equality term # (so, we can't handle \"(const==0)==b\", \"ite", "separately (qvars, matrix) = strip_qblock (e) for v in qvars: if exp.is_forall ():", "z3.Tactic ('nnf', ctx=exp.ctx) return t (exp).as_expr () def is_ite (exp): return z3.is_app_of (exp,", "'Exists Forall exp:', exp assert z3.is_quantifier (exp) and not exp.is_forall () (exist_consts, e)", "sorts and function declarations correctly\"\"\" if x.ctx == ctx: return x if isinstance", "in range (x.arity ())] sorts.append (z3_translate (x.range (), ctx)) return z3.Function (x.name (),", "return (None, new_insts, witnesses) m = w.model () witnesses.append (m) print 'Model for", "if not pp: fp.set (slice=False) fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine,", "print cons.sexpr () w = z3.Solver (ctx=exp.ctx) for cons in w_cons: w.add (cons)", "instances res = w.check () if res == z3.unsat: print 'FALSE\\n', new_insts return", "not None: print 'FALSE\\n', cex_model print 'Size:', cex_size+1 # TODO: split cex_model into", "(exp): for i in range (exp.num_args ()): # args are the array and", "example, we yield (1,const==1) and (None,1==const)); # # assume that const only appears", "a list of instances for the universals given by the user as a", "const): arg0,arg1 = arg1,arg0 # swap if has_const (arg0, const): t = process_eq", "sub = mk_subst_from_model (m, exist_consts, model_completion=True) w.add (z3.substitute (z3.Not (matrix), *sub)) if w.check", "leaf_keys): yield leaf else: assert z3.is_var (exp) for leaf in insert_and_yield (exp): yield", "(const) sub.append ( (c, const) ) new_disj = 
z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append", "i,rule in enumerate (given_fp.get_rules ()): name = 'r'+str(i) fp.add_rule (rule, name=name) rules [name]", "= elim_term_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix) else: e", "fp = z3.Fixedpoint (ctx=ctx) rules = dict () # map from names to", "inst in given_insts: sub = mk_subst_from_model (inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub))", "return True return False def unique_selects (exp, sel_keys=None): def insert_and_yield (e): k =", "(cons) # obtain witness for instances res = w.check () if res ==", "(matrix /\\ side_cons) # the under-approx. is obtained as \"matrix [m/qvars]\" # #", "(None,eq) for the # duplicates (in the above example, we yield (1,const==1) and", "universal variables return (None, cex_model, witnesses) else: # no cex of current size", "def unique_var_leaves (exp): for l in unique_leaves (exp): if z3.is_var (l): yield l", "= head_decl return preds def print_lemmas (fp): preds = get_preds (fp) print for", "z3.BoolSort (ctx=ctx) if z3.is_arith_sort (x): if x.is_int (): return z3.IntSort (ctx=ctx) else :", "val_interp.entry (i) val = z3.Store (val, entry.arg_value (0), entry.value ()) else: val =", "if z3.eq (l, const): found = True break return found # iterator for", "# doesn't seem to quite work when new expressions are created -- use", "z3.is_app_of (e1, z3.Z3_OP_SUB)): return None is_add = z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 = e1.arg", "m sub = mk_subst_from_model (m, exist_consts, model_completion=True) print 'New witness:', sub # check", "= process_eq (arg0, e2+arg1) return ret_val if eq_terms is None: eq_terms = []", "z3.is_eq (exp): arg0 = exp.arg (0) arg1 = exp.arg (1) if has_const (arg1,", "k not in leaf_keys: leaf_keys.append (k) yield e if leaf_keys is None: leaf_keys", "that eq is an equality logically equivalent to (const==t) # appearing in exp;", "if z3.is_app (exp): for i in range (exp.num_args ()): # args 
are the", "unique_leaves (exp.arg (i), leaf_keys): yield leaf else: assert z3.is_var (exp) for leaf in", "(so, we can't handle \"(const==0)==b\", \"ite (const==0, x, y) == 3\", etc.) def", "z3.is_or (exp): return z3.Or (*args) elif is_ite (exp): impl1 = z3.Implies (args[0], args[1])", "else: val = m.eval (const, model_completion=model_completion) eqs.append (const == val) return eqs def", "or z3.is_false (exp): return exp assert z3.is_app (exp) decl = exp.decl () args", "yield l def unique_var_leaves (exp): for l in unique_leaves (exp): if z3.is_var (l):", "else: return None def mk_subst_from_model (m, consts, model_completion=False): sub = [] for const", "res.append (c) return res def mk_const_variant (const, variant): name = '{}_{}'.format (const.decl ().name", "(z3.Const (name, sort)) return args # check if fml is sat with given", "= '{}_{}'.format (const.decl ().name (), variant) return z3.Const (name, const.sort ()) def mk_exp_variant_sub", "for c in univ_consts: name = '{}_{}'.format (c.decl ().name (), str (i)) const", "e2+arg1) return ret_val if eq_terms is None: eq_terms = [] if done_exp is", "z3.Solver (ctx=exp.ctx) sub = mk_subst_from_model (m, exist_consts, model_completion=True) w.add (z3.substitute (z3.Not (matrix), *sub))", "post_const_keys = map (exp_key, post_consts) exist_consts = [] for i in range (len", "def qe_sat (exp): t = z3.Tactic ('qe-sat', ctx=exp.ctx) return t (exp).as_expr () def", "of current size # check if any of the witnesses already works for", "(const, model_completion=model_completion) eqs.append (const == val) return eqs def qe_array (exp): if not", "for (t,eq) in unique_eq_terms_on_const (const, exp.arg (i), eq_terms, done_exp): yield (t, eq) done_exp.append", "None, None) sub = mk_subst_from_model (m, exist_consts, model_completion=True) return (sub, None, None) else:", "z3.eq (t,e): found = True break if not found: eq_terms.append (e) return True", "entry.arg_value (0), entry.value ()) else: val = m.eval (const, 
model_completion=model_completion) else: val =", "if z3.is_and (exp): return z3.And (*args) elif z3.is_or (exp): return z3.Or (*args) elif", "z3.Fixedpoint (ctx=ctx) rules = dict () # map from names to rules if", "= [] # post-order if z3.is_app (exp): for i in range (exp.num_args ()):", "(exp): return exp assert z3.is_app (exp) decl = exp.decl () args = map", ": idx_sort = sort.domain () val_sort = sort.range () val = z3.K(val_sort, val_interp.else_value", "= False for l in unique_const_leaves (exp): if z3.eq (l, const): found =", "set of exist consts sub = [] for c in univ_consts: name =", "= z3.simplify (arg0-e2) else: if is_add: ret_val = process_eq (arg0, e2-arg1) else: ret_val", "(exp): for i in range (exp.num_args ()): for (t,eq) in unique_eq_terms_on_const (const, exp.arg", "is None: eq_terms = [] if done_exp is None: done_exp = [] for", "for rule_exp in fp.get_rules (): # rule_exp is a quantified formula representing the", "(inst, univ_consts, model_completion=True) w_cons.append (z3.substitute (matrix, *sub)) new_insts = list () witnesses =", "names to rules if not pp: print 'No pre-processing' fp.set (slice=False) fp.set (inline_linear=False)", "(fp): preds = get_preds (fp) print for pred in preds.itervalues (): print 'Lemmas", "z3.substitute (matrix, *sub) s.add (z3.Not (z3.substitute (matrix, *sub))) print 'Solver for validity:', z3.Not", "implications when it appears as a boolean atom # (i.e. 
an atom in", "0 curr_exist_consts = [] curr_matrix_disjs = [] for i in range (cex_size): sub", "(cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True) if cex_model is not None: print", "(exp_key, post_consts) exist_consts = [] for i in range (len (post_consts)): post_key =", "if x.is_int (): return z3.IntSort (ctx=ctx) else : assert x.is_real () return z3.RealSort", "z3.is_var (exp) for leaf in insert_and_yield (exp): yield leaf def unique_const_leaves (exp): for", "(const, exp.arg (i), eq_terms, done_exp): yield (t, eq) done_exp.append (exp) def unique_leaves (exp,", "in fp.get_rules (): # rule_exp is a quantified formula representing the rule of", "(), ctx)) return z3.Function (x.name (), *sorts) if ctx is None: return x.translate", "isinstance (val_interp, z3.FuncInterp): idx_sort = sort.domain () val_sort = sort.range () val =", "model_completion=True) return z3.substitute (matrix, *sub) def nnf (exp): t = z3.Tactic ('nnf', ctx=exp.ctx)", ") new_disj = z3.substitute (z3.Not (matrix), *sub) curr_matrix_disjs.append (new_disj) while True: print 'CURRENT", "model_completion=False): eqs = [] for const in consts: # treat arrays specially due", "None) sub = mk_subst_from_model (m, exist_consts, model_completion=True) return (sub, None, None) else: assert", "pred_keys.append (head_key) preds [head_decl.name ()] = head_decl return preds def print_lemmas (fp): preds", "(fml) if side_cons is not None: for cons in side_cons: s.add (cons) res", "[] curr_matrix_disjs = [] for i in range (cex_size): sub = [] for", "(new_disj) curr_exp = z3.Exists (curr_exist_consts, z3.ForAll (exist_consts, z3.Or (*curr_matrix_disjs))) (cex_model, witnesses, _unused_insts) =", "= exp_key (head_decl) if head_key in pred_keys: continue pred_keys.append (head_key) preds [head_decl.name ()]", "(False, ctx=ctx) def mk_int (val, ctx=None): return z3.IntVal (val, ctx=ctx) def mk_and (args,", "if eq_terms is None: eq_terms = [] if done_exp is None: done_exp =", 
"(val_interp is not None) and isinstance (val_interp, z3.FuncInterp) : idx_sort = sort.domain ()", "fp.get_num_levels (pred) + 1): lemmas.append (fp.get_cover_delta (l, pred)) lemmas.append (fp.get_cover_delta (-1, pred)) return", "new witness:\\n', m sub = mk_subst_from_model (m, exist_consts, model_completion=True) print 'New witness:', sub", "translate_pair_list (l, ctx): res = [] for (a,b) in l: new_p = (z3_translate", "quantified variable, for better result for i in range (exp.num_vars ()): e =", "constants which need to be existentially quantified post_consts = extract_consts (e) post_const_keys =", "[] for e in done_exp: if e.eq (exp): return # sub-dag is already", "= True break if not found: eq_terms.append (e) return True return False def", "def qe_lite (exp): if not z3.is_quantifier (exp): return exp e = exp t", "existentially quantified post_consts = extract_consts (e) post_const_keys = map (exp_key, post_consts) exist_consts =", "create_fp (smt2file, ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp:", "= strip_qblock (exp) matrix = cofactor_term_ite (matrix) if exp.is_forall (): return z3.ForAll (qvars,", "return ret_val if eq_terms is None: eq_terms = [] if done_exp is None:", "exp.decl () args = map (elim_bool_ite, exp.children ()) # need to worry about", "return # sub-dag is already processed if z3.is_eq (exp): arg0 = exp.arg (0)", "(e) for v in qvars: if exp.is_forall (): matrix = t (z3.ForAll ([v],", "val_interp = m [const] if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp)", "exp t = z3.Tactic ('qe-light', ctx=exp.ctx) # invoke qe_lite once per quantified variable,", "t in eq_terms: if z3.eq (t,e): found = True break if not found:", "leaf_keys = [] if z3.is_const (exp) and not (z3.is_int_value (exp) or z3.is_rational_value (exp)):", "given_insts is not None: for inst in given_insts: sub = mk_subst_from_model (inst, univ_consts,", "(decl, startswith=''): args = [] for i in range 
(decl.arity ()): name =", "= rule return (q, fp, rules) def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False): fp", "(exp) or z3.is_rational_value (exp)): for leaf in insert_and_yield (exp): yield leaf elif z3.is_app", "exist_consts.append (post_consts [i]) if len (exist_consts) > 0: e = z3.Exists (exist_consts, e)", "and exp2 is not None: return False if exp2 is None and exp1", "done_exp=None): def insert (e): found = False for t in eq_terms: if z3.eq", "= True (qvars, matrix) = strip_qblock (exp) exp = z3.Exists (qvars, z3.Not (matrix))", "doesn't seem to work with the py interface if z3.is_and (exp): return z3.And", "return z3.Function (x.name (), *sorts) if ctx is None: return x.translate (ctx=z3.main_ctx ())", "sel_keys=None): def insert_and_yield (e): k = exp_key (e) if k not in sel_keys:", "per quantified variable, for better result for i in range (exp.num_vars ()): e", "# given_insts is a list of instances for the universals given by the", "mk_subst_from_model (m, exist_consts, model_completion=True) w.add (z3.substitute (z3.Not (matrix), *sub)) if w.check () ==", "(exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock (exp) matrix = cofactor_term_ite (matrix)", "())] sorts.append (z3_translate (x.range (), ctx)) return z3.Function (x.name (), *sorts) if ctx", "print 'Size:', cex_size+1 # TODO: split cex_model into list of models for the", "and exist_consts # add a new set of exist consts sub = []", "structure of exp) def elim_bool_ite (exp): if z3.is_quantifier (exp): (qvars, matrix) = strip_qblock", "(exp.num_vars ()): e = t (e).as_expr () if not z3.is_quantifier (e): return e", "contain de-bruijn variables, they are ignored def arg_decls (exp): for i in range", "exist_consts, model_completion=True) w.add (z3.substitute (z3.Not (matrix), *sub)) if w.check () == z3.unsat: print", "pred_keys: continue pred_keys.append (head_key) preds [head_decl.name ()] = head_decl return preds def print_lemmas", "insert (e): found = False for t 
in eq_terms: if z3.eq (t,e): found", "on const; # # each pair (t,eq) is such that eq is an", "sort.range () val = z3.K(val_sort, val_interp.else_value ()) for i in range (val_interp.num_entries ()):", "atom # (i.e. an atom in the boolean structure of exp) def elim_bool_ite", "m = check_sat (e) if m is None: return (None, None, None) else:", "create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if not pp: fp.set", "w_cons: print cons.sexpr () w = z3.Solver (ctx=exp.ctx) for cons in w_cons: w.add", "due to the else_value sort = const.sort () if isinstance (sort, z3.ArraySortRef): val_interp", "witnesses) m = w.model () witnesses.append (m) print 'Model for new witness:\\n', m", "(matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1) else: head = matrix assert head is", "assert head is not None # obtain head_decl head_decl = head.decl () #", "fp, rules) def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False): fp = z3.Fixedpoint (ctx=ctx) if", "(i) val = z3.Store (val, entry.arg_value (0), entry.value ()) else: val = m.eval", "return sub def mk_eqs_from_model (m, consts, model_completion=False): eqs = [] for const in", "(exp) return qe (temp) def qe_sat (exp): t = z3.Tactic ('qe-sat', ctx=exp.ctx) return", "matrix) return e pre_consts = extract_consts (exp) pre_const_keys = map (exp_key, pre_consts) t", "args and # decl(*args) doesn't seem to work with the py interface if", "or z3.is_app_of (e1, z3.Z3_OP_SUB)): return None is_add = z3.is_app_of (e1, z3.Z3_OP_ADD) arg0 =", "while True: print 'CURRENT SIZE:', cex_size+1 # look for a cex of size", "rule_exp in fp.get_rules (): # rule_exp is a quantified formula representing the rule", "logically equivalent to (const==t) # appearing in exp; # # to accommodate for", "= [] for (a,b) in l: new_p = (z3_translate (a, ctx), z3_translate (b,", "decl (*args) def elim_ite (exp): e = cofactor_term_ite (exp) e = elim_bool_ite (e)", "return res def mk_true (ctx=None): return 
z3.BoolVal (True, ctx=ctx) def mk_false (ctx=None): return", "z3.is_quantifier (e): # just an smt problem m = check_sat (e) if m", "if exp.is_forall (): is_forall = True (qvars, matrix) = strip_qblock (exp) exp =", "exp assert z3.is_quantifier (exp) and not exp.is_forall () (exist_consts, e) = strip_qblock (exp)", "None: return False if exp2 is None and exp1 is not None: return", "model_completion=True) print 'New witness:', sub # check if the witness is sufficient s", "(exp): if z3.is_var (l): yield l def exp_has_const_leaf (exp, l): for m in", "'cex_size' # Exists U1,U2,..U_cex_size. Forall E. Not (matrix), # where U and E", "(ctx=None): return z3.BoolVal (True, ctx=ctx) def mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx) def", "# appearing in exp; # # to accommodate for alternative representations of the", "range (exp.num_args ()): for (t,eq) in unique_eq_terms_on_const (const, exp.arg (i), eq_terms, done_exp): yield", "(exp): (qvars, matrix) = strip_qblock (exp) matrix = elim_term_ite (matrix) if exp.is_forall ():", "fp.set (inline_linear=False) fp.set (inline_eager=False) fp.set (validate_result=validate) fp.set (engine=engine, use_farkas=True, generate_proof_trace=False) return fp def", "(exp) decl = exp.decl () args = map (elim_bool_ite, exp.children ()) # need", "where head is a QF predicate instance # obtain the head unused_, matrix", "quite work when new expressions are created -- use z3.eq instead def exp_key", "w = z3.Solver (ctx=exp.ctx) for cons in w_cons: w.add (cons) # obtain witness", "(e.g. const==1 # vs. 
1==const) and to avoid repetitions of terms, we return", "the else_value sort = const.sort () if isinstance (sort, z3.ArraySortRef): val_interp = m", "(matrix), # where U and E are univ_consts and exist_consts # add a", "cons in side_cons: s.add (cons) res = s.check () if res == z3.sat:", "which can # be hard to eliminate by qe_lite #e = elim_bool_ite (exp)", "return z3.BoolVal (True, ctx=ctx) def mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx) def mk_int", "None and exp1 is not None: return False return exp_key (exp1) == exp_key", "len (args) == 0: return mk_true (ctx=ctx) else: return z3.And (*args) def create_fp", "print 'Solver for witness:' for cons in w_cons: print cons.sexpr () w =", "'checking validity of ', z3.substitute (matrix, *sub) s.add (z3.Not (z3.substitute (matrix, *sub))) print", "(i)) const = z3.Const (name, c.sort ()) curr_exist_consts.append (const) sub.append ( (c, const)", "given_insts if given_insts is not None: for inst in given_insts: sub = mk_subst_from_model", "print def get_level_lemmas (fp, lvl, pred): lemmas = [] for l in range", "not exp.is_forall () (qvars, matrix) = strip_qblock (exp) s = z3.Solver (ctx=exp.ctx) s.add", "done the following with the caveat that # elim_term_ite introduces new existentially quantified", "list () for i in reversed (range (expr.num_vars ())): v_name = expr.var_name (i)", "(exp2) # iterator for declarations of arguments of exp # if arguments contain", "ctx=ctx) def mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx) def mk_int (val, ctx=None): return", "= given_fp.parse_file (smt2file) given_preds = get_preds (given_fp) fp = z3.Fixedpoint (ctx=ctx) rules =", "if side_cons is not None: for c in side_cons: s.add (c) res =", "if z3.eq (e1, const): ret_val = z3.simplify (e2) else: assert z3.is_app (e1) if", "= [] for c in univ_consts: name = '{}_{}'.format (c.decl ().name (), str", "exist_consts # add a new set of exist consts sub = [] for", "exp1 is None and exp2 is not None: return False if 
exp2 is", "(exp) matrix = elim_term_ite (matrix) if exp.is_forall (): e = z3.ForAll (qvars, matrix)", "print 'New witness:', sub # check if the witness is sufficient s =", "unique_selects (exp, sel_keys=None): def insert_and_yield (e): k = exp_key (e) if k not", "in range (val_interp.num_entries ()): entry = val_interp.entry (i) val = z3.Store (val, entry.arg_value", "strip_qblock (e) for v in qvars: if exp.is_forall (): matrix = t (z3.ForAll", "*sub)) if w.check () == z3.unsat: print 'TRUE\\n', sub if model: return (m," ]
[ "new_level): self.sun_level = new_level self.redraw_pixels() ''' def increase_sun(self): print(\"sun increased\") print(\"current sun stage", "(self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) #level 4 if self.sun_level == 4:", "have red and green reversed! # For RGBW NeoPixels, simply change the ORDER", "self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating position\") self.current_position", "self.SUN_MILD = (32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF = (0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF)", "self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position - 3)", "for Adafruit Industries # SPDX-License-Identifier: MIT # Simple test for NeoPixels on Raspberry", "self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels]", "colors - RGB or GRB. Some NeoPixels have red and green reversed! #", "print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 1: print(\"Sunlevel already lowest\") return", "the ORDER to RGBW or GRBW. 
self.ORDER = neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin,", "self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) %", "change of position over time timer = threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self): #Reset", "= new_level self.redraw_pixels() ''' def increase_sun(self): print(\"sun increased\") print(\"current sun stage is: {}\".format(self.sun_level))", "level self.sun_level += 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() def", "#level 4 if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position", "1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\")", "= 1 # The order of the pixel colors - RGB or GRB.", "sun stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK =", "print(\"sun decreased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 1: print(\"Sunlevel already", "== 1: self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) #level 2 if self.sun_level == 2:", "is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() ''' ''' sun = sunController() sun.update_position() while input", "+2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position +", "import threading class sunController(): def __init__(self): # Choose an open pin connected to", "self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position", "(self.SUN_STRONG) #level 3 if 
self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)", "#Update level self.sun_level += 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels()", "sun = sunController() sun.update_position() while input != 'quit': command = input() if command", "Industries # SPDX-License-Identifier: MIT # Simple test for NeoPixels on Raspberry Pi import", "= (64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF = (0,0,0) def init_sun(self,", "self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) #level 3 if self.sun_level", "the NeoPixel strip, i.e. board.D18 # NeoPixels must be connected to D10, D12,", "self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position + 3) %", "(self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG)", "self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating position\") self.current_position += 1 #Redraw pixels self.redraw_pixels() #Create", "__init__(self): # Choose an open pin connected to the Data In of the", "% self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) #level 4 if", "(self.SUN_WEAK) self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK) #Reveal pixels self.pixels.show() def set_level(self,", "self.sun_level = new_level self.redraw_pixels() ''' def increase_sun(self): print(\"sun increased\") print(\"current sun stage is:", "new thread for non-blocking change of position over time timer = threading.Timer(3.4, self.update_position)", "SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries # SPDX-License-Identifier: MIT # Simple test 
for", "print(\"Sunlevel already lowest\") return #Update level self.sun_level -= 1 print(\"New sun stage is:", "= input() if command == \"1\": sun.increase_sun() if command == \"2\": sun.decrease_sun() if", "start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF)", "% self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) %", "pixels self.redraw_pixels() ''' ''' sun = sunController() sun.update_position() while input != 'quit': command", "#level 2 if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position", "self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position", "self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK) #Reveal pixels self.pixels.show() def set_level(self, new_level):", "1 #Redraw pixels self.redraw_pixels() #Create a new thread for non-blocking change of position", "% self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK) #Reveal pixels", "non-blocking change of position over time timer = threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self):", "time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating position\") self.current_position += 1 #Redraw pixels", "0 #Start level of the sun between lowest 1 and 4 max self.sun_level", "a new thread for non-blocking change of position over time timer = threading.Timer(3.4,", "D10, D12, D18 or D21 to work. 
self.pixel_pin = board.D10 # The number", "#Start position of sun self.current_position = 0 #Start level of the sun between", "sun stage is: {}\".format(self.sun_level)) if self.sun_level == 4: print(\"Sunlevel already max\") return #Update", "RGB or GRB. Some NeoPixels have red and green reversed! # For RGBW", "self.redraw_pixels() #Create a new thread for non-blocking change of position over time timer", "-2) % self.num_pixels] = (self.SUN_MILD) #level 4 if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position", "#Reveal pixels self.pixels.show() def set_level(self, new_level): self.sun_level = new_level self.redraw_pixels() ''' def increase_sun(self):", "if command == \"2\": sun.decrease_sun() if command == \"3\": sun.test_colors() command = ''", "class sunController(): def __init__(self): # Choose an open pin connected to the Data", "sun.increase_sun() if command == \"2\": sun.decrease_sun() if command == \"3\": sun.test_colors() command =", "self.num_pixels] = (self.SUN_STRONG) #level 3 if self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels]", "= (self.SUN_INTENSE) self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position - 1) %", "== 1: print(\"Sunlevel already lowest\") return #Update level self.sun_level -= 1 print(\"New sun", "to the Data In of the NeoPixel strip, i.e. board.D18 # NeoPixels must", "red and green reversed! 
# For RGBW NeoPixels, simply change the ORDER to", "Pi import time import board import neopixel import colorsys import threading class sunController():", "4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position + 1) % self.num_pixels] =", "% self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) #level 3 if", "% self.num_pixels] = (self.SUN_MILD) #level 4 if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position %", "self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating position\")", "(20,6,0) self.SUN_COLOR_OFF = (0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position self.pixels[self.current_position]", "% self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -", "#TODO - Create 4 sun stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD", "D18 or D21 to work. self.pixel_pin = board.D10 # The number of NeoPixels", "self.num_pixels] = (self.SUN_MILD) #level 4 if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels]", "= neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO - Create 4 sun stages", "GRB. Some NeoPixels have red and green reversed! 
# For RGBW NeoPixels, simply", "= (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) #level 4 if self.sun_level ==", "of the sun between lowest 1 and 4 max self.sun_level = 1 #", "set_level(self, new_level): self.sun_level = new_level self.redraw_pixels() ''' def increase_sun(self): print(\"sun increased\") print(\"current sun", "lowest\") return #Update level self.sun_level -= 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw", "# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries # SPDX-License-Identifier: MIT # Simple test", "number of NeoPixels 147 + 1 for some reason self.num_pixels = 88 #Start", "{}\".format(self.sun_level)) if self.sun_level == 4: print(\"Sunlevel already max\") return #Update level self.sun_level +=", "def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show()", "= 88 #Start position of sun self.current_position = 0 #Start level of the", "4 sun stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK", "self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) #level 2 if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position", "stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() ''' ''' sun = sunController() sun.update_position() while", "2021 ladyada for Adafruit Industries # SPDX-License-Identifier: MIT # Simple test for NeoPixels", "is: {}\".format(self.sun_level)) if self.sun_level == 4: print(\"Sunlevel already max\") return #Update level self.sun_level", "# Simple test for NeoPixels on Raspberry Pi import time import board import", "- Create 4 sun stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD =", "of 
position over time timer = threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self): #Reset pixels", "self.redraw_pixels() ''' def increase_sun(self): print(\"sun increased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level", "NeoPixels have red and green reversed! # For RGBW NeoPixels, simply change the", "strip, i.e. board.D18 # NeoPixels must be connected to D10, D12, D18 or", "sun between lowest 1 and 4 max self.sun_level = 1 # The order", "import board import neopixel import colorsys import threading class sunController(): def __init__(self): #", "self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) #level", "print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\") print(\"current", "(self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD)", "= (self.SUN_MILD) self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position - 3) %", "NeoPixels, simply change the ORDER to RGBW or GRBW. self.ORDER = neopixel.GRB self.pixels", "max self.sun_level = 1 # The order of the pixel colors - RGB", "= (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) #level 3 if self.sun_level ==", "(self.SUN_MILD) #level 4 if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)", "pixels self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level", "# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW. 
self.ORDER", "def decrease_sun(self): print(\"sun decreased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 1:", "neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO - Create 4 sun stages self.SUN_INTENSE", "= 0 #Start level of the sun between lowest 1 and 4 max", "for some reason self.num_pixels = 88 #Start position of sun self.current_position = 0", "NeoPixel strip, i.e. board.D18 # NeoPixels must be connected to D10, D12, D18", "% self.num_pixels] = (self.SUN_WEAK) #Reveal pixels self.pixels.show() def set_level(self, new_level): self.sun_level = new_level", "self.update_position) timer.start() def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level == 1:", "return #Update level self.sun_level -= 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels", "88 #Start position of sun self.current_position = 0 #Start level of the sun", "== 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] =", "#Redraw pixels self.redraw_pixels() #Create a new thread for non-blocking change of position over", "order of the pixel colors - RGB or GRB. 
Some NeoPixels have red", "is: {}\".format(self.sun_level)) if self.sun_level == 1: print(\"Sunlevel already lowest\") return #Update level self.sun_level", "1: self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) #level 2 if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF)", "threading class sunController(): def __init__(self): # Choose an open pin connected to the", "% self.num_pixels] = (self.SUN_INTENSE) #level 2 if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position %", "time timer = threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1", "+= 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() def decrease_sun(self): print(\"sun", "stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK = (20,6,0)", "self.num_pixels] = (self.SUN_WEAK) #Reveal pixels self.pixels.show() def set_level(self, new_level): self.sun_level = new_level self.redraw_pixels()", "+= 1 #Redraw pixels self.redraw_pixels() #Create a new thread for non-blocking change of", "self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE)", "self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels]", "to RGBW or GRBW. 
self.ORDER = neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1,", "board.D18 # NeoPixels must be connected to D10, D12, D18 or D21 to", "sun.update_position() while input != 'quit': command = input() if command == \"1\": sun.increase_sun()", "new_level self.redraw_pixels() ''' def increase_sun(self): print(\"sun increased\") print(\"current sun stage is: {}\".format(self.sun_level)) if", "(0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show()", "MIT # Simple test for NeoPixels on Raspberry Pi import time import board", "= (self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show()", "% self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position -", "pixel_order=self.ORDER) #TODO - Create 4 sun stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG = (64,16,0)", "stage is: {}\".format(self.sun_level)) if self.sun_level == 1: print(\"Sunlevel already lowest\") return #Update level", "the sun between lowest 1 and 4 max self.sun_level = 1 # The", "= neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO - Create", "{}\".format(self.sun_level)) if self.sun_level == 1: print(\"Sunlevel already lowest\") return #Update level self.sun_level -=", "NeoPixels 147 + 1 for some reason self.num_pixels = 88 #Start position of", "connected to D10, D12, D18 or D21 to work. 
self.pixel_pin = board.D10 #", "self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position + 1) %", "#Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level == 1: self.pixels[self.current_position % self.num_pixels] =", "= (self.SUN_STRONG) #level 3 if self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] =", "between lowest 1 and 4 max self.sun_level = 1 # The order of", "#Update level self.sun_level -= 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels()", "self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position", "self.ORDER = neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO -", "of NeoPixels 147 + 1 for some reason self.num_pixels = 88 #Start position", "of sun self.current_position = 0 #Start level of the sun between lowest 1", "decreased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 1: print(\"Sunlevel already lowest\")", "self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) #level 4 if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF)", "thread for non-blocking change of position over time timer = threading.Timer(3.4, self.update_position) timer.start()", "self.pixels.show() def set_level(self, new_level): self.sun_level = new_level self.redraw_pixels() ''' def increase_sun(self): print(\"sun increased\")", "position over time timer = threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF)", "= sunController() sun.update_position() while 
input != 'quit': command = input() if command ==", "pin connected to the Data In of the NeoPixel strip, i.e. board.D18 #", "redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level == 1: self.pixels[self.current_position % self.num_pixels]", "if command == \"1\": sun.increase_sun() if command == \"2\": sun.decrease_sun() if command ==", "(self.SUN_WEAK) #Reveal pixels self.pixels.show() def set_level(self, new_level): self.sun_level = new_level self.redraw_pixels() ''' def", "def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def", "RGBW NeoPixels, simply change the ORDER to RGBW or GRBW. self.ORDER = neopixel.GRB", "+2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) #level 4", "self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD)", "time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3)", "-1) % self.num_pixels] = (self.SUN_STRONG) #level 3 if self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position", "self.SUN_COLOR_OFF = (0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position self.pixels[self.current_position] =", "self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating 
position\") self.current_position += 1 #Redraw", "self.pixels.show() self.current_position = start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3)", "{}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() ''' ''' sun = sunController() sun.update_position() while input !=", "self.SUN_INTENSE = (255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF", "neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO - Create 4", "command = input() if command == \"1\": sun.increase_sun() if command == \"2\": sun.decrease_sun()", "import time import board import neopixel import colorsys import threading class sunController(): def", "== \"1\": sun.increase_sun() if command == \"2\": sun.decrease_sun() if command == \"3\": sun.test_colors()", "'quit': command = input() if command == \"1\": sun.increase_sun() if command == \"2\":", "sunController(): def __init__(self): # Choose an open pin connected to the Data In", "self.sun_level += 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() def decrease_sun(self):", "4 if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +", "self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position - 1)", "= (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position + 3) % self.num_pixels]", "self.sun_level = 1 # The order of the pixel colors - RGB or", "Some NeoPixels have red and green reversed! 
# For RGBW NeoPixels, simply change", "pixels self.redraw_pixels() #Create a new thread for non-blocking change of position over time", "+ 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG)", "self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show()", "-= 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() ''' ''' sun", "auto_write=False, pixel_order=self.ORDER) #TODO - Create 4 sun stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG =", "if self.sun_level == 1: self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) #level 2 if self.sun_level", "1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2)", "= (0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position self.pixels[self.current_position] = (self.SUN_INTENSE)", "self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO - Create 4 sun stages self.SUN_INTENSE =", "#level 1 if self.sun_level == 1: self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) #level 2", "time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating", "= (20,6,0) self.SUN_COLOR_OFF = (0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position", "while input != 'quit': command = 
input() if command == \"1\": sun.increase_sun() if", "self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position", "stage is: {}\".format(self.sun_level)) if self.sun_level == 4: print(\"Sunlevel already max\") return #Update level", "board import neopixel import colorsys import threading class sunController(): def __init__(self): # Choose", "- 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position", "neopixel import colorsys import threading class sunController(): def __init__(self): # Choose an open", "self.sun_level -= 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() ''' '''", "and 4 max self.sun_level = 1 # The order of the pixel colors", "sun stage is: {}\".format(self.sun_level)) if self.sun_level == 1: print(\"Sunlevel already lowest\") return #Update", "ORDER to RGBW or GRBW. self.ORDER = neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels,", "= (self.SUN_WEAK) #Reveal pixels self.pixels.show() def set_level(self, new_level): self.sun_level = new_level self.redraw_pixels() '''", "or D21 to work. 
self.pixel_pin = board.D10 # The number of NeoPixels 147", "#Start level of the sun between lowest 1 and 4 max self.sun_level =", "self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK) #Reveal pixels self.pixels.show()", "self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level == 1: self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) #level", "test for NeoPixels on Raspberry Pi import time import board import neopixel import", "(self.SUN_INTENSE) #level 2 if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)", "#Redraw pixels self.redraw_pixels() ''' ''' sun = sunController() sun.update_position() while input != 'quit':", "some reason self.num_pixels = 88 #Start position of sun self.current_position = 0 #Start", "2 if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1)", "decrease_sun(self): print(\"sun decreased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 1: print(\"Sunlevel", "work. 
self.pixel_pin = board.D10 # The number of NeoPixels 147 + 1 for", "self.current_position += 1 #Redraw pixels self.redraw_pixels() #Create a new thread for non-blocking change", "= (32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF = (0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show()", "= (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] =", "3 if self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1)", "= (self.SUN_STRONG) self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels]", "sunController() sun.update_position() while input != 'quit': command = input() if command == \"1\":", "Simple test for NeoPixels on Raspberry Pi import time import board import neopixel", "(255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF = (0,0,0)", "self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position", "def increase_sun(self): print(\"sun increased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 4:", "+1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2)", "print(\"Sunlevel already max\") return #Update level self.sun_level += 1 print(\"New sun stage is:", "reversed! 
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.", "test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3)", "and green reversed! # For RGBW NeoPixels, simply change the ORDER to RGBW", "init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def test_colors(self):", "def update_position(self): #print(\"updating position\") self.current_position += 1 #Redraw pixels self.redraw_pixels() #Create a new", "self.sun_level == 1: self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) #level 2 if self.sun_level ==", "the Data In of the NeoPixel strip, i.e. board.D18 # NeoPixels must be", "(self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position + 3) % self.num_pixels] =", "open pin connected to the Data In of the NeoPixel strip, i.e. board.D18", "if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) %", "For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW. 
self.ORDER =", "% self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position + 3)", "== 4: print(\"Sunlevel already max\") return #Update level self.sun_level += 1 print(\"New sun", "self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating position\") self.current_position += 1 #Redraw pixels self.redraw_pixels()", "147 + 1 for some reason self.num_pixels = 88 #Start position of sun", "increased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 4: print(\"Sunlevel already max\")", "= start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show()", "D21 to work. self.pixel_pin = board.D10 # The number of NeoPixels 147 +", "(self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) #level 3 if self.sun_level == 3:", "timer = threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if", "pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level == 1: self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)", "max\") return #Update level self.sun_level += 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw", "self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position - 1) % self.num_pixels] =", "self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels]", "input() if command == \"1\": sun.increase_sun() if command == \"2\": sun.decrease_sun() if command", "self.pixels.show() def test_colors(self): 
self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG)", "% self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) %", "{}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\") print(\"current sun stage is: {}\".format(self.sun_level))", "self.num_pixels] = (self.SUN_INTENSE) #level 2 if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels]", "self.sun_level == 1: print(\"Sunlevel already lowest\") return #Update level self.sun_level -= 1 print(\"New", "self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF = (0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position =", "+ 1 for some reason self.num_pixels = 88 #Start position of sun self.current_position", "self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) #level 3 if self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF)", "self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self):", "self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels]", "level of the sun between lowest 1 and 4 max self.sun_level = 1", "self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels]", "of the NeoPixel strip, i.e. 
board.D18 # NeoPixels must be connected to D10,", "The number of NeoPixels 147 + 1 for some reason self.num_pixels = 88", "self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK)", "NeoPixels on Raspberry Pi import time import board import neopixel import colorsys import", "brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO - Create 4 sun stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG", "# SPDX-License-Identifier: MIT # Simple test for NeoPixels on Raspberry Pi import time", "self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD)", "self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position - 3) % self.num_pixels] =", "D12, D18 or D21 to work. self.pixel_pin = board.D10 # The number of", "#Redraw pixels self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\") print(\"current sun stage is: {}\".format(self.sun_level)) if", "RGBW or GRBW. 
self.ORDER = neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False,", "SPDX-License-Identifier: MIT # Simple test for NeoPixels on Raspberry Pi import time import", "% self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2)", "is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\") print(\"current sun stage is:", "= (255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF =", "self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG)", "def set_level(self, new_level): self.sun_level = new_level self.redraw_pixels() ''' def increase_sun(self): print(\"sun increased\") print(\"current", "sun self.current_position = 0 #Start level of the sun between lowest 1 and", "= board.D10 # The number of NeoPixels 147 + 1 for some reason", "if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position + 1)", "= (self.SUN_WEAK) self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK) #Reveal pixels self.pixels.show() def", "In of the NeoPixel strip, i.e. 
board.D18 # NeoPixels must be connected to", "print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 4: print(\"Sunlevel already max\") return", "4: print(\"Sunlevel already max\") return #Update level self.sun_level += 1 print(\"New sun stage", "4 max self.sun_level = 1 # The order of the pixel colors -", "pixels self.pixels.show() def set_level(self, new_level): self.sun_level = new_level self.redraw_pixels() ''' def increase_sun(self): print(\"sun", "Choose an open pin connected to the Data In of the NeoPixel strip,", "to D10, D12, D18 or D21 to work. self.pixel_pin = board.D10 # The", "The order of the pixel colors - RGB or GRB. Some NeoPixels have", "start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3)", "if self.sun_level == 1: print(\"Sunlevel already lowest\") return #Update level self.sun_level -= 1", "sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() ''' ''' sun = sunController() sun.update_position()", "the pixel colors - RGB or GRB. Some NeoPixels have red and green", "increase_sun(self): print(\"sun increased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 4: print(\"Sunlevel", "-2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position", "''' def increase_sun(self): print(\"sun increased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level ==", "self.SUN_STRONG = (64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF = (0,0,0) def", "or GRBW. 
self.ORDER = neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER)", "= (self.SUN_INTENSE) #level 2 if self.sun_level == 2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] =", "self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position = start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show()", "''' ''' sun = sunController() sun.update_position() while input != 'quit': command = input()", "1 for some reason self.num_pixels = 88 #Start position of sun self.current_position =", "Data In of the NeoPixel strip, i.e. board.D18 # NeoPixels must be connected", "GRBW. self.ORDER = neopixel.GRB self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO", "+1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) #level 3", "colorsys import threading class sunController(): def __init__(self): # Choose an open pin connected", "input != 'quit': command = input() if command == \"1\": sun.increase_sun() if command", "3) % self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK) #Reveal", "self.current_position = start_position self.pixels[self.current_position] = (self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK)", "self.pixel_pin = board.D10 # The number of NeoPixels 147 + 1 for some", "Adafruit Industries # SPDX-License-Identifier: MIT # Simple test for NeoPixels on Raspberry Pi", "import neopixel import colorsys import threading class sunController(): def __init__(self): # Choose an", "sun stage is: {}\".format(self.sun_level)) 
#Redraw pixels self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\") print(\"current sun", "update_position(self): #print(\"updating position\") self.current_position += 1 #Redraw pixels self.redraw_pixels() #Create a new thread", "command == \"1\": sun.increase_sun() if command == \"2\": sun.decrease_sun() if command == \"3\":", "1: print(\"Sunlevel already lowest\") return #Update level self.sun_level -= 1 print(\"New sun stage", "1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() ''' ''' sun =", "1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position", "def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level == 1: self.pixels[self.current_position %", "self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1)", "self.sun_level == 4: print(\"Sunlevel already max\") return #Update level self.sun_level += 1 print(\"New", "lowest 1 and 4 max self.sun_level = 1 # The order of the", "timer.start() def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level == 1: self.pixels[self.current_position", "+ 3) % self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK)", "reason self.num_pixels = 88 #Start position of sun self.current_position = 0 #Start level", "(32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF = (0,0,0) def init_sun(self, start_position): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() self.current_position", "self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) 
self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def", "time.sleep(3) self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating position\") self.current_position +=", "#print(\"updating position\") self.current_position += 1 #Redraw pixels self.redraw_pixels() #Create a new thread for", "= (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] =", "self.num_pixels = 88 #Start position of sun self.current_position = 0 #Start level of", "(self.SUN_INTENSE) self.pixels.show() def test_colors(self): self.pixels.fill(self.SUN_COLOR_OFF) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_WEAK) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_MILD) self.pixels.show() time.sleep(3)", "!= 'quit': command = input() if command == \"1\": sun.increase_sun() if command ==", "green reversed! # For RGBW NeoPixels, simply change the ORDER to RGBW or", "command == \"2\": sun.decrease_sun() if command == \"3\": sun.test_colors() command = '' '''", "(self.SUN_MILD) self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK) self.pixels[(self.current_position - 3) % self.num_pixels]", "time import board import neopixel import colorsys import threading class sunController(): def __init__(self):", "- 3) % self.num_pixels] = (self.SUN_WEAK) #Reveal pixels self.pixels.show() def set_level(self, new_level): self.sun_level", "an open pin connected to the Data In of the NeoPixel strip, i.e.", "simply change the ORDER to RGBW or GRBW. 
self.ORDER = neopixel.GRB self.pixels =", "self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) #level", "(64,16,0) self.SUN_MILD = (32,8,0) self.SUN_WEAK = (20,6,0) self.SUN_COLOR_OFF = (0,0,0) def init_sun(self, start_position):", "to work. self.pixel_pin = board.D10 # The number of NeoPixels 147 + 1", "if self.sun_level == 4: print(\"Sunlevel already max\") return #Update level self.sun_level += 1", "return #Update level self.sun_level += 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels", "change the ORDER to RGBW or GRBW. self.ORDER = neopixel.GRB self.pixels = neopixel.NeoPixel(", "def __init__(self): # Choose an open pin connected to the Data In of", "1 # The order of the pixel colors - RGB or GRB. Some", "= threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level", "== 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] =", "be connected to D10, D12, D18 or D21 to work. 
self.pixel_pin = board.D10", "self.redraw_pixels() ''' ''' sun = sunController() sun.update_position() while input != 'quit': command =", "import colorsys import threading class sunController(): def __init__(self): # Choose an open pin", "2: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG)", "Raspberry Pi import time import board import neopixel import colorsys import threading class", "self.pixels.fill(self.SUN_STRONG) self.pixels.show() time.sleep(3) self.pixels.fill(self.SUN_INTENSE) self.pixels.show() time.sleep(3) def update_position(self): #print(\"updating position\") self.current_position += 1", "= (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] =", "self.current_position = 0 #Start level of the sun between lowest 1 and 4", "#Create a new thread for non-blocking change of position over time timer =", "or GRB. Some NeoPixels have red and green reversed! # For RGBW NeoPixels,", "NeoPixels must be connected to D10, D12, D18 or D21 to work. self.pixel_pin", "stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\") print(\"current sun stage", "if self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) %", "1 and 4 max self.sun_level = 1 # The order of the pixel", "# NeoPixels must be connected to D10, D12, D18 or D21 to work.", "-1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2)", "of the pixel colors - RGB or GRB. 
Some NeoPixels have red and", "% self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position -1) %", "(self.SUN_STRONG) self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD)", "print(\"sun increased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level == 4: print(\"Sunlevel already", "board.D10 # The number of NeoPixels 147 + 1 for some reason self.num_pixels", "pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!", "# The number of NeoPixels 147 + 1 for some reason self.num_pixels =", "= (self.SUN_MILD) #level 4 if self.sun_level == 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] =", "print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() ''' ''' sun = sunController()", "position\") self.current_position += 1 #Redraw pixels self.redraw_pixels() #Create a new thread for non-blocking", "i.e. 
board.D18 # NeoPixels must be connected to D10, D12, D18 or D21", "for non-blocking change of position over time timer = threading.Timer(3.4, self.update_position) timer.start() def", "(self.SUN_INTENSE) self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position - 1) % self.num_pixels]", "level self.sun_level -= 1 print(\"New sun stage is: {}\".format(self.sun_level)) #Redraw pixels self.redraw_pixels() '''", "self.redraw_pixels() def decrease_sun(self): print(\"sun decreased\") print(\"current sun stage is: {}\".format(self.sun_level)) if self.sun_level ==", "== 4: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position + 1) % self.num_pixels]", "threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level 1 if self.sun_level ==", "already lowest\") return #Update level self.sun_level -= 1 print(\"New sun stage is: {}\".format(self.sun_level))", "self.num_pixels] = (self.SUN_MILD) self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD) #level 4 if self.sun_level", "(self.SUN_STRONG) self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG) self.pixels[(self.current_position +2) % self.num_pixels] =", "% self.num_pixels] = (self.SUN_STRONG) #level 3 if self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position %", "3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG)", "# The order of the pixel colors - RGB or GRB. 
Some NeoPixels", "#level 3 if self.sun_level == 3: self.pixels.fill(self.SUN_COLOR_OFF) self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) self.pixels[(self.current_position", "Create 4 sun stages self.SUN_INTENSE = (255,64,0) self.SUN_STRONG = (64,16,0) self.SUN_MILD = (32,8,0)", "1 if self.sun_level == 1: self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE) #level 2 if", "must be connected to D10, D12, D18 or D21 to work. self.pixel_pin =", "self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO - Create 4 sun stages self.SUN_INTENSE = (255,64,0)", "ladyada for Adafruit Industries # SPDX-License-Identifier: MIT # Simple test for NeoPixels on", "position of sun self.current_position = 0 #Start level of the sun between lowest", "on Raspberry Pi import time import board import neopixel import colorsys import threading", "self.pixels = neopixel.NeoPixel( self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER) #TODO - Create 4 sun", "time.sleep(3) def update_position(self): #print(\"updating position\") self.current_position += 1 #Redraw pixels self.redraw_pixels() #Create a", "already max\") return #Update level self.sun_level += 1 print(\"New sun stage is: {}\".format(self.sun_level))", "over time timer = threading.Timer(3.4, self.update_position) timer.start() def redraw_pixels(self): #Reset pixels self.pixels.fill(self.SUN_COLOR_OFF) #level", "# Choose an open pin connected to the Data In of the NeoPixel", "- RGB or GRB. Some NeoPixels have red and green reversed! # For", "''' sun = sunController() sun.update_position() while input != 'quit': command = input() if", "\"1\": sun.increase_sun() if command == \"2\": sun.decrease_sun() if command == \"3\": sun.test_colors() command", "connected to the Data In of the NeoPixel strip, i.e. 
board.D18 # NeoPixels", "3) % self.num_pixels] = (self.SUN_WEAK) #Reveal pixels self.pixels.show() def set_level(self, new_level): self.sun_level =", "for NeoPixels on Raspberry Pi import time import board import neopixel import colorsys" ]
[ "name and a default value self.arg_names: Set[str] = None self.type: str = None", "is to process # cases like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node = node.value", "= None self.type: str = None self.init() def init(self): root = ast.parse(self.sig.lstrip().rstrip()) for", "a name and a default value self.arg_names: Set[str] = None self.type: str =", "return elif isinstance(node, ast.Attribute): self.pos_args = ['self'] self.kw_args = [] fname = [node.attr]", "This is a general function not belonging to any particular class # Ideally", "function not belonging to any particular class # Ideally this should never happen", "self.type = 'attribute' return raise Exception(\"Malformed Signature!\") @staticmethod def eval_node(node): return eval(compile(ast.Expression(node), 'dummy',", "def init(self): root = ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root): if isinstance(node, ast.Call): self.pos_args", "ast.walk(root): if isinstance(node, ast.Call): self.pos_args = list(map(lambda x: x.id, node.args)) self.kw_args = list(map(lambda", "isinstance(node, ast.Attribute): self.pos_args = ['self'] self.kw_args = [] fname = [node.attr] while isinstance(node.value,", "node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names = set(self.pos_args) | set(map(lambda", "self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute): # This is a method fname = [node.func.attr]", "set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) return elif isinstance(node, ast.Attribute): self.pos_args = ['self']", "fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type = 'method' else: # This is a", "is a method fname = [node.func.attr] node = node.func # Sometimes things can", "sig: str): self.sig: str = sig self.fname: str = None self.pos_args: List[str] =", "self.fname: str = None self.pos_args: 
List[str] = None self.kw_args: List[Tuple[str, Any]] = None", "self.arg_names: Set[str] = None self.type: str = None self.init() def init(self): root =", "fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args))", "like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname))", "complicated, the following is to process # cases like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute):", "This is a method fname = [node.func.attr] node = node.func # Sometimes things", "be complicated, the following is to process # cases like 'DataFrame.at.__getitem__' while isinstance(node.value,", "self.fname = \".\".join(reversed(fname)) self.type = 'method' else: # This is a general function", "| set(map(lambda x: x[0], self.kw_args)) return elif isinstance(node, ast.Attribute): self.pos_args = ['self'] self.kw_args", "x: (x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute): # This is a method fname", "Any]] = None # Keyword arguments have both a name and a default", "isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type = 'method'", "self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute): # This is", "ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root): if isinstance(node, ast.Call): self.pos_args = list(map(lambda x: x.id,", "str = None self.pos_args: List[str] = None self.kw_args: List[Tuple[str, Any]] = None #", "Tuple, Any, Set class ISignature: def __init__(self, sig: str): self.sig: str = sig", "Any, Set class ISignature: def __init__(self, sig: str): self.sig: str = sig self.fname:", "set(map(lambda 
x: x[0], self.kw_args)) self.type = 'attribute' return raise Exception(\"Malformed Signature!\") @staticmethod def", "'attribute' return raise Exception(\"Malformed Signature!\") @staticmethod def eval_node(node): return eval(compile(ast.Expression(node), 'dummy', 'eval'), {'nan':", "ISignature: def __init__(self, sig: str): self.sig: str = sig self.fname: str = None", "any particular class # Ideally this should never happen self.fname = node.func.id self.arg_names", "list(map(lambda x: x.id, node.args)) self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func,", "the following is to process # cases like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node", "self.init() def init(self): root = ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root): if isinstance(node, ast.Call):", "= \".\".join(reversed(fname)) self.type = 'method' else: # This is a general function not", "self.kw_args)) self.type = 'attribute' return raise Exception(\"Malformed Signature!\") @staticmethod def eval_node(node): return eval(compile(ast.Expression(node),", "ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type = 'method' else:", "x.id, node.args)) self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute): #", "ast.Attribute): self.pos_args = ['self'] self.kw_args = [] fname = [node.attr] while isinstance(node.value, ast.Attribute):", "self.kw_args)) return elif isinstance(node, ast.Attribute): self.pos_args = ['self'] self.kw_args = [] fname =", "x[0], self.kw_args)) self.type = 'attribute' return raise Exception(\"Malformed Signature!\") @staticmethod def eval_node(node): return", "'method' else: # This is a general function not belonging to any particular", "str = sig self.fname: str = None self.pos_args: List[str] = None self.kw_args: List[Tuple[str,", 
"node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type = 'method' else: # This is", "import List, Tuple, Any, Set class ISignature: def __init__(self, sig: str): self.sig: str", "have both a name and a default value self.arg_names: Set[str] = None self.type:", "fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type = 'method' else: # This is a general", "Sometimes things can be complicated, the following is to process # cases like", "to any particular class # Ideally this should never happen self.fname = node.func.id", "self.fname = node.func.id self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) return elif", "is a general function not belonging to any particular class # Ideally this", "= ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root): if isinstance(node, ast.Call): self.pos_args = list(map(lambda x:", "fname = [node.func.attr] node = node.func # Sometimes things can be complicated, the", "| set(map(lambda x: x[0], self.kw_args)) self.type = 'attribute' return raise Exception(\"Malformed Signature!\") @staticmethod", "x: x.id, node.args)) self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute):", "self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) return elif isinstance(node, ast.Attribute): self.pos_args", "self.fname = \".\".join(reversed(fname)) self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) self.type =", "self.kw_args: List[Tuple[str, Any]] = None # Keyword arguments have both a name and", "= None self.kw_args: List[Tuple[str, Any]] = None # Keyword arguments have both a", "class # Ideally this should never happen self.fname = node.func.id self.arg_names = set(self.pos_args)", "= \".\".join(reversed(fname)) self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) self.type = 'attribute'", 
"import ast from numpy import nan from typing import List, Tuple, Any, Set", "(x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute): # This is a method fname =", "elif isinstance(node, ast.Attribute): self.pos_args = ['self'] self.kw_args = [] fname = [node.attr] while", "node.args)) self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute): # This", "def __init__(self, sig: str): self.sig: str = sig self.fname: str = None self.pos_args:", "node = node.func # Sometimes things can be complicated, the following is to", "process # cases like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id)", "ast.Call): self.pos_args = list(map(lambda x: x.id, node.args)) self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)),", "isinstance(node, ast.Call): self.pos_args = list(map(lambda x: x.id, node.args)) self.kw_args = list(map(lambda x: (x.arg,", "method fname = [node.func.attr] node = node.func # Sometimes things can be complicated,", "if isinstance(node, ast.Call): self.pos_args = list(map(lambda x: x.id, node.args)) self.kw_args = list(map(lambda x:", "self.type = 'method' else: # This is a general function not belonging to", "= None self.init() def init(self): root = ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root): if", "[node.attr] while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names", "List, Tuple, Any, Set class ISignature: def __init__(self, sig: str): self.sig: str =", "for node in ast.walk(root): if isinstance(node, ast.Call): self.pos_args = list(map(lambda x: x.id, node.args))", "things can be complicated, the following is to process # cases like 'DataFrame.at.__getitem__'", "= set(self.pos_args) | set(map(lambda x: x[0], 
self.kw_args)) self.type = 'attribute' return raise Exception(\"Malformed", "<reponame>chyanju/autopandas import ast from numpy import nan from typing import List, Tuple, Any,", "else: # This is a general function not belonging to any particular class", "and a default value self.arg_names: Set[str] = None self.type: str = None self.init()", "# Sometimes things can be complicated, the following is to process # cases", "import nan from typing import List, Tuple, Any, Set class ISignature: def __init__(self,", "# Keyword arguments have both a name and a default value self.arg_names: Set[str]", "= list(map(lambda x: x.id, node.args)) self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords)) if", "default value self.arg_names: Set[str] = None self.type: str = None self.init() def init(self):", "List[Tuple[str, Any]] = None # Keyword arguments have both a name and a", "should never happen self.fname = node.func.id self.arg_names = set(self.pos_args) | set(map(lambda x: x[0],", "= ['self'] self.kw_args = [] fname = [node.attr] while isinstance(node.value, ast.Attribute): node =", "self.pos_args = list(map(lambda x: x.id, node.args)) self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords))", "Ideally this should never happen self.fname = node.func.id self.arg_names = set(self.pos_args) | set(map(lambda", "from typing import List, Tuple, Any, Set class ISignature: def __init__(self, sig: str):", "= set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) return elif isinstance(node, ast.Attribute): self.pos_args =", "node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names = set(self.pos_args) | set(map(lambda x: x[0],", "# This is a method fname = [node.func.attr] node = node.func # Sometimes", "node.func.id self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) return elif isinstance(node, ast.Attribute):", "= node.value 
fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names = set(self.pos_args) | set(map(lambda x:", "from numpy import nan from typing import List, Tuple, Any, Set class ISignature:", "self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) self.type = 'attribute' return raise", "# cases like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname", "node.func # Sometimes things can be complicated, the following is to process #", "self.kw_args = [] fname = [node.attr] while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr)", "a default value self.arg_names: Set[str] = None self.type: str = None self.init() def", "class ISignature: def __init__(self, sig: str): self.sig: str = sig self.fname: str =", "list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute): # This is a method", "node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type = 'method' else: #", "x: x[0], self.kw_args)) return elif isinstance(node, ast.Attribute): self.pos_args = ['self'] self.kw_args = []", "[] fname = [node.attr] while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname", "both a name and a default value self.arg_names: Set[str] = None self.type: str", "List[str] = None self.kw_args: List[Tuple[str, Any]] = None # Keyword arguments have both", "= 'method' else: # This is a general function not belonging to any", "this should never happen self.fname = node.func.id self.arg_names = set(self.pos_args) | set(map(lambda x:", "= node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type = 'method' else: # This", "never happen self.fname = node.func.id 
self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args))", "None self.kw_args: List[Tuple[str, Any]] = None # Keyword arguments have both a name", "a method fname = [node.func.attr] node = node.func # Sometimes things can be", "numpy import nan from typing import List, Tuple, Any, Set class ISignature: def", "fname = [node.attr] while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname =", "Set class ISignature: def __init__(self, sig: str): self.sig: str = sig self.fname: str", "\".\".join(reversed(fname)) self.type = 'method' else: # This is a general function not belonging", "self.pos_args = ['self'] self.kw_args = [] fname = [node.attr] while isinstance(node.value, ast.Attribute): node", "ast.Attribute): # This is a method fname = [node.func.attr] node = node.func #", "cases like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname =", "'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type", "sig self.fname: str = None self.pos_args: List[str] = None self.kw_args: List[Tuple[str, Any]] =", "a general function not belonging to any particular class # Ideally this should", "str = None self.init() def init(self): root = ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root):", "set(map(lambda x: x[0], self.kw_args)) return elif isinstance(node, ast.Attribute): self.pos_args = ['self'] self.kw_args =", "x[0], self.kw_args)) return elif isinstance(node, ast.Attribute): self.pos_args = ['self'] self.kw_args = [] fname", "None self.type: str = None self.init() def init(self): root = ast.parse(self.sig.lstrip().rstrip()) for node", "# This is a general function not belonging to any particular class #", "__init__(self, sig: str): self.sig: 
str = sig self.fname: str = None self.pos_args: List[str]", "Set[str] = None self.type: str = None self.init() def init(self): root = ast.parse(self.sig.lstrip().rstrip())", "node.keywords)) if isinstance(node.func, ast.Attribute): # This is a method fname = [node.func.attr] node", "isinstance(node.func, ast.Attribute): # This is a method fname = [node.func.attr] node = node.func", "= list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords)) if isinstance(node.func, ast.Attribute): # This is a", "set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) self.type = 'attribute' return raise Exception(\"Malformed Signature!\")", "return raise Exception(\"Malformed Signature!\") @staticmethod def eval_node(node): return eval(compile(ast.Expression(node), 'dummy', 'eval'), {'nan': nan})", "= [] fname = [node.attr] while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id)", "# Ideally this should never happen self.fname = node.func.id self.arg_names = set(self.pos_args) |", "= 'attribute' return raise Exception(\"Malformed Signature!\") @staticmethod def eval_node(node): return eval(compile(ast.Expression(node), 'dummy', 'eval'),", "ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names = set(self.pos_args) |", "= [node.func.attr] node = node.func # Sometimes things can be complicated, the following", "None # Keyword arguments have both a name and a default value self.arg_names:", "x: x[0], self.kw_args)) self.type = 'attribute' return raise Exception(\"Malformed Signature!\") @staticmethod def eval_node(node):", "can be complicated, the following is to process # cases like 'DataFrame.at.__getitem__' while", "Keyword arguments have both a name and a default value self.arg_names: Set[str] =", "in ast.walk(root): if isinstance(node, ast.Call): self.pos_args = list(map(lambda x: x.id, node.args)) self.kw_args =", "nan 
from typing import List, Tuple, Any, Set class ISignature: def __init__(self, sig:", "general function not belonging to any particular class # Ideally this should never", "not belonging to any particular class # Ideally this should never happen self.fname", "= None # Keyword arguments have both a name and a default value", "= None self.pos_args: List[str] = None self.kw_args: List[Tuple[str, Any]] = None # Keyword", "[node.func.attr] node = node.func # Sometimes things can be complicated, the following is", "= [node.attr] while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname))", "None self.init() def init(self): root = ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root): if isinstance(node,", "= node.func # Sometimes things can be complicated, the following is to process", "None self.pos_args: List[str] = None self.kw_args: List[Tuple[str, Any]] = None # Keyword arguments", "belonging to any particular class # Ideally this should never happen self.fname =", "typing import List, Tuple, Any, Set class ISignature: def __init__(self, sig: str): self.sig:", "if isinstance(node.func, ast.Attribute): # This is a method fname = [node.func.attr] node =", "= node.func.id self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) return elif isinstance(node,", "while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names =", "self.type: str = None self.init() def init(self): root = ast.parse(self.sig.lstrip().rstrip()) for node in", "init(self): root = ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root): if isinstance(node, ast.Call): self.pos_args =", "value self.arg_names: Set[str] = None self.type: str = None self.init() def init(self): root", "['self'] self.kw_args = [] fname = [node.attr] while isinstance(node.value, 
ast.Attribute): node = node.value", "while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.type =", "ast from numpy import nan from typing import List, Tuple, Any, Set class", "happen self.fname = node.func.id self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) return", "self.sig: str = sig self.fname: str = None self.pos_args: List[str] = None self.kw_args:", "particular class # Ideally this should never happen self.fname = node.func.id self.arg_names =", "node in ast.walk(root): if isinstance(node, ast.Call): self.pos_args = list(map(lambda x: x.id, node.args)) self.kw_args", "following is to process # cases like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node =", "root = ast.parse(self.sig.lstrip().rstrip()) for node in ast.walk(root): if isinstance(node, ast.Call): self.pos_args = list(map(lambda", "fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) self.type", "arguments have both a name and a default value self.arg_names: Set[str] = None", "to process # cases like 'DataFrame.at.__getitem__' while isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr)", "\".\".join(reversed(fname)) self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args)) self.type = 'attribute' return", "self.pos_args: List[str] = None self.kw_args: List[Tuple[str, Any]] = None # Keyword arguments have", "isinstance(node.value, ast.Attribute): node = node.value fname.append(node.attr) fname.append(node.value.id) self.fname = \".\".join(reversed(fname)) self.arg_names = set(self.pos_args)", "= sig self.fname: str = None self.pos_args: List[str] = None self.kw_args: List[Tuple[str, Any]]", "str): self.sig: str = sig self.fname: str = None self.pos_args: List[str] = None" ]
[ "self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames = [\"Unknown Car\"] noneData = [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData)", "'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [1, 2], \"Should", "def testIdentifyCarUnambiguous(self): cars = [(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100,", "5) self.assertEqual(loadedCar, 1, \"Wrong ID\") def testIdentifyCarNoResult(self): cars = [] self.database.loadCars = MagicMock(return_value=cars)", "\"Shouldn't identify car\") def testIdentifyCarAmbiguous(self): cars = [(1, 'car1'), (2, 'car2')] self.database.loadCars =", "[], \"Shouldn't identify car\") def testIdentifyCarAmbiguous(self): cars = [(1, 'car1'), (2, 'car2')] self.database.loadCars", "self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [], \"Shouldn't identify car\") def testIdentifyCarAmbiguous(self): cars = [(1,", "'update200']) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler = MagicMock()", "'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler = MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler) call1", "testIdentifyCarNoResult(self): cars = [] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar,", "MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData", "Database from timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def setUp(self): self.database = Database('test') self.database.recordResults", 
"2], \"Should return all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData)", "200] result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements =", "call('UNKNOWN', 'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200'])", "'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1, \"Wrong ID\")", "result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100',", "= [(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN", "= self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [1, 2], \"Should return all cars\") def testGetCarInterfacesStatementWithoutData(self):", "= [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName =", "MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car:", "[] self.database.loadCars = MagicMock(return_value=cars) 
loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [], \"Shouldn't identify", "2], \"Should return all tracks\") def testIdentifyCarUnambiguous(self): cars = [(1, 'car1')] self.database.loadCars =", "DatabaseAccess(self.database) def tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks)", "def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames)", "(1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames", "self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES shifting, 6 speed, with HANDBRAKE\") def testMapToShiftingData(self): shiftingData", "= MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting, 4 speed, with", "MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL DATA\")", "cars = [(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar,", "= MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [], \"Shouldn't identify car\") def", "('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates = [100, 200] result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100,", "ID\") def testIdentifyCarNoResult(self): cars = [] 
self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100,", "= call('Sprint', 'UNKNOWN', 123456789, 'update100') call2 = call('Complete', 'UNKNOWN', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2])", "CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData =", "'car1'), (2, 'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [1,", "6 speed, with HANDBRAKE\") def testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData)", "1, \"Wrong ID\") def testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack =", "[(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\")", "carNames = [\"Unknown Car\"] noneData = [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData)", "= Database('test') self.database.recordResults = MagicMock() self.thing = DatabaseAccess(self.database) def tearDown(self): pass def testIdentifyTrackUnambiguous(self):", "5) self.assertEqual(loadedCar, [], \"Shouldn't identify car\") def testIdentifyCarAmbiguous(self): cars = [(1, 'car1'), (2,", "from unittest.mock import MagicMock, call from timerecorder.database import Database from timerecorder.databaseAccess import DatabaseAccess", "= [(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1,", "MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData = 
MagicMock(side_effect=shiftingData) carNames = ['Classic Car',", "self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\") def testIdentifyTrackNoResult(self):", "4 speed, with manual CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES", "= self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\") def testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks", "['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100, 200], 123456789,", "self.thing = DatabaseAccess(self.database) def tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')] self.database.loadTracks", "\"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'), (2, 'track2')] self.database.loadTracks =", "= MagicMock(return_value=['update100', 'update200']) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler", "Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames =", "def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames = [\"Unknown Car\"] noneData", "gearsData = [(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData = [(1), (0)] self.database.loadClutchData =", "[1, 2], \"Should return all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData =", "= MagicMock(return_value=cars) loadedCar = 
self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1, \"Wrong ID\") def testIdentifyCarNoResult(self):", "[(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData)", "MagicMock(side_effect=gearsData) clutchData = [(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic", "testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates = [100, 200] result", "= MagicMock(side_effect=gearsData) clutchData = [(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface,", "loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1, \"Wrong ID\") def testIdentifyCarNoResult(self): cars =", "100, 5) self.assertEqual(loadedCar, [1, 2], \"Should return all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData =", "= self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES shifting, 6 speed, with HANDBRAKE\") def", "tracks = [(1, 'track1'), (2, 'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000)", "MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [], \"Shouldn't identify car\") def testIdentifyCarAmbiguous(self):", "= self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self): tracks = [(1,", "'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): 
self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames", "MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2], \"Should return all tracks\") def", "self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData", "testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2 PADDLES')]", "1, \"Wrong ID\") def testIdentifyCarNoResult(self): cars = [] self.database.loadCars = MagicMock(return_value=cars) loadedCar =", "123456789, 'update100') call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self):", "timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def setUp(self): self.database = Database('test') self.database.recordResults = MagicMock()", "self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify track\") def", "self.assertEqual(loadedTrack, [1, 2], \"Should return all tracks\") def testIdentifyCarUnambiguous(self): cars = [(1, 'car1')]", "= MagicMock(side_effect=shiftingData) carCandidates = [100, 200] result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'), (200,", "call from timerecorder.database import Database from timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def setUp(self):", "def testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'), (2, 'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack =", "TestDatabaseAccess(unittest.TestCase): def setUp(self): 
self.database = Database('test') self.database.recordResults = MagicMock() self.thing = DatabaseAccess(self.database) def", "(200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames = ['Classic Car', 'Modern", "'Classic Car', 123456789, 'update100') call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2])", "car\") def testIdentifyCarAmbiguous(self): cars = [(1, 'car1'), (2, 'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar", "Car\"] noneData = [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData)", "testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames = [\"Unknown Car\"] noneData =", "\"Wrong ID\") def testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10,", "self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [1, 2], \"Should return", "self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler) call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100') call2", "MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler = MagicMock() self.thing.handleTrackUpdates([100,", "self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES shifting, 6 speed, with HANDBRAKE\") def testMapToShiftingData(self):", "= MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) 
self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL DATA\") def testGetCarInterfacesStatements(self):", "from timerecorder.database import Database from timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def setUp(self): self.database", "self.assertEqual(loadedCar, 1, \"Wrong ID\") def testIdentifyCarNoResult(self): cars = [] self.database.loadCars = MagicMock(return_value=cars) loadedCar", "with manual CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES shifting, 6", "self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler = MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler) call1 =", "testIdentifyCarAmbiguous(self): cars = [(1, 'car1'), (2, 'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000,", "call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100') call2 = call('UNKNOWN', 'Modern Car', 123456789,", "[('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates = [100, 200] result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result),", "self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting, 4 speed, with manual CLUTCH\") secondCarInterface =", "MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler) call1 = call('UNKNOWN', 'Classic", "def testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates = [100, 200]", "MagicMock(side_effect=trackNames) updateHandler = MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler) call1 = call('Sprint', 'UNKNOWN',", "self.database.recordResults = 
MagicMock() self.thing = DatabaseAccess(self.database) def tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks =", "(2, 'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2], \"Should", "(6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData = [(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface =", "def testIdentifyCarNoResult(self): cars = [] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5)", "10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\") def testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks = MagicMock(return_value=tracks)", "MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self): tracks", "self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'),", "shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates = [100, 200] result =", "Database('test') self.database.recordResults = MagicMock() self.thing = DatabaseAccess(self.database) def tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks", "200], 123456789, [], updateHandler) call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100') call2 =", "self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO", "all tracks\") def testIdentifyCarUnambiguous(self): cars = [(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar 
=", "(0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting, 4", "= DatabaseAccess(self.database) def tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')] self.database.loadTracks =", "'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint', 'Complete']", "\"Wrong ID\") def testIdentifyCarNoResult(self): cars = [] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000,", "= MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2], \"Should return all tracks\")", "MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [1, 2], \"Should return all cars\")", "MagicMock(side_effect=shiftingData) carCandidates = [100, 200] result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')])", "self.database.getCarName = MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData =", "call2 = call('Complete', 'UNKNOWN', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) if __name__ == \"__main__\": unittest.main()", "[] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify track\")", "tracks = [(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1,", "= 
self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200'])", "Car: 2 PADDLES shifting, 6 speed, with HANDBRAKE\") def testMapToShiftingData(self): shiftingData = [('H-PATTERN'),", "[\"Unknown Car\"] noneData = [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData =", "MagicMock(side_effect=carNames) gearsData = [(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData = [(1), (0)] self.database.loadClutchData", "= ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100, 200],", "\"Unknown Car: NO CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)] self.database.loadHandbrakeData =", "self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates = [100, 200] result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'),", "= [(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData =", "MagicMock() self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler) call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100')", "'update100') call2 = call('Complete', 'UNKNOWN', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) if __name__ == \"__main__\":", "secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES shifting, 6 speed, with HANDBRAKE\")", "self.database.getCarName = MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100, 
200], 123456789, [], updateHandler) call1 =", "NO CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData", "PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames)", "shiftingData = [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames = ['Classic Car', 'Modern", "def testIdentifyCarAmbiguous(self): cars = [(1, 'car1'), (2, 'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar =", "call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements =", "testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [],", "123456789, [], updateHandler) call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100') call2 = call('Complete', 'UNKNOWN',", "Car', 123456789, 'update100') call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def", "self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2], \"Should return all tracks\") def testIdentifyCarUnambiguous(self): cars =", "MagicMock, call from timerecorder.database import Database from timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def", "import unittest from unittest.mock import MagicMock, call from timerecorder.database import Database from timerecorder.databaseAccess", "10000) self.assertEqual(loadedTrack, [1, 2], \"Should return all tracks\") def testIdentifyCarUnambiguous(self): cars = [(1,", "[100, 200] result = 
self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements", "carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100,", "= call('UNKNOWN', 'Classic Car', 123456789, 'update100') call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200')", "self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2], \"Should return all", "'update100') call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements", "self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler =", "= [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames = [\"Unknown Car\"] noneData = [(None)] self.database.loadShiftingData", "100, 5) self.assertEqual(loadedCar, 1, \"Wrong ID\") def testIdentifyCarNoResult(self): cars = [] self.database.loadCars =", "self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler) call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100') call2 =", "Car: NO CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData)", "call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint', 'Complete'] self.database.getTrackName =", "handbrakeData = [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames 
= [\"Unknown Car\"] noneData = [(None)]", "PADDLES shifting, 6 speed, with HANDBRAKE\") def testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData", "['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler = MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler)", "def testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack,", "class TestDatabaseAccess(unittest.TestCase): def setUp(self): self.database = Database('test') self.database.recordResults = MagicMock() self.thing = DatabaseAccess(self.database)", "[('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName", "trackNames = ['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler = MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789,", "def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName", "self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames)", "def testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2", "self.assertEqual(loadedCar, [1, 2], \"Should return all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData", "from timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def setUp(self): self.database = 
Database('test') self.database.recordResults =", "= MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler) call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100')", "'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\") def", "updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint', 'Complete'] self.database.getTrackName", "updateHandler = MagicMock() self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler) call1 = call('UNKNOWN', 'Classic Car',", "loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2], \"Should return all tracks\") def testIdentifyCarUnambiguous(self):", "with HANDBRAKE\") def testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates =", "return all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames =", "MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1, \"Wrong ID\") def testIdentifyCarNoResult(self): cars", "'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler)", "123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint',", "self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting, 4 speed, with manual CLUTCH\") secondCarInterface = 
self.thing.describeCarInterfaces(2)", "= [\"Unknown Car\"] noneData = [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData", "[(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1, \"Wrong", "identify car\") def testIdentifyCarAmbiguous(self): cars = [(1, 'car1'), (2, 'car2')] self.database.loadCars = MagicMock(return_value=cars)", "CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES shifting, 6 speed, with", "Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler) call1", "self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames =", "[], updateHandler) call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100') call2 = call('UNKNOWN', 'Modern", "MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler) call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100') call2", "= [(1, 'track1'), (2, 'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack,", "5) self.assertEqual(loadedCar, [1, 2], \"Should return all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)]", "testIdentifyCarUnambiguous(self): cars = [(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5)", "loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self): tracks 
=", "return all tracks\") def testIdentifyCarUnambiguous(self): cars = [(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar", "MagicMock(side_effect=shiftingData) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData = [(4),", "MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\") def testIdentifyTrackNoResult(self): tracks =", "= ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData = [(4), (6)] self.database.loadGearsData", "call('UNKNOWN', 'Classic Car', 123456789, 'update100') call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1,", "tracks\") def testIdentifyCarUnambiguous(self): cars = [(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000,", "speed, with manual CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES shifting,", "MagicMock() self.thing = DatabaseAccess(self.database) def tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')]", "MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting, 4 speed, with manual", "def testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000)", "self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData = [(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1)", "'update200']) trackNames = ['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler = MagicMock() 
self.thing.handleTrackUpdates([100, 200],", "DATA\") def testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'),", "self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [], \"Shouldn't identify car\")", "clutchData = [(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car:", "= [] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [], \"Shouldn't", "unittest.mock import MagicMock, call from timerecorder.database import Database from timerecorder.databaseAccess import DatabaseAccess class", "Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData = [(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData =", "= [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates = [100, 200] result = self.thing.mapCarsToShifting(carCandidates)", "setUp(self): self.database = Database('test') self.database.recordResults = MagicMock() self.thing = DatabaseAccess(self.database) def tearDown(self): pass", "= ['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler = MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789, [],", "= MagicMock() self.thing = DatabaseAccess(self.database) def tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks = [(1,", "carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData = [(4), (6)]", "cars = [(1, 'car1'), (2, 'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100,", 
"self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting, 4 speed,", "self.assertEqual(loadedCar, [], \"Shouldn't identify car\") def testIdentifyCarAmbiguous(self): cars = [(1, 'car1'), (2, 'car2')]", "\"Should return all tracks\") def testIdentifyCarUnambiguous(self): cars = [(1, 'car1')] self.database.loadCars = MagicMock(return_value=cars)", "MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)]", "DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def setUp(self): self.database = Database('test') self.database.recordResults = MagicMock() self.thing =", "100, 5) self.assertEqual(loadedCar, [], \"Shouldn't identify car\") def testIdentifyCarAmbiguous(self): cars = [(1, 'car1'),", "self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData = [(0), (1)] self.database.loadHandbrakeData", "= MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self):", "loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [], \"Shouldn't identify car\") def testIdentifyCarAmbiguous(self): cars", "noneData = [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName", "= MagicMock(side_effect=handbrakeData) carNames = [\"Unknown Car\"] noneData = [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData", "= MagicMock(side_effect=trackNames) updateHandler = MagicMock() 
self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler) call1 = call('Sprint',", "10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'), (2,", "= MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler = MagicMock()", "= self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2], \"Should return all tracks\") def testIdentifyCarUnambiguous(self): cars", "timerecorder.database import Database from timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def setUp(self): self.database =", "[], updateHandler) call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100') call2 = call('Complete', 'UNKNOWN', 123456789,", "= [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames = ['Classic Car', 'Modern Car']", "\"Classic Car: H-PATTERN shifting, 4 speed, with manual CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface,", "= MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown", "handbrakeData = [(0), (1)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData", "updateHandler = MagicMock() self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler) call1 = call('Sprint', 'UNKNOWN', 123456789,", "= [(1, 'car1'), (2, 'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5)", "shifting, 6 speed, with HANDBRAKE\") def 
testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData =", "2 PADDLES shifting, 6 speed, with HANDBRAKE\") def testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')]", "= MagicMock(side_effect=carNames) gearsData = [(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData = [(1), (0)]", "tracks = [] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't", "testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack,", "self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames =", "call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100') call2 = call('Complete', 'UNKNOWN', 123456789, 'update200') updateHandler.assert_has_calls([call1,", "('2 PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName =", "[(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames)", "[(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData = [(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface", "[(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData) firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting,", "def setUp(self): self.database = Database('test') 
self.database.recordResults = MagicMock() self.thing = DatabaseAccess(self.database) def tearDown(self):", "testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200']) trackNames = ['Sprint', 'Complete'] self.database.getTrackName = MagicMock(side_effect=trackNames) updateHandler", "'track1'), (2, 'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2],", "self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL DATA\") def", "firstCarInterface = self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting, 4 speed, with manual CLUTCH\")", "MagicMock(side_effect=handbrakeData) carNames = [\"Unknown Car\"] noneData = [(None)] self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData =", "all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames = [\"Unknown", "[1, 2], \"Should return all tracks\") def testIdentifyCarUnambiguous(self): cars = [(1, 'car1')] self.database.loadCars", "(2, 'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [1, 2],", "[(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames = [\"Unknown Car\"] noneData = [(None)] self.database.loadShiftingData =", "'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData = [(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData", "= MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) 
self.assertEqual(loadedCar, [1, 2], \"Should return all", "self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1, \"Wrong ID\") def", "updateHandler) call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100') call2 = call('UNKNOWN', 'Modern Car',", "self.database.loadShiftingData = MagicMock(side_effect=noneData) self.database.loadGearsData = MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]),", "= MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\") def testIdentifyTrackNoResult(self): tracks", "self.assertEqual(loadedTrack, 1, \"Wrong ID\") def testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack", "= MagicMock(side_effect=shiftingData) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData =", "= self.thing.describeCarInterfaces(1) self.assertEqual(firstCarInterface, \"Classic Car: H-PATTERN shifting, 4 speed, with manual CLUTCH\") secondCarInterface", "Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100, 200], 123456789, [],", "track\") def testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'), (2, 'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack", "\"Should return all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames", "= MagicMock(side_effect=noneData) self.database.loadClutchData = MagicMock(side_effect=noneData) self.database.getCarName = MagicMock(side_effect=carNames) 
self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL", "[], \"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'), (2, 'track2')] self.database.loadTracks", "unittest from unittest.mock import MagicMock, call from timerecorder.database import Database from timerecorder.databaseAccess import", "Car: H-PATTERN shifting, 4 speed, with manual CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern", "tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack =", "cars = [] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [],", "pass def testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10,", "= MagicMock(side_effect=carNames) updateHandler = MagicMock() self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler) call1 = call('UNKNOWN',", "def tearDown(self): pass def testIdentifyTrackUnambiguous(self): tracks = [(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack", "\"Modern Car: 2 PADDLES shifting, 6 speed, with HANDBRAKE\") def testMapToShiftingData(self): shiftingData =", "ID\") def testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000)", "[(1, 'car1'), (2, 'car2')] self.database.loadCars = MagicMock(return_value=cars) loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar,", "['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData = [(4), (6)] self.database.loadGearsData =", "'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): 
self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames = ['Classic Car',", "updateHandler) call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100') call2 = call('Complete', 'UNKNOWN', 123456789, 'update200')", "[(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames = ['Classic", "= self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1, \"Wrong ID\") def testIdentifyCarNoResult(self): cars = []", "[(1, 'track1'), (2, 'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1,", "200], 123456789, [], updateHandler) call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100') call2 = call('Complete',", "loadedCar = self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [1, 2], \"Should return all cars\") def", "manual CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2 PADDLES shifting, 6 speed,", "self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\") def testIdentifyTrackNoResult(self): tracks = [] self.database.loadTracks =", "carCandidates = [100, 200] result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def", "call('Sprint', 'UNKNOWN', 123456789, 'update100') call2 = call('Complete', 'UNKNOWN', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) if", "= call('UNKNOWN', 'Modern Car', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) def testHandleTrackUpdatesInvokesLambda(self): self.database.getTrackUpdateStatements = MagicMock(return_value=['update100',", "import Database from timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): 
def setUp(self): self.database = Database('test')", "cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData = [(None)] self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData) carNames = [\"Unknown Car\"]", "speed, with HANDBRAKE\") def testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates", "MagicMock(return_value=['update100', 'update200']) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) updateHandler =", "self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, 1, \"Wrong ID\") def testIdentifyCarNoResult(self): cars = [] self.database.loadCars", "H-PATTERN shifting, 4 speed, with manual CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car:", "self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [1, 2], \"Should return all cars\") def testGetCarInterfacesStatementWithoutData(self): handbrakeData", "self.database = Database('test') self.database.recordResults = MagicMock() self.thing = DatabaseAccess(self.database) def tearDown(self): pass def", "self.database.getCarName = MagicMock(side_effect=carNames) gearsData = [(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData = [(1),", "import DatabaseAccess class TestDatabaseAccess(unittest.TestCase): def setUp(self): self.database = Database('test') self.database.recordResults = MagicMock() self.thing", "= [100, 200] result = self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self):", "shifting, 4 speed, with manual CLUTCH\") secondCarInterface = self.thing.describeCarInterfaces(2) self.assertEqual(secondCarInterface, \"Modern Car: 2", "self.thing.mapCarsToShifting(carCandidates) self.assertEqual(list(result), 
[(100, 'H-PATTERN'), (200, 'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames", "import MagicMock, call from timerecorder.database import Database from timerecorder.databaseAccess import DatabaseAccess class TestDatabaseAccess(unittest.TestCase):", "= MagicMock(side_effect=handbrakeData) shiftingData = [('H-PATTERN'), ('2 PADDLES')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carNames = ['Classic", "testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames = ['Classic Car', 'Modern Car'] self.database.getCarName =", "= MagicMock() self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler) call1 = call('UNKNOWN', 'Classic Car', 123456789,", "'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55, 10000) self.assertEqual(loadedTrack, [1, 2], \"Should return", "self.assertEqual(loadedTrack, [], \"Shouldn't identify track\") def testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'), (2, 'track2')]", "testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'), (2, 'track2')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(55,", "HANDBRAKE\") def testMapToShiftingData(self): shiftingData = [('H-PATTERN'), ('SEQUENTIAL')] self.database.loadShiftingData = MagicMock(side_effect=shiftingData) carCandidates = [100,", "'UNKNOWN', 123456789, 'update100') call2 = call('Complete', 'UNKNOWN', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) if __name__", "loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong ID\") def testIdentifyTrackNoResult(self): tracks = []", "identify track\") def testIdentifyTrackAmbiguous(self): tracks = [(1, 'track1'), (2, 'track2')] self.database.loadTracks = 
MagicMock(return_value=tracks)", "'SEQUENTIAL')]) def testHandleCarUpdatesInvokesLambda(self): self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200']) carNames = ['Classic Car', 'Modern Car']", "= [(1, 'track1')] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, 1, \"Wrong", "Car', 'Modern Car'] self.database.getCarName = MagicMock(side_effect=carNames) gearsData = [(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData)", "= [] self.database.loadTracks = MagicMock(return_value=tracks) loadedTrack = self.thing.identifyTrack(10, 10000) self.assertEqual(loadedTrack, [], \"Shouldn't identify", "123456789, 'update100') call2 = call('Complete', 'UNKNOWN', 123456789, 'update200') updateHandler.assert_has_calls([call1, call2]) if __name__ ==", "123456789, [], updateHandler) call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100') call2 = call('UNKNOWN',", "= MagicMock(side_effect=carNames) self.assertEqual(self.thing.describeCarInterfaces([1]), \"Unknown Car: NO CONTROL DATA\") def testGetCarInterfacesStatements(self): handbrakeData = [(0),", "= [(4), (6)] self.database.loadGearsData = MagicMock(side_effect=gearsData) clutchData = [(1), (0)] self.database.loadClutchData = MagicMock(side_effect=clutchData)", "= self.thing.identifyCar(1000, 100, 5) self.assertEqual(loadedCar, [], \"Shouldn't identify car\") def testIdentifyCarAmbiguous(self): cars =" ]
[ "dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase =", "dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi:", "\\ intrinsics[0] == 'LENSMODEL_LATLON' or \\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def", "v): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) # unproject()", "testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg =", "*intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad in-place\", eps", "axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg =", "off the imager (x<0). This is aphysical, but it just means that the", "normalize = True, get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v", "some of the projected points are behind the camera (z<0), which is #", "mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) ) def", "1e-2) q_projected *= 0 mrcal.project(p_ref, *intrinsics, out = q_projected) testutils.confirm_equal(q_projected, q_ref, msg =", "means that the model was # made up; which it was. 
The math", "the camera p = np.array(((1.0, 2.0, 10.0), (-1.1, 0.3, 1.0), (-0.9, -1.5, 1.0)))", "{intrinsics[0]}\", eps = 1e-6) if 1: ##### Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize", "eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1,", "('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019, 0.014, -0.056, 0.050))),", "\\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected,", "different ways the # internal computation is performed if intrinsics[0] == 'LENSMODEL_PINHOLE' or", "= f\"Projecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp", ", 555.2309482 ], [-1292.8121176 , 691.9401448 ], [-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512.,", "msg = f\"Unprojecting (non-normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2],", "cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos),", "= f\"dv_di (normalized v): {intrinsics[0]}\", worstcase = True, relative = True, eps =", "that the model was # made up; which it was. 
The math still", "@nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:],", "[-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002,", "''' import sys import numpy as np import numpysane as nps import os", "f\"dv_di {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) # Normalized", "grad_broadcasted(q_ref, i_ref): return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1])", "f\"dv_dq: {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:],", "all combinations of add others here: latlon, lonlat, stereographic. Broadcasted and not. Test", "333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 649.35582325, 552.6874014],", "{intrinsics[0]} (normalized)\", eps = 1e-6) if not meta['has_gradients']: # no in-place output for", "Here I make sure the projection functions return the correct values. 
A part", "1, msg = f\"Unprojected v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected_nograd,", "pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref,", "Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize = False) cos = nps.inner(v_unprojected, p_ref) /", "out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = False, get_gradients = True, out =", "500., 533.)))), p, np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512.,", "(1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 647.79131656, 552.50386255], [-718.86844854,", "mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref): return grad(lambda pi: mrcal.project(pi[:3],", "not. None behind the camera p = np.array(((1.0, 2.0, 10.0), (-1.1, 0.3, 1.0),", "[-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 958.48347896, 529.99410342], [1229.87308989, 4625.05434521], [4327.8166836 ,", "functions return the correct values. 
A part of this is a regression test:", "= 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref):", "2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some points behind the camera!", "dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative =", "dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting", "(1512., 1112, 500., 433.), (1512., 1112, 500., 533.)))), p, np.array([[ 651.2, 555.4], [-1163.2,", "1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (non-normalized, with gradients, in-place) {intrinsics[0]}\",", "sys import numpy as np import numpysane as nps import os testdir =", "# off the imager (x<0). 
This is aphysical, but it just means that", "= True, relative = True, eps = 0.01) # a few points, some", "= True, out = v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected in-place v", "out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients, in-place) are normalized\",", "are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos =", "[-1223.38516 , 678.01468 ], [-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333.,", "mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics),", "= 0.01) # unproject() with gradients, in-place if 1: # Normalized output out=[v_unprojected,dv_dq,dv_di]", "eps = 0.01) # a few points, some wide, some not. None behind", "i_ref): return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else:", "unproject() with gradients, in-place if 1: # Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0", "testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative", "the camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 958.48347896,", "= 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v, in-place): {intrinsics[0]}\", worstcase =", "True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\",", "(normalized, with gradients) {intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref):", "1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104", "647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.), (1502.,", "for the no-gradients unproject() path return v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics, normalize =", "5.49068928]]), np.array([[ 965.9173441 , 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0,", "q_ref, msg = f\"Projecting {intrinsics[0]} in-place\", eps = 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if", "A part of this is a regression test: the \"right\" project() results were", "Note that some of the projected points are behind the camera (z<0), which", "500., 333., -0.012, 0.035, -0.001, 0.002))), p, np.array([[ 651.27371 , 555.23042 ], [-1223.38516", "qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) )", "is performed if intrinsics[0] == 'LENSMODEL_PINHOLE' or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\", "1112, 500., 533.)))), p, np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413], 
[-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4',", "the imager (x<0). This is aphysical, but it just means that the model", "= f\"Unprojected v (with gradients, in-place) are normalized\", eps = 1e-6) cos =", "1.0), (-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.), (1512., 1112, 500.,", "axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase", "testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad in-place\", eps = 1e-2) testutils.confirm_equal(dq_dp,", "np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}\", eps =", "and in-place output. I want to check all combinations of add others here:", "-568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2,", ", 555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528,", "grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject_stereographic(", "with grad\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\", eps =", "in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized v, in-place):", "333., -0.012, 0.035, -0.001, 0.002, 0.019, 0.014, -0.056, 0.050))), p, np.array([[ 651.1885442 ,", "2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 
2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)),", "1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd like to", "import mrcal import testutils from test_calibration_helpers import grad def check(intrinsics, p_ref, q_ref): ##########", "model simple: yes/no - broadcasted: yes/no - unproject normalize: yes/no - explicit \"out\"", "{intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1] *= 0 out[2] *=", "is aphysical, but it just means that the model was # made up;", "(normalized v): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di,", ", -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019,", "4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.35607966,", "# testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, # msg = f\"Unproject() should return the same", "= True, eps = 0.01) # Normalized unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics,", "= f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps", "1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, 
np.array([[2140.35607966, 1437.40149368],", "def check(intrinsics, p_ref, q_ref): ########## project q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref, msg", "1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817,", "simple: yes/no - broadcasted: yes/no - unproject normalize: yes/no - explicit \"out\" in", "0.016, 1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]]))", "Also note that some of the projected points are # off the imager", "1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *=", "check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002))), p, np.array([[ 651.27371", "1112, 500., 533.)))), p, np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT',", "worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg =", "msg = f\"Projecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg =", "1e-6) cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos 
= np.clip(cos, -1, 1) testutils.confirm_equal(", "dq_dpi_ref[...,3:], msg = f\"dq_di in-place\", eps = 1e-2) ########## unproject if 1: #####", "eps = 1e-6) cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1,", "2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043,", "return the correct values. 
A part of this is a regression test: the", "testdir = os.path.dirname(os.path.realpath(__file__)) # I import the LOCAL mrcal since that's what I'm", "]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112,", "internal computation is performed if intrinsics[0] == 'LENSMODEL_PINHOLE' or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC'", "f\"Unprojected v (with gradients, in-place) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected,", "-0.001, 0.002, -0.637, -0.002, 0.016))), p, np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897], [", "True, get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients)", "np.array([[ 651.1885442 , 555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918,", "i_ref): return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di", "points behind the camera! np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]),", "(with gradients) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)", "aphysical, but it just means that the model was # made up; which", "out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True,", "- unproject normalize: yes/no - explicit \"out\" in args check() covers all of", "*= 0 mrcal.project(p_ref, *intrinsics, out = q_projected) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}", "2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some points behind the camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618,", "dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative =", "eps = 1e-6) if not meta['has_gradients']: # no in-place output for the no-gradients", "f\"Projecting {intrinsics[0]}\", eps = 1e-2) q_projected *= 0 mrcal.project(p_ref, *intrinsics, out = q_projected)", "0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\", worstcase = True, relative = True,", "3e-2))), p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918,", "-860.8, -1135. ]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.),", "np import numpysane as nps import os testdir = os.path.dirname(os.path.realpath(__file__)) # I import", "f\"dv_di (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps =", "= mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref): return grad(lambda pi:", "cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting in-place", "dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative =", "np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}\", eps = 1e-6) if 1: ##### Normalized", "just means that the model was # made up; which it was. 
The", "in-place\", eps = 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def", "unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd like to turn this", "np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting in-place {intrinsics[0]}\", eps = 1e-6) ### unproject", "2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857,", "# a few points, some wide, some not. 
None behind the camera p", "since that's what I'm testing sys.path[:0] = f\"{testdir}/..\", import mrcal import testutils from", "out=out) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad in-place\", eps = 1e-2)", "1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001],", "1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal(", "mrcal.project(p_ref, *intrinsics, out = q_projected) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} in-place\", eps", "path return v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, out = v_unprojected)", "0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274, 1513.20891648], [ 872.53696336,", "the same thing whether get_gradients or not\", # eps = 1e-6) # Two", "\"out\" in args check() covers all of these for ONE model ''' import", "651.1885442 , 555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771,", "= True, out = out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with", "[-1163.2, 766.6], [ -860.8, -1135. 
]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.), (1502.,", "1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[", "nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v): {intrinsics[0]}\",", "1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019))), p, np.array([[ 651.2740691 , 555.2309482", "v (with gradients, in-place) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref)", "-770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002,", "numpy as np import numpysane as nps import os testdir = os.path.dirname(os.path.realpath(__file__)) #", "gradients, in-place if 1: # Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *=", "0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True, out", "check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500.,", "qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref):", "-1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting in-place {intrinsics[0]}\", eps =", "eps = 0.01) # unproject() with gradients, in-place if 1: # Normalized output", "*intrinsics) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}\", eps = 1e-2) q_projected *= 0", "(1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 650.69900257, 551.44238248], [-751.13786254,", "f\"Projecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\",", "-1238.4871943 
]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016))),", "dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v): {intrinsics[0]}\", worstcase = True, relative = True,", "2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366,", "grad in-place\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\", eps =", "965.9173441 , 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5,", "0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 958.48347896, 529.99410342], [1229.87308989, 4625.05434521], [4327.8166836 , 3183.44237796]]))", "f\"Unprojecting (non-normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg =", "currently # # testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, # msg = f\"Unproject() should return", "msg = f\"Unprojected v (with 
gradients, in-place) are normalized\", eps = 1e-6) cos", "-0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 958.48347896, 529.99410342], [1229.87308989, 4625.05434521], [4327.8166836", "-2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019, 0.014,", "computations, to match the two different ways the # internal computation is performed", "-2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-8,", "dtype=float), msg = f\"Unprojecting {intrinsics[0]}\", eps = 1e-6) if 1: ##### Normalized v_unprojected_nograd", "433.), (1522., 1112, 500., 533.)))), p, np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]]))", "872.53696336, -731.32905711]])) # Note that some of the projected points are behind the", "= grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg = f\"Unprojecting {intrinsics[0]} with grad\", eps =", "2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 
2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271,", "is flagged. This also test gradients, normalization and in-place output. I want to", "the two different ways the # internal computation is performed if intrinsics[0] ==", "dtype=float), msg = f\"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq,", "unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True) testutils.confirm_equal(", "*intrinsics, normalize = False, get_gradients = True, out = out) cos = nps.inner(v_unprojected,", "get_gradients: yes/no - model simple: yes/no - broadcasted: yes/no - unproject normalize: yes/no", "-559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112,", "I'd like to turn this on, but unproject() doesn't behave the way it", "should, so this test always fails currently # # testutils.confirm_equal( v_unprojected, # v_unprojected_nograd,", "2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 
2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907,", "explicit \"out\" in args check() covers all of these for ONE model '''", "def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref =", "as np import numpysane as nps import os testdir = os.path.dirname(os.path.realpath(__file__)) # I", "True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients) are normalized\", eps", "of add others here: latlon, lonlat, stereographic. Broadcasted and not. Test the project()", "0.002))), p, np.array([[ 651.27371 , 555.23042 ], [-1223.38516 , 678.01468 ], [-1246.7310448, -1822.799928", "out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize", "True, eps = 0.01) # Normalized unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize", "True, get_gradients = True, out = out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected", "test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407,", "normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos,", "with grad in-place\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\", eps", "dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, 
*intrinsics), q_projected, msg = f\"Unprojecting {intrinsics[0]} with grad\", eps", "doesn't behave the way it # should, so this test always fails currently", "1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\", eps", "[ 496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001,", "the camera (z<0), which is # possible with these models. Also note that", "or \\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda", "= True, get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with", "= out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients, in-place) are", "np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}\", eps = 1e-6) if 1: #####", ") def grad_broadcasted(p_ref, i_ref): return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref", "msg = f\"dq_dp in-place\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di in-place\",", "output for the no-gradients unproject() path return v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics, normalize", "was. 
The math still works normally, and this is just fine as #", "intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg = f\"Unprojecting", "eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1] *= 0 out[2] *= 0", "[ 489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001,", "the # internal computation is performed if intrinsics[0] == 'LENSMODEL_PINHOLE' or \\ intrinsics[0]", "also test gradients, normalization and in-place output. I want to check all combinations", "2e-2, 3e-2))), p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40',", "np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771,", "msg = f\"dv_di (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True,", "eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di,", "-731.32905711]])) # Note that some of the projected points are behind the camera", "unproject if 1: ##### Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize = False) cos", "np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define(", "v (with gradients) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) /", "points, some wide, some not. 
None behind the camera p = np.array(((1.0, 2.0,", "eps = 1e-6) # Two different gradient computations, to match the two different", "testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}\", eps", "not. Test the project() and unproject() paths - project/unproject - get_gradients: yes/no -", "p, np.array([[ 651.1885442 , 555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR',", "eps = 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref,", "worstcase = True, relative = True, eps = 0.01) # unproject() with gradients,", "-0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441 , 524.31894367], [1246.58668369,", "(1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 649.35582325, 552.6874014], [-813.05440267,", "unproject() Here I make sure the projection functions return the correct values. 
A", "get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients) are", "((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True),", "f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps =", "p = np.array(((1.0, 2.0, 10.0), (-1.1, 0.3, 1.0), (-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE',", "*= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = False, get_gradients = True,", "-0.002, 0.016, 1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636], [ 970.11788123,", "f\"Unprojecting {intrinsics[0]} (normalized)\", eps = 1e-6) if not meta['has_gradients']: # no in-place output", "= f\"dq_di in-place\", eps = 1e-2) ########## unproject if 1: ##### Un-normalized v_unprojected", "but it just means that the model was # made up; which it", "grad def check(intrinsics, p_ref, q_ref): ########## project q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref,", "np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019, 0.014, -0.056, 0.050))), p,", "2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 
2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213,", "gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected),", "Two different gradient computations, to match the two different ways the # internal", "dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\", worstcase = True, relative = True, eps =", "(non-normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq", "('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))),", "normalize = False, get_gradients = True, out = out) cos = nps.inner(v_unprojected, p_ref)", "[ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002,", "q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}\", eps = 1e-2)", "1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v, in-place): {intrinsics[0]}\", worstcase = True,", "msg = f\"Projecting {intrinsics[0]} with grad in-place\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg", "-0.002, 0.016))), p, np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]])) check(", "eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\", worstcase = True, relative", "relative = True, eps = 0.01) if 1: # un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0]", "0 out[2] *= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg = 
f\"Projecting", "that some of the projected points are behind the camera (z<0), which is", "= f\"Unprojecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq:", "1112, 500., 333., -0.012, 0.035, -0.001, 0.002))), p, np.array([[ 651.27371 , 555.23042 ],", "grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with", "are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos =", "1437.40149368], [ 489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302,", "*= 0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True,", "yes/no - broadcasted: yes/no - unproject normalize: yes/no - explicit \"out\" in args", "], [-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001,", "0.019, 0.014, -0.056, 0.050))), p, np.array([[ 651.1885442 , 555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263,", "grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase = True, relative", "grad\", eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\", worstcase = True,", "testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative", "= f\"dv_di {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) #", "[-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500.,", "{intrinsics[0]} with grad in-place\", eps = 1e-2) 
testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\",", "3e-8))), p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918,", "False, get_gradients = True, out = out) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)", "np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p,", "get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad in-place\", eps =", "True, relative = True, eps = 0.01) # Normalized unprojected gradients v_unprojected,dv_dq,dv_di =", "2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688,", "no in-place output for the no-gradients unproject() path return v_unprojected *= 0 mrcal.unproject(q_projected,", "{intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\", worstcase", "q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True) 
testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad\",", "if 1: # Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2]", "testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg = f\"Unprojecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dv_dq,", "1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546],", "ONE model ''' import sys import numpy as np import numpysane as nps", "dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di", "1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg", "4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016))), p, np.array([[ 2143.17840406, 1442.93419919], [", "((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref): return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1))", "2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some points behind the", "= True, relative = True, eps = 0.01) # unproject() with gradients, in-place", "np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]} (normalized)\", eps", "of the projected points are behind the camera (z<0), which is # possible", "mrcal import testutils from test_calibration_helpers 
import grad def check(intrinsics, p_ref, q_ref): ########## project", "\\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi:", "q_ref): ########## project q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}\",", "os.path.dirname(os.path.realpath(__file__)) # I import the LOCAL mrcal since that's what I'm testing sys.path[:0]", "p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771,", "3e-2))), p, np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]])) # Note", "v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di,", "qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2],", "dtype=float), msg = f\"Unprojecting in-place {intrinsics[0]}\", eps = 1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di", "out) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal(", "gradients, normalization and in-place output. I want to check all combinations of add", "model was # made up; which it was. The math still works normally,", "1112, 500., 533.)))), p, np.array([[ 651.2, 555.4], [-1163.2, 766.6], [ -860.8, -1135. 
]]))", "= 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v): {intrinsics[0]}\", worstcase = True,", "# no in-place output for the no-gradients unproject() path return v_unprojected *= 0", "msg = f\"dv_dq: {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01)", "if 1: ##### Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize = True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd),", "= 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di in-place\", eps = 1e-2) ########## unproject", "fine as # a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649,", "= f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define( ((2,),('N',)) ) def", "v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize = True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected", "dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative =", "f\"dv_dq (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps =", "relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized v,", "testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3],", "axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda", "msg = f\"dv_di {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01)", "with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) 
testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized", "500., 333., -0.012, 0.035, -0.001, 0.002, 0.019))), p, np.array([[ 651.2740691 , 555.2309482 ],", "the camera! np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441", "]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019))), p,", "1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019, 0.014, -0.056, 0.050))), p, np.array([[", "no-gradients unproject() path return v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, out", "533.)))), p, np.array([[ 649.35582325, 552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112,", "p, np.array([[ 651.2740691 , 555.2309482 ], [-1292.8121176 , 691.9401448 ], [-1987.550162 , -2730.85863427]]))", "1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016))), p, np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066,", "q_ref, msg = f\"Projecting {intrinsics[0]} with grad in-place\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3],", "1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True,", "wide, some not. 
None behind the camera p = np.array(((1.0, 2.0, 10.0), (-1.1,", "{intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) if 1: #", "q_projected *= 0 mrcal.project(p_ref, *intrinsics, out = q_projected) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting", "0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = False, get_gradients", "if not meta['has_gradients']: # no in-place output for the no-gradients unproject() path return", "'LENSMODEL_LATLON' or \\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return", "= 0.01) if 1: # un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *=", "\"right\" project() results were recorded at some point, and any deviation is flagged.", "# eps = 1e-6) # Two different gradient computations, to match the two", "(normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq", "imager (x<0). 
This is aphysical, but it just means that the model was", "2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467,", "like to turn this on, but unproject() doesn't behave the way it #", "('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019))), p, np.array([[ 651.2740691", "np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting in-place {intrinsics[0]}\", eps", "v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) #", "0.035, -0.001, 0.002, 0.019))), p, np.array([[ 651.2740691 , 555.2309482 ], [-1292.8121176 , 691.9401448", "meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref): return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]),", "-0.012, 0.035, -0.001, 0.002, 0.019, 0.014, -0.056, 0.050))), p, np.array([[ 651.1885442 , 555.10514968],", "= True, eps = 0.01) if 1: # un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *=", "(-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 
500., 333.), (1512., 1112, 500., 433.),", "= np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]} (normalized)\",", "= True, get_gradients = True, out = out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg =", "testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected v are normalized\", eps = 1e-6) cos", "check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2,", "get_gradients = True, out = out) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos", "in args check() covers all of these for ONE model ''' import sys", "{intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v, in-place): {intrinsics[0]}\",", "p, np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413], [-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500.,", "eps = 1e-2) ########## unproject if 1: ##### Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics,", "in-place if 1: # Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0", "np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6)", "3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407,", "-1, 1) 
testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]} (normalized)\", eps =", "mrcal since that's what I'm testing sys.path[:0] = f\"{testdir}/..\", import mrcal import testutils", "= mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd like to turn this on, but unproject()", "I make sure the projection functions return the correct values. A part of", "-0.001, 0.002))), p, np.array([[ 651.27371 , 555.23042 ], [-1223.38516 , 678.01468 ], [-1246.7310448,", "333., -0.012, 0.035, -0.001, 0.002))), p, np.array([[ 651.27371 , 555.23042 ], [-1223.38516 ,", "533.)))), p, np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112,", "1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396,", "yes/no - model simple: yes/no - broadcasted: yes/no - unproject normalize: yes/no -", "= True, eps = 0.01) # a few points, some wide, some not.", "[-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522.,", "True, relative = 
True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized", "(1522., 1112, 500., 533.)))), p, np.array([[ 649.35582325, 552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check(", "eps = 1e-6) @nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi: \\", "camera (z<0), which is # possible with these models. Also note that some", "= True, eps = 0.01) # unproject() with gradients, in-place if 1: #", "-249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016,", "= out) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1)", "v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos", "in-place {intrinsics[0]}\", eps = 1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True)", "0.01) if 1: # un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0", "this on, but unproject() doesn't behave the way it # should, so this", "0.01) # a few points, some wide, some not. 
None behind the camera", "= True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected v are normalized\", eps =", "552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.), (1502., 1112,", "behave the way it # should, so this test always fails currently #", "('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807,", "= 0.01) # Normalized unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize = True,", "True, eps = 0.01) # unproject() with gradients, in-place if 1: # Normalized", "any deviation is flagged. This also test gradients, normalization and in-place output. I", "np.array([[ 651.2, 555.4], [-1163.2, 766.6], [ -860.8, -1135. 
]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112,", "same thing whether get_gradients or not\", # eps = 1e-6) # Two different", "p, np.array([[ 651.27371 , 555.23042 ], [-1223.38516 , 678.01468 ], [-1246.7310448, -1822.799928 ]]))", "- broadcasted: yes/no - unproject normalize: yes/no - explicit \"out\" in args check()", "f\"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg =", "('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))),", "intrinsics[0] == 'LENSMODEL_LATLON' or \\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref,", "def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))),", "normalize = True, out = v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected in-place", "dtype=float), msg = f\"Unprojecting (non-normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq,", "q_ref, msg = f\"Projecting {intrinsics[0]}\", eps = 1e-2) q_projected *= 0 mrcal.project(p_ref, *intrinsics,", "args check() covers all of these for ONE model ''' import sys import", "and any deviation is flagged. This also test gradients, normalization and in-place output.", "-1135. ]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522.,", "testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg =", "[ -860.8, -1135. 
]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500.,", "[-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500.,", "{intrinsics[0]}\", eps = 1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True) #", "True, relative = True, eps = 0.01) # unproject() with gradients, in-place if", "this test always fails currently # # testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, # msg", "533.)))), p, np.array([[ 651.2, 555.4], [-1163.2, 766.6], [ -860.8, -1135. ]])) check( ('LENSMODEL_STEREOGRAPHIC',", "2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519,", "testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, # msg = f\"Unproject() should return the same thing", "check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019, 0.014, -0.056,", "which it was. 
The math still works normally, and this is just fine", "testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\", worstcase = True, relative = True, eps", "grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref", "0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636],", "just fine as # a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628,", "@nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic(", "= 1e-2) q_projected *= 0 mrcal.project(p_ref, *intrinsics, out = q_projected) testutils.confirm_equal(q_projected, q_ref, msg", "1085.302, -0.001, 0.002, -0.637, -0.002, 0.016))), p, np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897],", "worstcase = True, relative = True, eps = 0.01) if 1: # un-normalized", "normally, and this is just fine as # a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([", "I import the LOCAL mrcal since that's what I'm testing sys.path[:0] = f\"{testdir}/..\",", "-0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 958.48347896, 529.99410342], [1229.87308989, 4625.05434521],", "(with gradients, in-place) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) /", "msg = f\"dq_di in-place\", eps = 1e-2) ########## unproject if 1: ##### Un-normalized", "f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps =", "def grad_broadcasted(p_ref, i_ref): return grad(lambda pi: mrcal.project(pi[:3], 
intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref =", "is a regression test: the \"right\" project() results were recorded at some point,", "0.050))), p, np.array([[ 651.1885442 , 555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943 ]])) check(", "yes/no - explicit \"out\" in args check() covers all of these for ONE", "f\"dq_dp in-place\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di in-place\", eps =", "2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581,", "-0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274, 1513.20891648],", "1: ##### Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize = False) cos = nps.inner(v_unprojected,", "part of this is a regression test: the \"right\" project() results were recorded", "f\"Projecting {intrinsics[0]} in-place\", eps = 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',))", "msg = f\"Unprojecting {intrinsics[0]} 
(normalized)\", eps = 1e-6) if not meta['has_gradients']: # no", "relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\", worstcase", "1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}\", eps = 1e-6) if", "= mrcal.unproject(q_projected, *intrinsics, normalize = False) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos", "*= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True, out = out)", "1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319,", "check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 
2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327,", "others here: latlon, lonlat, stereographic. Broadcasted and not. Test the project() and unproject()", "for ONE model ''' import sys import numpy as np import numpysane as", "f\"Unprojecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\",", "import numpysane as nps import os testdir = os.path.dirname(os.path.realpath(__file__)) # I import the", "True, eps = 0.01) # a few points, some wide, some not. None", "in-place output for the no-gradients unproject() path return v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics,", "500., 333., -0.012, 0.035, -0.001, 0.002, 0.019, 0.014, -0.056, 0.050))), p, np.array([[ 651.1885442", "np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413], [-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333.,", "whether get_gradients or not\", # eps = 1e-6) # Two different gradient computations,", "few points, some wide, some not. 
None behind the camera p = np.array(((1.0,", "0.01) # Normalized unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients", "*= 0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = False,", "[1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716,", "dtype=float), msg = f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define( ((2,),('N',))", "= 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:],", "msg = f\"Unprojected v (with gradients) are normalized\", eps = 1e-6) cos =", "testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients, in-place) are normalized\", eps", "unproject() doesn't behave the way it # should, so this test always fails", "True, eps = 0.01) if 1: # un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0", "= 1e-6) if 1: ##### Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize = True)", "= 1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd like", "f\"dq_di {intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1] *= 0 out[2]", "eps = 1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd", "p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg", "np.clip(cos, 
-1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients)", "552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.), (1502., 1112,", "1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001], [", "intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref,", "{intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) # a few", "cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (non-normalized,", "-1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.), (1512., 1112, 500., 433.), (1512.,", "654.42977413], [-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002))),", "True, out = out) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos,", "and unproject() Here I make sure the projection functions return the correct values.", "testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting in-place {intrinsics[0]}\", eps = 1e-6) ###", "4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923,", "= f\"Unprojected v (with gradients) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected,", "= f\"Unprojecting {intrinsics[0]} (normalized)\", eps = 1e-6) if not meta['has_gradients']: # no in-place", "433.), (1522., 1112, 500., 533.)))), p, np.array([[ 649.35582325, 552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]]))", "2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 
2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some points behind", "1437.37148001], [ 496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302,", "correct values. A part of this is a regression test: the \"right\" project()", "= os.path.dirname(os.path.realpath(__file__)) # I import the LOCAL mrcal since that's what I'm testing", "out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients =", "mrcal.unproject(q_projected, *intrinsics, normalize = False) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos =", "the LOCAL mrcal since that's what I'm testing sys.path[:0] = f\"{testdir}/..\", import mrcal", "a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294,", "msg = f\"Unprojecting {intrinsics[0]}\", eps = 1e-6) if 1: ##### Normalized v_unprojected_nograd =", "check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016))), p, np.array([[", ") def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: mrcal.unproject(qi[:2], 
intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref", "nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float),", "1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting in-place {intrinsics[0]}\", eps = 1e-6)", "0.035, -0.001, 0.002))), p, np.array([[ 651.27371 , 555.23042 ], [-1223.38516 , 678.01468 ],", "2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some points behind the camera! np.array([[-0.8479983,", "works normally, and this is just fine as # a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200',", "at some point, and any deviation is flagged. 
This also test gradients, normalization", "*intrinsics), q_projected, msg = f\"Unprojecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2],", "f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref,", "it just means that the model was # made up; which it was.", "msg = f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True,", "relative = True, eps = 0.01) # Normalized unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected,", "i_ref): return grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref =", "<gh_stars>10-100 #!/usr/bin/python3 r'''Tests for project() and unproject() Here I make sure the projection", "*= 0 out[2] *= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg =", "the no-gradients unproject() path return v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True,", "0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True,", "normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v):", "1495.37110356], [ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637,", "in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) # a", "(normalized v): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) #", "msg = f\"Projecting {intrinsics[0]}\", eps = 1e-2) q_projected *= 0 mrcal.project(p_ref, *intrinsics, out", "np.array([[ 649.35582325, 
552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.),", "mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg =", "to check all combinations of add others here: latlon, lonlat, stereographic. Broadcasted and", "'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]),", "relative = True, eps = 0.01) # unproject() with gradients, in-place if 1:", "nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg = f\"Unprojecting {intrinsics[0]} with", "testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v): {intrinsics[0]}\", worstcase = True, relative =", "1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg", "('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))),", "= q_projected) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} in-place\", eps = 1e-2) meta", "camera! np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 958.48347896, 529.99410342],", "np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}\", eps =", "the projected points are # off the imager (x<0). 
This is aphysical, but", "v): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:],", "relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v):", "496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002,", "True, relative = True, eps = 0.01) if 1: # un-normalized output out=[v_unprojected,dv_dq,dv_di]", "/ nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg =", "in-place output. I want to check all combinations of add others here: latlon,", "= 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1)", "dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\", worstcase = True, relative = True, eps =", "Normalized unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True)", "0.002, 0.019))), p, np.array([[ 651.2740691 , 555.2309482 ], [-1292.8121176 , 691.9401448 ], [-1987.550162", "== 'LENSMODEL_LATLON' or \\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref):", "533.)))), p, np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413], [-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112,", "0 mrcal.unproject(q_projected, *intrinsics, normalize = True, out = v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg", "-573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112,", "and not. 
Test the project() and unproject() paths - project/unproject - get_gradients: yes/no", ", 555.23042 ], [-1223.38516 , 678.01468 ], [-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512.,", "grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref,", "values. A part of this is a regression test: the \"right\" project() results", "-0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441 , 524.31894367], [1246.58668369, 4621.35427783],", "pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics,", "-0.012, 0.035, -0.001, 0.002, 0.019))), p, np.array([[ 651.2740691 , 555.2309482 ], [-1292.8121176 ,", "= mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad\", eps", "mrcal.unproject(q_projected, *intrinsics, normalize = False, get_gradients = True, out = out) cos =", "1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}\",", "= f\"dv_dq: {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di,", "-0.001, 0.002, 0.019, 0.014, -0.056, 0.050))), p, np.array([[ 651.1885442 , 555.10514968], [-1234.45480366, 680.23499814],", "are # off the imager (x<0). 
This is aphysical, but it just means", "LOCAL mrcal since that's what I'm testing sys.path[:0] = f\"{testdir}/..\", import mrcal import", "333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 647.79131656, 552.50386255],", "msg = f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase = True, relative = True, eps", "latlon, lonlat, stereographic. Broadcasted and not. Test the project() and unproject() paths -", "get_gradients = True, out = out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v", "flagged. This also test gradients, normalization and in-place output. I want to check", "], [ 423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]])) # Note that some of the", "out[1] *= 0 out[2] *= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg", "meta['has_gradients']: # no in-place output for the no-gradients unproject() path return v_unprojected *=", "q_projected, msg = f\"Unprojecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg", "eps = 0.01) if 1: # un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1]", "555.4], [-1163.2, 766.6], [ -860.8, -1135. ]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.),", "a few points, some wide, some not. 
None behind the camera p =", "(1522., 1112, 500., 533.)))), p, np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413], [-615.34458492, -400.73749463]])) check(", "2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348,", "broadcasted: yes/no - unproject normalize: yes/no - explicit \"out\" in args check() covers", "-0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783,", "*intrinsics, get_gradients=True) # I'd like to turn this on, but unproject() doesn't behave", "are behind the camera (z<0), which is # possible with these models. 
Also", "output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics,", "cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized,", "# Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2] *= 0", "1513.20891648], [ 872.53696336, -731.32905711]])) # Note that some of the projected points are", "1438.2774104 ], [ 423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]])) # Note that some of", "1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di in-place\", eps = 1e-2) ########## unproject if", "954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016,", "2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some points behind the camera! 
np.array([[-0.8479983, -0.52999894,", "500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 649.35582325, 552.6874014], [-813.05440267, 698.1222302], [-408.67354332,", "= 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\", worstcase = True, relative =", "= 1e-6) # Two different gradient computations, to match the two different ways", "f\"Unprojected in-place v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) /", "1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]])) check(", "0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v, in-place): {intrinsics[0]}\", worstcase = True,", "== 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0],", "some not. None behind the camera p = np.array(((1.0, 2.0, 10.0), (-1.1, 0.3,", "2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), #", "\\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) 
testutils.confirm_equal(mrcal.project(v_unprojected,", "unproject() paths - project/unproject - get_gradients: yes/no - model simple: yes/no - broadcasted:", "= mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}\", eps = 1e-2) q_projected", "check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8,", "-0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104 ], [", "= f\"Unprojected in-place v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref)", "mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad in-place\",", "check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327,", "testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative", "qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg = f\"Unprojecting {intrinsics[0]}", "# some points behind the camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792,", "*intrinsics, normalize = True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected v are normalized\",", "True, relative = True, eps = 0.01) # a few points, some wide,", "1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\", worstcase = True, relative = True,", "import os testdir = os.path.dirname(os.path.realpath(__file__)) # I import the LOCAL mrcal since that's", "= v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected in-place v are normalized\", eps", "project() and unproject() paths - project/unproject - get_gradients: yes/no - model simple: yes/no", "testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\", worstcase = True, relative = True, eps", "0.01) # unproject() with gradients, in-place if 1: # Normalized output out=[v_unprojected,dv_dq,dv_di] out[0]", "1442.93419919], [ -92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302,", "as nps import os testdir = os.path.dirname(os.path.realpath(__file__)) # I import the LOCAL mrcal", "1, msg = f\"Unprojected v (with gradients, in-place) are normalized\", eps = 1e-6)", "555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302,", "np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\", eps = 1e-6)", "out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out)", "i_ref): return grad(lambda qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1))", "1e-6) if 1: ##### 
Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize = True) testutils.confirm_equal(", "gradient computations, to match the two different ways the # internal computation is", "== 'LENSMODEL_PINHOLE' or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] == 'LENSMODEL_LATLON' or", "# un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2] *= 0", "p, np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500.,", "normalization and in-place output. I want to check all combinations of add others", "np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]} (normalized)\", eps = 1e-6) if not", "-0.012, 0.035, -0.001, 0.002))), p, np.array([[ 651.27371 , 555.23042 ], [-1223.38516 , 678.01468", "msg = f\"Unprojecting in-place {intrinsics[0]}\", eps = 1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di =", "in-place\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di in-place\", eps = 1e-2)", "{intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) # unproject() with", "with gradients) {intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref): return", "678.01468 ], [-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333., -0.012, 0.035,", "normalize = False) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1,", "{intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\", eps = 1e-2)", "with gradients, in-place if 1: # Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1]", "testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (non-normalized, with gradients, in-place) 
{intrinsics[0]}\", eps", "testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase = True, relative =", "-0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 958.48347896, 529.99410342], [1229.87308989, 4625.05434521], [4327.8166836 , 3183.44237796]])) testutils.finish()", "-0.001, 0.002, 0.019))), p, np.array([[ 651.2740691 , 555.2309482 ], [-1292.8121176 , 691.9401448 ],", "thing whether get_gradients or not\", # eps = 1e-6) # Two different gradient", "True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v): {intrinsics[0]}\", worstcase", "grad_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref,", "This is aphysical, but it just means that the model was # made", "0.002, 0.019, 0.014, -0.056, 0.050))), p, np.array([[ 651.1885442 , 555.10514968], [-1234.45480366, 680.23499814], [", "worstcase = True, relative = True, eps = 0.01) # a few points,", "np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678,", "*= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with", "yes/no - unproject 
normalize: yes/no - explicit \"out\" in args check() covers all", "# Normalized unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients =", "testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected in-place v are normalized\", eps = 1e-6)", "grad\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\", eps = 1e-2)", "2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329,", "= True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di", "651.2740691 , 555.2309482 ], [-1292.8121176 , 691.9401448 ], [-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8',", "-0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274, 1513.20891648], [", "-0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356], [", "1: ##### Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize = True) testutils.confirm_equal( 
nps.norm2(v_unprojected_nograd), 1,", "0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441 , 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]]))", "# internal computation is performed if intrinsics[0] == 'LENSMODEL_PINHOLE' or \\ intrinsics[0] ==", "= f\"Unprojecting {intrinsics[0]}\", eps = 1e-6) if 1: ##### Normalized v_unprojected_nograd = mrcal.unproject(q_projected,", "555.2309482 ], [-1292.8121176 , 691.9401448 ], [-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112,", "*intrinsics, out = q_projected) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} in-place\", eps =", "500., 433.), (1512., 1112, 500., 533.)))), p, np.array([[ 651.2, 555.4], [-1163.2, 766.6], [", "check(intrinsics, p_ref, q_ref): ########## project q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref, msg =", "0.035, -0.001, 0.002, 0.019, 0.014, -0.056, 0.050))), p, np.array([[ 651.1885442 , 555.10514968], [-1234.45480366,", "524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649,", "-0.637, -0.002, 0.016))), p, np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]]))", "##### Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize = False) cos = nps.inner(v_unprojected, p_ref)", "v_unprojected_nograd, # msg = f\"Unproject() should return the same thing whether get_gradients or", "made up; which it was. 
The math still works normally, and this is", "meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref): return grad(lambda", "check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2,", "with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized", ", 678.01468 ], [-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333., -0.012,", "def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1))", "grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq,", "*intrinsics, normalize = False) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos,", "True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v, in-place): {intrinsics[0]}\",", "[ 423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]])) # Note that some of the projected", "np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002))), p, np.array([[ 651.27371 , 555.23042", "0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients", "f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01)", "eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\", eps = 1e-2) testutils.confirm_equal(dq_di,", "the model was # made up; which it was. 
The math still works", "0 mrcal.unproject(q_projected, *intrinsics, normalize = False, get_gradients = True, out = out) cos", "in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:],", "mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd like to turn this on, but unproject() doesn't", "as # a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716,", "testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\", eps =", "mrcal.unproject(q_projected, *intrinsics, normalize = True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected v are", "eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\", worstcase = True, relative", "gradients) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos", "1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref): return", "out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = False, get_gradients =", "is just fine as # a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5,", "dtype=float), msg = f\"Unprojecting {intrinsics[0]} (normalized)\", eps = 1e-6) if not meta['has_gradients']: #", "always fails currently # # testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, # msg = f\"Unproject()", "mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, 
q_ref, msg = f\"Projecting {intrinsics[0]}\", eps = 1e-2) q_projected *=", "q_projected) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} in-place\", eps = 1e-2) meta =", "it # should, so this test always fails currently # # testutils.confirm_equal( v_unprojected,", "423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]])) # Note that some of the projected points", "1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104 ],", "if 1: ##### Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize = False) cos =", "intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] == 'LENSMODEL_LATLON' or \\ intrinsics[0] == 'LENSMODEL_LONLAT':", "the project() and unproject() paths - project/unproject - get_gradients: yes/no - model simple:", "1112, 500., 433.), (1512., 1112, 500., 533.)))), p, np.array([[ 651.2, 555.4], [-1163.2, 766.6],", "2.138994213, 1.114846113,],)), # some points behind the camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794,", "-0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441 , 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check(", "4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919,", "# unproject() with gradients, in-place if 1: # Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *=", "-1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}\", eps = 1e-6)", "behind the camera p = np.array(((1.0, 2.0, 10.0), (-1.1, 0.3, 1.0), (-0.9, -1.5,", "0.21145412, 5.49068928]]), np.array([[ 965.9173441 , 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([", "v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd like to turn this on, but", "v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected in-place v are normalized\", eps =", "v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize = False) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)", "[ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002,", "testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]} (normalized)\", eps = 1e-6) if", "-0.001, 0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931,", "{intrinsics[0]} in-place\", eps = 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) )", "mrcal.unproject(q_projected, *intrinsics, normalize = True, out = 
v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg =", "worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg =", "mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg =", "check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.), (1512., 1112, 500., 433.), (1512., 1112, 500.,", "# a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154,", "0.014, -0.056, 0.050))), p, np.array([[ 651.1885442 , 555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943", "1e-2, 2e-2, 3e-2))), p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]])) check(", "performed if intrinsics[0] == 'LENSMODEL_PINHOLE' or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0]", "relative = True, eps = 0.01) # a few points, some wide, some", "add others here: latlon, lonlat, stereographic. Broadcasted and not. 
Test the project() and", "], [-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001,", ") def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref,", "2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some points", "msg = f\"Unprojected v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected_nograd, p_ref)", "= 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:],", "*intrinsics, normalize = True, out = v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected", "nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float),", "msg = f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True,", "-1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\",", "[ -770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637,", "= True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = 
f\"dv_di (normalized v, in-place):", "here: latlon, lonlat, stereographic. Broadcasted and not. Test the project() and unproject() paths", "np.array(((1512., 1112, 500., 333.), (1512., 1112, 500., 433.), (1512., 1112, 500., 533.)))), p,", "= 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\", worstcase =", "not\", # eps = 1e-6) # Two different gradient computations, to match the", "0.016))), p, np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00',", "p, np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]])) # Note that", "= 1e-2) ########## unproject if 1: ##### Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize", "@nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref,", "766.6], [ -860.8, -1135. 
]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.), (1502., 1112,", "333.), (1512., 1112, 500., 433.), (1512., 1112, 500., 533.)))), p, np.array([[ 651.2, 555.4],", "{intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg", "500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533,", "= True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v): {intrinsics[0]}\",", "The math still works normally, and this is just fine as # a", "np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441 , 524.31894367],", "1493.31670636], [ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637,", "650.69900257, 551.44238248], [-751.13786254, 654.42977413], [-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333., -0.012,", "msg = f\"dq_dp {intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\",", "in-place v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)", "########## project q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}\", eps", "= f\"Projecting {intrinsics[0]}\", eps = 1e-2) q_projected *= 0 mrcal.project(p_ref, *intrinsics, out =", "651.2, 555.4], [-1163.2, 766.6], [ -860.8, -1135. 
]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500.,", "= f\"Projecting {intrinsics[0]} in-place\", eps = 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']: @nps.broadcast_define(", "I'm testing sys.path[:0] = f\"{testdir}/..\", import mrcal import testutils from test_calibration_helpers import grad", "*intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad\", eps = 1e-2)", "return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define(", "# Two different gradient computations, to match the two different ways the #", "in-place\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp in-place\", eps = 1e-2)", "-594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2,", "else: @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject_stereographic( \\", "intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq", "out[2] *= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}", "check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500.,", "= 0.01) # a few points, some wide, some not. 
None behind the", "= grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase = True,", "433.), (1522., 1112, 500., 533.)))), p, np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413], [-615.34458492, -400.73749463]]))", "(1522., 1112, 500., 533.)))), p, np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check(", "project() and unproject() Here I make sure the projection functions return the correct", "up; which it was. The math still works normally, and this is just", "dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase = True, relative = True,", "np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016))), p, np.array([[ 2143.17840406, 1442.93419919],", "cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos),", "the correct values. A part of this is a regression test: the \"right\"", "stereographic. Broadcasted and not. 
Test the project() and unproject() paths - project/unproject -", "0.002, -0.637, -0.002, 0.016))), p, np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897], [ -249.83199315,", "2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146,", "still works normally, and this is just fine as # a test check(", "False) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal(", "551.44238248], [-751.13786254, 654.42977413], [-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333., -0.012, 0.035,", "= f\"Unprojecting in-place {intrinsics[0]}\", eps = 1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected,", "grad(lambda qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref =", "[-751.13786254, 654.42977413], [-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001,", "1112, 500., 433.), (1522., 1112, 
500., 533.)))), p, np.array([[ 649.35582325, 552.6874014], [-813.05440267, 698.1222302],", "axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg = f\"Unprojecting {intrinsics[0]} with grad\",", "qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized", "gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd like to turn this on,", "np.array([[ 647.79131656, 552.50386255], [-718.86844854, 757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.),", "testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0", "333., -0.012, 0.035, -0.001, 0.002, 0.019))), p, np.array([[ 651.2740691 , 555.2309482 ], [-1292.8121176", "recorded at some point, and any deviation is flagged. 
This also test gradients,", "0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274,", "2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]])) #", "= 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.project(p_ref,", "np.array(((1.0, 2.0, 10.0), (-1.1, 0.3, 1.0), (-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112,", "nps.norm2(v_unprojected), 1, msg = f\"Unprojected in-place v are normalized\", eps = 1e-6) cos", "grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',))", "check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019))), p, np.array([[", "1e-6) if not meta['has_gradients']: # no in-place output for the no-gradients unproject() path", "eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di]", "= True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di", "= True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients) are normalized\",", "[ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002,", "0 out[1] *= 0 out[2] *= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref,", "import numpy as np import numpysane as nps import os testdir = os.path.dirname(os.path.realpath(__file__))", "p_ref, q_ref): ########## project q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting", "0.01) testutils.confirm_equal(dv_di, 
dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v): {intrinsics[0]}\", worstcase = True, relative", "# v_unprojected_nograd, # msg = f\"Unproject() should return the same thing whether get_gradients", "np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting in-place {intrinsics[0]}\", eps = 1e-6) ### unproject gradients", "# # testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, # msg = f\"Unproject() should return the", "points are behind the camera (z<0), which is # possible with these models.", "1: # un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2] *=", "way it # should, so this test always fails currently # # testutils.confirm_equal(", "0 mrcal.project(p_ref, *intrinsics, out = q_projected) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} in-place\",", "some wide, some not. None behind the camera p = np.array(((1.0, 2.0, 10.0),", "np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))), p,", "2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314,", 
"1e-2, 2e-2, 3e-2))), p, np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]]))", "1, msg = f\"Unprojected in-place v are normalized\", eps = 1e-6) cos =", "import the LOCAL mrcal since that's what I'm testing sys.path[:0] = f\"{testdir}/..\", import", "0.3, 1.0), (-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.), (1512., 1112,", "on, but unproject() doesn't behave the way it # should, so this test", "= nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],),", "but unproject() doesn't behave the way it # should, so this test always", "= False) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1)", "2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some points behind the camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877],", "= f\"{testdir}/..\", import mrcal import testutils from test_calibration_helpers import grad def check(intrinsics, p_ref,", "intrinsics[0] == 'LENSMODEL_PINHOLE' or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] == 'LENSMODEL_LATLON'", "*= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True,", "0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = False, get_gradients = True, out", "if meta['has_gradients']: @nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref): return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0],", "True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized", "Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize = True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg =", "= True, relative = True, eps = 0.01) # Normalized unprojected gradients v_unprojected,dv_dq,dv_di", "True, out = out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients,", "]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016))), p,", "*intrinsics, normalize = True, get_gradients = True, out = out) testutils.confirm_equal( nps.norm2(v_unprojected), 1,", "np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients,", "is # possible with these models. 
Also note that some of the projected", "= f\"Unprojected v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected_nograd, p_ref) /", "get_gradients=True) # I'd like to turn this on, but unproject() doesn't behave the", "np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p,", "Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected,", "0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]]))", "('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807,", "import testutils from test_calibration_helpers import grad def check(intrinsics, p_ref, q_ref): ########## project q_projected", "1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.35607966, 1437.40149368], [", "1653.09646897], [ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637,", "msg = f\"dv_dq (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True,", "2.0, 10.0), (-1.1, 0.3, 1.0), (-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500.,", "testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting 
{intrinsics[0]} in-place\", eps = 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0])", "2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729, 2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667,", "lonlat, stereographic. Broadcasted and not. Test the project() and unproject() paths - project/unproject", "deviation is flagged. This also test gradients, normalization and in-place output. I want", "model ''' import sys import numpy as np import numpysane as nps import", "f\"dq_dp {intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\", eps =", "point, and any deviation is flagged. 
This also test gradients, normalization and in-place", "1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.project(p_ref, *intrinsics,", "= grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: \\", "intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected,", "970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016,", "out = out) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1,", "np.array([[ 965.9173441 , 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0,", "-400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002))), p, np.array([[", "nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients, in-place) are normalized\", eps =", "what I'm testing sys.path[:0] = f\"{testdir}/..\", import mrcal import testutils from test_calibration_helpers import", "with these models. Also note that some of the projected points are #", "msg = f\"dq_di {intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1] *=", "649.35582325, 552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.), (1502.,", "os testdir = os.path.dirname(os.path.realpath(__file__)) # I import the LOCAL mrcal since that's what", "camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441 ,", "(unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01)", "testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients) are normalized\", eps =", "test_calibration_helpers import grad def check(intrinsics, p_ref, q_ref): ########## project q_projected = mrcal.project(p_ref, *intrinsics)", "= True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized v, in-place):", "('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))),", "get_gradients or not\", # eps = 1e-6) # Two different gradient computations, to", "@nps.broadcast_define( ((3,),('N',)) ) def grad_broadcasted(p_ref, i_ref): return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref,", "projected points are # off the imager (x<0). This is aphysical, but it", "was # made up; which it was. The math still works normally, and", "2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00',", "out = v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected in-place v are normalized\",", "np.array([[ 651.27371 , 555.23042 ], [-1223.38516 , 678.01468 ], [-1246.7310448, -1822.799928 ]])) check(", "((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]), nps.glue(q_ref,i_ref, axis=-1))", "behind the camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[", "out = q_projected) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} in-place\", eps = 1e-2)", "f\"Unproject() should return the same thing whether get_gradients or not\", # eps =", "- explicit \"out\" in args check() covers all of these for ONE model", "# I'd like to turn this on, but unproject() doesn't behave the way", "np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]} (normalized)\", eps = 1e-6) if not meta['has_gradients']:", "covers all of these for ONE model ''' import sys import numpy as", "project q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}\", eps =", "this is just fine as # a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0,", "757.09995546], [-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.),", "'LENSMODEL_PINHOLE' or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] == 'LENSMODEL_LATLON' or \\", "mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True, out = out) testutils.confirm_equal( nps.norm2(v_unprojected),", "qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1])", "[-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002,", "import grad def check(intrinsics, p_ref, q_ref): ########## project q_projected = mrcal.project(p_ref, *intrinsics) testutils.confirm_equal(q_projected,", "some points behind the camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412,", "were recorded at some point, and any deviation is flagged. This also test", "- model simple: yes/no - broadcasted: yes/no - unproject normalize: yes/no - explicit", "eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\", worstcase", "= mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg", "1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413],", "{intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) # Normalized unprojected", "testing sys.path[:0] = f\"{testdir}/..\", import mrcal import testutils from test_calibration_helpers import grad def", "= f\"Unprojecting (non-normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg", "# possible with these models. Also note that some of the projected points", "a regression test: the \"right\" project() results were recorded at some point, and", "1112, 500., 333.), (1512., 1112, 500., 433.), (1512., 1112, 500., 533.)))), p, np.array([[", ", 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628,", "This also test gradients, normalization and in-place output. 
I want to check all", "= 1e-6) cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1)", "normalized\", eps = 1e-6) cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos = np.clip(cos,", "True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\", worstcase = True,", "eps = 1e-2) q_projected *= 0 mrcal.project(p_ref, *intrinsics, out = q_projected) testutils.confirm_equal(q_projected, q_ref,", "relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v,", "333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 650.69900257, 551.44238248],", "and this is just fine as # a test check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0,", "= f\"dv_dq (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps", "testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di in-place\", eps = 1e-2) ########## unproject if 1:", "((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2],", "if 1: # un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2]", "q_ref, msg = f\"Projecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg", "eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v, in-place): {intrinsics[0]}\", worstcase", "1e-6) # Two different gradient computations, to match the two different ways the", "= 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\", worstcase =", "-1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019))),", "(z<0), which is # possible with these models. 
Also note that some of", "= np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}\", eps", "or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] == 'LENSMODEL_LATLON' or \\ intrinsics[0]", "500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 647.79131656,", "[-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441 , 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200',", "return grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1])", "and unproject() paths - project/unproject - get_gradients: yes/no - model simple: yes/no -", "it was. The math still works normally, and this is just fine as", "# should, so this test always fails currently # # testutils.confirm_equal( v_unprojected, #", "f\"Unprojected v (with gradients) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref)", "the projection functions return the correct values. 
A part of this is a", "in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v, in-place):", "out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True, out =", "10.0), (-1.1, 0.3, 1.0), (-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.),", "sys.path[:0] = f\"{testdir}/..\", import mrcal import testutils from test_calibration_helpers import grad def check(intrinsics,", "normalize: yes/no - explicit \"out\" in args check() covers all of these for", "= True, out = out) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos =", "in-place) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos", "True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di (unnormalized v, in-place): {intrinsics[0]}\",", "('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.), (1512., 1112, 500., 433.), (1512., 1112, 500., 533.)))),", "output. I want to check all combinations of add others here: latlon, lonlat,", "camera p = np.array(((1.0, 2.0, 10.0), (-1.1, 0.3, 1.0), (-0.9, -1.5, 1.0))) check(", "[-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001,", "p, np.array([[ 651.2, 555.4], [-1163.2, 766.6], [ -860.8, -1135. 
]])) check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512.,", "########## unproject if 1: ##### Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize = False)", "nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg", "- project/unproject - get_gradients: yes/no - model simple: yes/no - broadcasted: yes/no -", "== 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] == 'LENSMODEL_LATLON' or \\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define(", "= grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}", "np.array([[2140.80289923, 1438.2774104 ], [ 423.27156274, 1513.20891648], [ 872.53696336, -731.32905711]])) # Note that some", "= 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\", worstcase = True, relative =", "np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (non-normalized, with gradients, in-place) {intrinsics[0]}\", eps =", "{intrinsics[0]}\", eps = 1e-2) q_projected *= 0 mrcal.project(p_ref, *intrinsics, out = q_projected) testutils.confirm_equal(q_projected,", "651.27371 , 555.23042 ], [-1223.38516 , 678.01468 ], [-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5',", "0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad", "1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.), (1512., 1112, 500., 433.), (1512., 1112,", "or \\ intrinsics[0] == 'LENSMODEL_LATLON' or \\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) )", "gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, 
dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v,", "sure the projection functions return the correct values. A part of this is", "= mrcal.unproject(q_projected, *intrinsics, normalize = True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected v", "msg = f\"Unprojecting (normalized, with gradients) {intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define( ((2,),('N',)) )", "f\"dv_di (normalized v): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01)", "= f\"dq_dp {intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\", eps", "test gradients, normalization and in-place output. I want to check all combinations of", "nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting", "not meta['has_gradients']: # no in-place output for the no-gradients unproject() path return v_unprojected", "f\"Unprojecting {intrinsics[0]}\", eps = 1e-6) if 1: ##### Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics,", "np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (non-normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6)", "two different ways the # internal computation is performed if intrinsics[0] == 'LENSMODEL_PINHOLE'", "possible with these models. 
Also note that some of the projected points are", "these for ONE model ''' import sys import numpy as np import numpysane", "so this test always fails currently # # testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, #", "of this is a regression test: the \"right\" project() results were recorded at", "msg = f\"dv_di (normalized v): {intrinsics[0]}\", worstcase = True, relative = True, eps", "0 mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True, out = out) testutils.confirm_equal(", "True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected v are normalized\", eps = 1e-6)", "= f\"dv_dq (normalized v): {intrinsics[0]}\", worstcase = True, relative = True, eps =", "[-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 965.9173441 , 524.31894367], [1246.58668369, 4621.35427783], [4329.41598149,", "testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative", "to match the two different ways the # internal computation is performed if", "or not\", # eps = 1e-6) # Two different gradient computations, to match", "projected points are behind the camera (z<0), which is # possible with these", "check() covers all of these for ONE model ''' import sys import numpy", "True, out = v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected in-place v are", "np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (non-normalized, with gradients,", "500., 333.), (1512., 1112, 500., 433.), (1512., 1112, 500., 533.)))), p, np.array([[ 651.2,", "out = out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients, in-place)", "1.114846113,],)), # some points behind the camera! 
np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387],", "models. Also note that some of the projected points are # off the", "2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581, 2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146, 2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271, 2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314, 2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467, 2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667, 2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907, 2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688, 2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348, 2.138994213, 1.114846113,],)), # some", "grad_broadcasted(p_ref, i_ref): return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1])", "these models. 
Also note that some of the projected points are # off", "f\"Projecting {intrinsics[0]} with grad in-place\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp", "return the same thing whether get_gradients or not\", # eps = 1e-6) #", "1e-2) ########## unproject if 1: ##### Un-normalized v_unprojected = mrcal.unproject(q_projected, *intrinsics, normalize =", "= np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with", "project/unproject - get_gradients: yes/no - model simple: yes/no - broadcasted: yes/no - unproject", "4621.35427783], [4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154,", "normalize = True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected v are normalized\", eps", "cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}", "results were recorded at some point, and any deviation is flagged. This also", "test: the \"right\" project() results were recorded at some point, and any deviation", "# I import the LOCAL mrcal since that's what I'm testing sys.path[:0] =", "#!/usr/bin/python3 r'''Tests for project() and unproject() Here I make sure the projection functions", "combinations of add others here: latlon, lonlat, stereographic. Broadcasted and not. 
Test the", "-0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356], [ 954.60918375,", "unproject() path return v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, out =", "698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.),", "= 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0]", "eps = 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\", worstcase", "dq_dpi_ref[...,3:], msg = f\"dq_di {intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1]", "f\"Unprojected v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref)", "np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636], [ 970.11788123, -568.30114806]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528,", "500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413], [-615.34458492,", "grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg = f\"Unprojecting {intrinsics[0]} with grad\", eps = 1e-2)", "msg = f\"Projecting {intrinsics[0]} in-place\", eps = 1e-2) meta = mrcal.lensmodel_metadata_and_config(intrinsics[0]) if meta['has_gradients']:", "eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di in-place\", eps = 1e-2) ##########", "check all combinations of add others here: latlon, lonlat, stereographic. 
Broadcasted and not.", "intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: mrcal.unproject(qi[:2],", "-92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002,", "the way it # should, so this test always fails currently # #", "('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))),", "'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] == 'LENSMODEL_LATLON' or \\ intrinsics[0] == 'LENSMODEL_LONLAT': @nps.broadcast_define( ((2,),('N',))", "p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771,", "- get_gradients: yes/no - model simple: yes/no - broadcasted: yes/no - unproject normalize:", "testutils from test_calibration_helpers import grad def check(intrinsics, p_ref, q_ref): ########## project q_projected =", "*= 0 out[1] *= 0 out[2] *= 0 mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out) testutils.confirm_equal(q_projected,", "make sure the projection functions return the correct values. 
A part of this", "# msg = f\"Unproject() should return the same thing whether get_gradients or not\",", "\\ mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True), nps.glue(q_ref,i_ref, axis=-1)) dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg", "{intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi:", "p, np.array([[ 649.35582325, 552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500.,", "out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected, *intrinsics, normalize =", "{intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\",", "500., 533.)))), p, np.array([[ 649.35582325, 552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON', np.array(((1512.,", "import sys import numpy as np import numpysane as nps import os testdir", "worstcase = True, relative = True, eps = 0.01) # Normalized unprojected gradients", "= f\"dv_dq (unnormalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps", "of the projected points are # off the imager (x<0). 
This is aphysical,", "1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]} (normalized)\", eps = 1e-6)", "eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v): {intrinsics[0]}\", worstcase =", "-1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (non-normalized, with gradients, in-place)", "mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics), q_projected, msg", "in-place\", eps = 1e-2) ########## unproject if 1: ##### Un-normalized v_unprojected = mrcal.unproject(q_projected,", "regression test: the \"right\" project() results were recorded at some point, and any", "-1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (normalized, with gradients, in-place)", "note that some of the projected points are # off the imager (x<0).", "### unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, get_gradients=True) # I'd like to turn", "eps = 1e-6) if 1: ##### Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize =", "np.array([[ 651.2740691 , 555.2309482 ], [-1292.8121176 , 691.9401448 ], [-1987.550162 , -2730.85863427]])) check(", "= f\"dq_di {intrinsics[0]}\", eps = 1e-2) out=[q_projected,dq_dp,dq_di] out[0] *= 0 out[1] *= 0", "test always fails currently # # testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, # msg =", "msg = f\"Unproject() should return the same thing whether get_gradients or not\", #", "('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002, -0.637, -0.002, 0.016))), p, np.array([[ 2143.17840406,", "('LENSMODEL_OPENCV4', 
np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002))), p, np.array([[ 651.27371 ,", "[ -92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001,", "2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451,", "= np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting in-place {intrinsics[0]}\",", "for project() and unproject() Here I make sure the projection functions return the", "some point, and any deviation is flagged. 
This also test gradients, normalization and", "433.), (1512., 1112, 500., 533.)))), p, np.array([[ 651.2, 555.4], [-1163.2, 766.6], [ -860.8,", "*intrinsics, normalize = True, get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg = f\"Unprojected", "nps import os testdir = os.path.dirname(os.path.realpath(__file__)) # I import the LOCAL mrcal since", "msg = f\"Unprojected in-place v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected,", "v_unprojected, # v_unprojected_nograd, # msg = f\"Unproject() should return the same thing whether", "Test the project() and unproject() paths - project/unproject - get_gradients: yes/no - model", "np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002, 0.019))), p, np.array([[ 651.2740691 ,", "math still works normally, and this is just fine as # a test", "f\"dq_di in-place\", eps = 1e-2) ########## unproject if 1: ##### Un-normalized v_unprojected =", "1, msg = f\"Unprojected v (with gradients) are normalized\", eps = 1e-6) cos", "with grad\", eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq: {intrinsics[0]}\", worstcase =", "(x<0). 
This is aphysical, but it just means that the model was #", "680.23499814], [ -770.03274263, -1238.4871943 ]])) check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002,", "('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))),", "], [-1292.8121176 , 691.9401448 ], [-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500.,", "f\"Unprojecting in-place {intrinsics[0]}\", eps = 1e-6) ### unproject gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics,", "= False, get_gradients = True, out = out) cos = nps.inner(v_unprojected, p_ref) /", ", 691.9401448 ], [-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333., -0.012,", "msg = f\"Unprojecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg =", "gradients, in-place) are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)", "from test_calibration_helpers import grad def check(intrinsics, p_ref, q_ref): ########## project q_projected = mrcal.project(p_ref,", "500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 649.35582325,", "un-normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2] *= 0 mrcal.unproject(q_projected,", "return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di =", "want to check all combinations of add others here: latlon, lonlat, stereographic. 
Broadcasted", "msg = f\"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2],", "1112, 500., 533.)))), p, np.array([[ 649.35582325, 552.6874014], [-813.05440267, 698.1222302], [-408.67354332, -573.48815174]])) check( ('LENSMODEL_LATLON',", "##### Normalized v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics, normalize = True) testutils.confirm_equal( nps.norm2(v_unprojected_nograd), 1, msg", "500., 533.)))), p, np.array([[ 650.69900257, 551.44238248], [-751.13786254, 654.42977413], [-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512.,", "of these for ONE model ''' import sys import numpy as np import", "nps.norm2(v_unprojected), 1, msg = f\"Unprojected v (with gradients) are normalized\", eps = 1e-6)", "nps.norm2(v_unprojected_nograd), 1, msg = f\"Unprojected v are normalized\", eps = 1e-6) cos =", "v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, out = v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected),", "= np.array(((1.0, 2.0, 10.0), (-1.1, 0.3, 1.0), (-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512.,", "different gradient computations, to match the two different ways the # internal computation", "projection functions return the correct values. 
A part of this is a regression", "*= 0 mrcal.unproject(q_projected, *intrinsics, normalize = False, get_gradients = True, out = out)", "(-1.1, 0.3, 1.0), (-0.9, -1.5, 1.0))) check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.), (1512.,", "0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))), p, np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356],", "get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dq_dp,", "[-204.73403533, -559.86662025]])) check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522.,", "points are # off the imager (x<0). This is aphysical, but it just", "the projected points are behind the camera (z<0), which is # possible with", "some of the projected points are # off the imager (x<0). This is", "# made up; which it was. The math still works normally, and this", "None behind the camera p = np.array(((1.0, 2.0, 10.0), (-1.1, 0.3, 1.0), (-0.9,", "-0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))), p, np.array([[2140.34076919, 1437.37148001], [ 496.63465931, 1493.31670636], [", "eps = 0.01) # Normalized unprojected gradients v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize =", "mrcal.project(p_ref, *intrinsics, get_gradients=True) testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]} with grad\", eps =", "489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302, -0.001, 0.002,", "nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1]) else: @nps.broadcast_define( ((2,),('N',)) ) def grad_broadcasted(q_ref, i_ref): return", "2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 
2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294, 2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407, 2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327, 2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592,", "(normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01)", "= True, relative = True, eps = 0.01) if 1: # un-normalized output", "2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528,", "paths - project/unproject - get_gradients: yes/no - model simple: yes/no - broadcasted: yes/no", "which is # possible with these models. Also note that some of the", "fails currently # # testutils.confirm_equal( v_unprojected, # v_unprojected_nograd, # msg = f\"Unproject() should", "unproject normalize: yes/no - explicit \"out\" in args check() covers all of these", "that some of the projected points are # off the imager (x<0). 
This", "= f\"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg", "should return the same thing whether get_gradients or not\", # eps = 1e-6)", "], [-1223.38516 , 678.01468 ], [-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500.,", "= True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg = f\"dv_di {intrinsics[0]}\", worstcase =", "= np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting (non-normalized, with", "v are normalized\", eps = 1e-6) cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref) cos", "np.array([[-0.8479983, -0.52999894, -0.34690877], [-0.93984618, 0.34159794, -0.16119387], [-0.97738792, 0.21145412, 5.49068928]]), np.array([[ 958.48347896, 529.99410342], [1229.87308989,", "[-1292.8121176 , 691.9401448 ], [-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333.,", "if intrinsics[0] == 'LENSMODEL_PINHOLE' or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] ==", "= 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:], msg = f\"dv_di (normalized v, in-place): {intrinsics[0]}\", worstcase =", "(1512., 1112, 500., 533.)))), p, np.array([[ 651.2, 555.4], [-1163.2, 766.6], [ -860.8, -1135.", "numpysane as nps import os testdir = os.path.dirname(os.path.realpath(__file__)) # I import the LOCAL", "in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) if 1:", "mrcal.project(pi[:3], intrinsics[0], pi[3:]), nps.glue(p_ref,i_ref, axis=-1)) dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1]) q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True)", "# Note that some of the projected points are behind the camera (z<0),", "Broadcasted and not. 
Test the project() and unproject() paths - project/unproject - get_gradients:", "r'''Tests for project() and unproject() Here I make sure the projection functions return", "f\"{testdir}/..\", import mrcal import testutils from test_calibration_helpers import grad def check(intrinsics, p_ref, q_ref):", "= 1e-6) @nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject(qi[:2],", ") def grad_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0],", "[4329.41598149, 3183.75121559]])) check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200', np.array([ 1500.0, 1800.0, 1499.5,999.5, 2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628, 2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649, 2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716, 2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154, 2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294,", "1e-6) @nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda qi: \\ mrcal.unproject(qi[:2], intrinsics[0],", "testutils.confirm_equal(q_projected, q_ref, msg = f\"Projecting {intrinsics[0]}\", eps = 1e-2) q_projected *= 0 mrcal.project(p_ref,", "gradients) {intrinsics[0]}\", eps = 1e-6) @nps.broadcast_define( ((2,),('N',)) ) def grad_normalized_broadcasted(q_ref, i_ref): return grad(lambda", "-0.056, 0.050))), p, np.array([[ 651.1885442 , 555.10514968], [-1234.45480366, 680.23499814], [ -770.03274263, -1238.4871943 ]]))", "v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected), 1,", "= 1e-6) if not meta['has_gradients']: # no in-place output for the no-gradients unproject()", "= f\"dq_dp in-place\", 
eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di in-place\", eps", "2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807, 2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678, 2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817, 2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319, 2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396, 2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592, 2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043, 2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366, 2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451, 2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329, 2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519, 2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857, 2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729,", "all of these for ONE model ''' import sys import numpy as np", "\\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \\ intrinsics[0] == 'LENSMODEL_LATLON' or \\ intrinsics[0] ==", "this is a regression test: the \"right\" project() results were recorded at some", "in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dvnormalized_dqi_ref[...,2:],", "np.array([[2140.35607966, 1437.40149368], [ 489.05797783, 1495.37110356], [ 954.60918375, -594.21144463]])) check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528,", "= f\"dv_di (normalized v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps", "turn this on, but unproject() doesn't behave the way it # should, so", "= f\"Projecting {intrinsics[0]} with grad in-place\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg =", "match the two different ways the # internal computation is 
performed if intrinsics[0]", "gradients, in-place) {intrinsics[0]}\", eps = 1e-6) testutils.confirm_equal(dv_dq, dv_dqi_ref[...,:2], msg = f\"dv_dq (unnormalized v,", "that's what I'm testing sys.path[:0] = f\"{testdir}/..\", import mrcal import testutils from test_calibration_helpers", "555.23042 ], [-1223.38516 , 678.01468 ], [-1246.7310448, -1822.799928 ]])) check( ('LENSMODEL_OPENCV5', np.array((1512., 1112,", "check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500.,", "mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg =", "691.9401448 ], [-1987.550162 , -2730.85863427]])) check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333., -0.012, 0.035,", "ways the # internal computation is performed if intrinsics[0] == 'LENSMODEL_PINHOLE' or \\", "project() results were recorded at some point, and any deviation is flagged. This", "1: # Normalized output out=[v_unprojected,dv_dq,dv_di] out[0] *= 0 out[1] *= 0 out[2] *=", "[ 872.53696336, -731.32905711]])) # Note that some of the projected points are behind", "(normalized)\", eps = 1e-6) if not meta['has_gradients']: # no in-place output for the", "to turn this on, but unproject() doesn't behave the way it # should,", "p, np.array([[ 2143.17840406, 1442.93419919], [ -92.63813066, 1653.09646897], [ -249.83199315, -2606.46477164]])) check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918,", "the \"right\" project() results were recorded at some point, and any deviation is", "eps = 1e-6) testutils.confirm_equal(dv_dq, dvnormalized_dqi_ref[...,:2], msg = f\"dv_dq (normalized v, in-place): {intrinsics[0]}\", worstcase", "I want to check all combinations of add others here: latlon, lonlat, stereographic.", "= f\"Unproject() should return the same thing whether get_gradients or not\", # eps", "cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), 
np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}\",", "{intrinsics[0]} with grad\", eps = 1e-2) testutils.confirm_equal(dq_dp, dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\", eps", "{intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) testutils.confirm_equal(dv_di, dv_dqi_ref[...,2:], msg", "dq_dpi_ref[...,:3], msg = f\"dq_dp {intrinsics[0]}\", eps = 1e-2) testutils.confirm_equal(dq_di, dq_dpi_ref[...,3:], msg = f\"dq_di", "return grad(lambda qi: \\ mrcal.unproject_stereographic( \\ mrcal.project_stereographic( mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))), nps.glue(q_ref,i_ref, axis=-1)) dv_dqi_ref", "0.019))), p, np.array([[ 651.2740691 , 555.2309482 ], [-1292.8121176 , 691.9401448 ], [-1987.550162 ,", "normalize = True, get_gradients = True, out = out) testutils.confirm_equal( nps.norm2(v_unprojected), 1, msg", "computation is performed if intrinsics[0] == 'LENSMODEL_PINHOLE' or \\ intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or", "behind the camera (z<0), which is # possible with these models. Also note", "*= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, out = v_unprojected) testutils.confirm_equal( nps.norm2(v_unprojected), 1,", "testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],), dtype=float), msg = f\"Unprojecting {intrinsics[0]}\", eps = 1e-6) if 1:", "500., 333.), (1502., 1112, 500., 433.), (1522., 1112, 500., 533.)))), p, np.array([[ 650.69900257,", "500., 533.)))), p, np.array([[ 651.2, 555.4], [-1163.2, 766.6], [ -860.8, -1135. 
]])) check(", "= nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref) cos = np.clip(cos, -1, 1) testutils.confirm_equal( np.arccos(cos), np.zeros((p_ref.shape[0],),", "return v_unprojected *= 0 mrcal.unproject(q_projected, *intrinsics, normalize = True, out = v_unprojected) testutils.confirm_equal(", "[-615.34458492, -400.73749463]])) check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333., -0.012, 0.035, -0.001, 0.002))), p,", "v, in-place): {intrinsics[0]}\", worstcase = True, relative = True, eps = 0.01) if" ]
[ "Document class MachineStatus(Document): # return True if API exist in database, otherwise return", "in database, otherwise return False. @frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac Stat", "the API once user decide to NOT sync with Node-RED @frappe.whitelist() def disable_API(self):", "# @frappe.whitelist() # def create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms = frappe.get_doc('Machine Status', ms_id)", "API once user decide to NOT sync with Node-RED @frappe.whitelist() def disable_API(self): ssapi", "decide to NOT sync with Node-RED @frappe.whitelist() def disable_API(self): ssapi = frappe.get_doc('Server Script',", "MachineStatus(Document): # return True if API exist in database, otherwise return False. @frappe.whitelist()", "return ms.machine_status # @frappe.whitelist() # def create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms = frappe.get_doc('Machine", "msl = frappe.new_doc('Machine Status Log') # msl.update({ # 'machine_status': ms.name, # 'asset': ms.asset,", "Script\", \"Mac Stat From NR To ERPNext\"): return \"API exists\" else: return \"API", "once user decide to NOT sync with Node-RED @frappe.whitelist() def disable_API(self): ssapi =", "= frappe.new_doc('Machine Status Log') # msl.update({ # 'machine_status': ms.name, # 'asset': ms.asset, #", "msl.insert() # # msl.docstatus = 1 # # msl.save() # @frappe.whitelist() # def", "frappe from frappe.model.document import Document class MachineStatus(Document): # return True if API exist", "NR To ERPNext\"): return \"API exists\" else: return \"API not found\" # disable", "commit=True) ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine Status', ms_id)", "# # ms.reload() # # ms.sync_with_nr = new_value # # ms.db_set('sync_with_nr', new_value) #", "<reponame>msf4-0/ERPNext_my_custom__maintenance # -*- coding: utf-8 -*- # Copyright (c) 2021, cjs and 
contributors", "ms = frappe.get_doc('Machine Status', ms_id) msl = frappe.new_doc('Machine Status Log') msl.update({ 'machine_status': ms.name,", "def disable_API(self): ssapi = frappe.get_doc('Server Script', \"Mac Stat From NR To ERPNext\") #", "new_value) # # ms.save() # # ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms =", "msl = frappe.new_doc('Machine Status Log') msl.update({ 'machine_status': ms.name, 'asset': ms.asset, 'workstation': ms.workstation, 'current_time':", "= frappe.get_doc('Machine Status', ms_id) # # ms.reload() # # ms.sync_with_nr = new_value #", "# # ms.sync_with_nr = new_value # # ms.db_set('sync_with_nr', new_value) # # ms.save() #", "and contributors # For license information, please see license.txt from __future__ import unicode_literals", "False. @frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac Stat From NR To ERPNext\"):", "new_mac_stat, # 'docstatus': 1 # }) # msl.insert() # # msl.docstatus = 1", "True if API exist in database, otherwise return False. @frappe.whitelist() def check_if_API_exist(self): if", "@frappe.whitelist() # def create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms = frappe.get_doc('Machine Status', ms_id) #", "exist in database, otherwise return False. 
@frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac", "\"API not found\" # disable the API once user decide to NOT sync", "-*- # Copyright (c) 2021, cjs and contributors # For license information, please", "ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine Status', ms_id) return", "ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self, ms_id):", "# # msl.docstatus = 1 # # msl.save() # @frappe.whitelist() # def update_sync_with_nr_flag(self,", "ms.sync_with_nr = new_value # # ms.db_set('sync_with_nr', new_value) # # ms.save() # # ms.reload()", "msl.update({ # 'machine_status': ms.name, # 'asset': ms.asset, # 'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(),", "ms.db_set('sync_with_nr', new_value) # # ms.save() # # ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms", "else: return \"API not found\" # disable the API once user decide to", "@frappe.whitelist() def disable_API(self): ssapi = frappe.get_doc('Server Script', \"Mac Stat From NR To ERPNext\")", "= frappe.get_doc('Machine Status', ms_id) msl = frappe.new_doc('Machine Status Log') msl.update({ 'machine_status': ms.name, 'asset':", "= frappe.new_doc('Machine Status Log') msl.update({ 'machine_status': ms.name, 'asset': ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(),", "database, otherwise return False. 
@frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac Stat From", "disable the API once user decide to NOT sync with Node-RED @frappe.whitelist() def", "Status Log') # msl.update({ # 'machine_status': ms.name, # 'asset': ms.asset, # 'workstation': ms.workstation,", "ms = frappe.get_doc('Machine Status', ms_id) return ms.machine_status # @frappe.whitelist() # def create_new_mac_stat_log(self, ms_id,", "ms.name, # 'asset': ms.asset, # 'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat,", "class MachineStatus(Document): # return True if API exist in database, otherwise return False.", "contributors # For license information, please see license.txt from __future__ import unicode_literals import", "# For license information, please see license.txt from __future__ import unicode_literals import frappe", "return \"API not found\" # disable the API once user decide to NOT", "sync with Node-RED @frappe.whitelist() def disable_API(self): ssapi = frappe.get_doc('Server Script', \"Mac Stat From", "Status', ms_id) # # ms.reload() # # ms.sync_with_nr = new_value # # ms.db_set('sync_with_nr',", "ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine", "# 'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, # 'docstatus': 1 # }) # msl.insert()", "return False. 
@frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac Stat From NR To", "new_value # # ms.db_set('sync_with_nr', new_value) # # ms.save() # # ms.reload() @frappe.whitelist() def", "ms_id) return ms.machine_status # @frappe.whitelist() # def create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms =", "NR To ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def", "ms_id) # msl = frappe.new_doc('Machine Status Log') # msl.update({ # 'machine_status': ms.name, #", "frappe.new_doc('Machine Status Log') msl.update({ 'machine_status': ms.name, 'asset': ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status':", "# 'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, # 'docstatus': 1 #", "please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document", "# }) # msl.insert() # # msl.docstatus = 1 # # msl.save() #", "frappe.model.document import Document class MachineStatus(Document): # return True if API exist in database,", "API exist in database, otherwise return False. 
@frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server Script\",", "@frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine Status', ms_id) msl = frappe.new_doc('Machine Status", "frappe.get_doc('Machine Status', ms_id) msl = frappe.new_doc('Machine Status Log') msl.update({ 'machine_status': ms.name, 'asset': ms.asset,", "1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine Status',", "utf-8 -*- # Copyright (c) 2021, cjs and contributors # For license information,", "For license information, please see license.txt from __future__ import unicode_literals import frappe from", "Stat From NR To ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() #", "# ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms", "ms.asset, # 'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, # 'docstatus': 1", "ms = frappe.get_doc('Machine Status', ms_id) # # ms.reload() # # ms.sync_with_nr = new_value", "'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1 }) msl.insert() # msl.docstatus = 1 #", "frappe.get_doc('Machine Status', ms_id) return ms.machine_status # @frappe.whitelist() # def create_new_mac_stat_log(self, ms_id, new_mac_stat): #", "= 1 # # msl.save() # @frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id, new_value): #", "Status', ms_id) return ms.machine_status # @frappe.whitelist() # def create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms", "Log') # msl.update({ # 'machine_status': ms.name, # 'asset': ms.asset, # 'workstation': ms.workstation, #", "= frappe.get_doc('Server Script', \"Mac Stat From NR To 
ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1,", "# @frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine Status', ms_id) return ms.machine_status #", "frappe.db.exists(\"Server Script\", \"Mac Stat From NR To ERPNext\"): return \"API exists\" else: return", "Stat From NR To ERPNext\"): return \"API exists\" else: return \"API not found\"", "ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine Status', ms_id) msl = frappe.new_doc('Machine", "from __future__ import unicode_literals import frappe from frappe.model.document import Document class MachineStatus(Document): #", "Script', \"Mac Stat From NR To ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True)", "# 'machine_status': ms.name, # 'asset': ms.asset, # 'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(), #", "# msl.docstatus = 1 # # msl.save() # @frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id,", "'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1 }) msl.insert() # msl.docstatus =", "import frappe from frappe.model.document import Document class MachineStatus(Document): # return True if API", "# # ms = frappe.get_doc('Machine Status', ms_id) # # ms.reload() # # ms.sync_with_nr", "frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, # 'docstatus': 1 # }) # msl.insert() # #", "ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine Status', ms_id) return ms.machine_status", "To ERPNext\"): return \"API exists\" else: return \"API not found\" # disable the", "return \"API exists\" else: return \"API not found\" # disable the API once", "From NR To ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() # 
@frappe.whitelist()", "frappe.get_doc('Server Script', \"Mac Stat From NR To ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1, commit=True)", "1 # # msl.save() # @frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id, new_value): # #", "# @frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id, new_value): # # ms = frappe.get_doc('Machine Status',", "# ms.sync_with_nr = new_value # # ms.db_set('sync_with_nr', new_value) # # ms.save() # #", "Copyright (c) 2021, cjs and contributors # For license information, please see license.txt", "def get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine Status', ms_id) return ms.machine_status # @frappe.whitelist() #", "'current_machine_status': new_mac_stat, # 'docstatus': 1 # }) # msl.insert() # # msl.docstatus =", "def create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine Status', ms_id) msl = frappe.new_doc('Machine Status Log')", "# Copyright (c) 2021, cjs and contributors # For license information, please see", "not found\" # disable the API once user decide to NOT sync with", "new_mac_stat): # ms = frappe.get_doc('Machine Status', ms_id) # msl = frappe.new_doc('Machine Status Log')", "Status', ms_id) # msl = frappe.new_doc('Machine Status Log') # msl.update({ # 'machine_status': ms.name,", "frappe.get_doc('Machine Status', ms_id) # # ms.reload() # # ms.sync_with_nr = new_value # #", "ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1 }) msl.insert() # msl.docstatus", "msl.update({ 'machine_status': ms.name, 'asset': ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1", "'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, # 'docstatus': 1 # })", "From NR To ERPNext\"): return \"API exists\" else: return \"API not found\" #", 
"'machine_status': ms.name, # 'asset': ms.asset, # 'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(), # 'current_machine_status':", "ms = frappe.get_doc('Machine Status', ms_id) # msl = frappe.new_doc('Machine Status Log') # msl.update({", "ms.save() # # ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine Status', ms_id)", "# def update_sync_with_nr_flag(self, ms_id, new_value): # # ms = frappe.get_doc('Machine Status', ms_id) #", "Log') msl.update({ 'machine_status': ms.name, 'asset': ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus':", "2021, cjs and contributors # For license information, please see license.txt from __future__", "1 # }) # msl.insert() # # msl.docstatus = 1 # # msl.save()", "new_mac_stat): ms = frappe.get_doc('Machine Status', ms_id) msl = frappe.new_doc('Machine Status Log') msl.update({ 'machine_status':", "user decide to NOT sync with Node-RED @frappe.whitelist() def disable_API(self): ssapi = frappe.get_doc('Server", "-*- coding: utf-8 -*- # Copyright (c) 2021, cjs and contributors # For", "ms_id) msl = frappe.new_doc('Machine Status Log') msl.update({ 'machine_status': ms.name, 'asset': ms.asset, 'workstation': ms.workstation,", "\"Mac Stat From NR To ERPNext\"): return \"API exists\" else: return \"API not", "otherwise return False. @frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac Stat From NR", "exists\" else: return \"API not found\" # disable the API once user decide", "@frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id, new_value): # # ms = frappe.get_doc('Machine Status', ms_id)", "if API exist in database, otherwise return False. 
@frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server", "'machine_status': ms.name, 'asset': ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1 })", "= frappe.get_doc('Machine Status', ms_id) return ms.machine_status # @frappe.whitelist() # def create_new_mac_stat_log(self, ms_id, new_mac_stat):", "create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms = frappe.get_doc('Machine Status', ms_id) # msl = frappe.new_doc('Machine", "# 'asset': ms.asset, # 'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, #", "import unicode_literals import frappe from frappe.model.document import Document class MachineStatus(Document): # return True", "# ms = frappe.get_doc('Machine Status', ms_id) # # ms.reload() # # ms.sync_with_nr =", "= new_value # # ms.db_set('sync_with_nr', new_value) # # ms.save() # # ms.reload() @frappe.whitelist()", "license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document", "license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class MachineStatus(Document):", "get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine Status', ms_id) return ms.machine_status # @frappe.whitelist() # def", "frappe.get_doc('Machine Status', ms_id) # msl = frappe.new_doc('Machine Status Log') # msl.update({ # 'machine_status':", "# msl.update({ # 'machine_status': ms.name, # 'asset': ms.asset, # 'workstation': ms.workstation, # 'current_time':", "# # ms.db_set('sync_with_nr', new_value) # # ms.save() # # ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id,", "# ms.save() # # ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine Status',", "Status Log') msl.update({ 'machine_status': ms.name, 'asset': 
ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat,", "ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms =", "ms.machine_status # @frappe.whitelist() # def create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms = frappe.get_doc('Machine Status',", "Node-RED @frappe.whitelist() def disable_API(self): ssapi = frappe.get_doc('Server Script', \"Mac Stat From NR To", "msl.docstatus = 1 # # msl.save() # @frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id, new_value):", "# # ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine Status', ms_id) msl", "update_sync_with_nr_flag(self, ms_id, new_value): # # ms = frappe.get_doc('Machine Status', ms_id) # # ms.reload()", "if frappe.db.exists(\"Server Script\", \"Mac Stat From NR To ERPNext\"): return \"API exists\" else:", "= frappe.get_doc('Machine Status', ms_id) # msl = frappe.new_doc('Machine Status Log') # msl.update({ #", "return True if API exist in database, otherwise return False. 
@frappe.whitelist() def check_if_API_exist(self):", "def update_sync_with_nr_flag(self, ms_id, new_value): # # ms = frappe.get_doc('Machine Status', ms_id) # #", "# ms = frappe.get_doc('Machine Status', ms_id) # msl = frappe.new_doc('Machine Status Log') #", "information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import", "Status', ms_id) msl = frappe.new_doc('Machine Status Log') msl.update({ 'machine_status': ms.name, 'asset': ms.asset, 'workstation':", "frappe.new_doc('Machine Status Log') # msl.update({ # 'machine_status': ms.name, # 'asset': ms.asset, # 'workstation':", "ms.name, 'asset': ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1 }) msl.insert()", "'asset': ms.asset, # 'workstation': ms.workstation, # 'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, # 'docstatus':", "from frappe.model.document import Document class MachineStatus(Document): # return True if API exist in", "def create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms = frappe.get_doc('Machine Status', ms_id) # msl =", "coding: utf-8 -*- # Copyright (c) 2021, cjs and contributors # For license", "}) # msl.insert() # # msl.docstatus = 1 # # msl.save() # @frappe.whitelist()", "# msl.save() # @frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id, new_value): # # ms =", "'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, # 'docstatus': 1 # }) # msl.insert() #", "msl.save() # @frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id, new_value): # # ms = frappe.get_doc('Machine", "ms_id) # # ms.reload() # # ms.sync_with_nr = new_value # # ms.db_set('sync_with_nr', new_value)", "see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class", "ms_id, new_mac_stat): # ms = frappe.get_doc('Machine 
Status', ms_id) # msl = frappe.new_doc('Machine Status", "import Document class MachineStatus(Document): # return True if API exist in database, otherwise", "ms_id, new_value): # # ms = frappe.get_doc('Machine Status', ms_id) # # ms.reload() #", "# ms.db_set('sync_with_nr', new_value) # # ms.save() # # ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat):", "__future__ import unicode_literals import frappe from frappe.model.document import Document class MachineStatus(Document): # return", "# disable the API once user decide to NOT sync with Node-RED @frappe.whitelist()", "frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1 }) msl.insert() # msl.docstatus = 1 # msl.save()", "ms_id): ms = frappe.get_doc('Machine Status', ms_id) return ms.machine_status # @frappe.whitelist() # def create_new_mac_stat_log(self,", "with Node-RED @frappe.whitelist() def disable_API(self): ssapi = frappe.get_doc('Server Script', \"Mac Stat From NR", "To ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload() # @frappe.whitelist() def get_previous_mac_stat(self,", "ms.workstation, # 'current_time': frappe.utils.get_datetime(), # 'current_machine_status': new_mac_stat, # 'docstatus': 1 # }) #", "ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1 }) msl.insert() # msl.docstatus = 1", "def check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac Stat From NR To ERPNext\"): return \"API", "create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine Status', ms_id) msl = frappe.new_doc('Machine Status Log') msl.update({", "# # msl.save() # @frappe.whitelist() # def update_sync_with_nr_flag(self, ms_id, new_value): # # ms", "disable_API(self): ssapi = frappe.get_doc('Server Script', \"Mac Stat From NR To ERPNext\") # ssapi.reload()", "unicode_literals import frappe from 
frappe.model.document import Document class MachineStatus(Document): # return True if", "# ms.reload() # # ms.sync_with_nr = new_value # # ms.db_set('sync_with_nr', new_value) # #", "NOT sync with Node-RED @frappe.whitelist() def disable_API(self): ssapi = frappe.get_doc('Server Script', \"Mac Stat", "check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac Stat From NR To ERPNext\"): return \"API exists\"", "ERPNext\"): return \"API exists\" else: return \"API not found\" # disable the API", "@frappe.whitelist() def get_previous_mac_stat(self, ms_id): ms = frappe.get_doc('Machine Status', ms_id) return ms.machine_status # @frappe.whitelist()", "@frappe.whitelist() def check_if_API_exist(self): if frappe.db.exists(\"Server Script\", \"Mac Stat From NR To ERPNext\"): return", "# return True if API exist in database, otherwise return False. @frappe.whitelist() def", "# def create_new_mac_stat_log(self, ms_id, new_mac_stat): # ms = frappe.get_doc('Machine Status', ms_id) # msl", "\"Mac Stat From NR To ERPNext\") # ssapi.reload() ssapi.db_set('disabled', 1, commit=True) ssapi.save(ignore_version=True) ssapi.reload()", "to NOT sync with Node-RED @frappe.whitelist() def disable_API(self): ssapi = frappe.get_doc('Server Script', \"Mac", "# 'current_machine_status': new_mac_stat, # 'docstatus': 1 # }) # msl.insert() # # msl.docstatus", "# ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine Status', ms_id) msl =", "ms.reload() # # ms.sync_with_nr = new_value # # ms.db_set('sync_with_nr', new_value) # # ms.save()", "found\" # disable the API once user decide to NOT sync with Node-RED", "\"API exists\" else: return \"API not found\" # disable the API once user", "# 'docstatus': 1 # }) # msl.insert() # # msl.docstatus = 1 #", "# -*- coding: utf-8 -*- # Copyright (c) 2021, cjs and contributors #", "# msl.insert() # # msl.docstatus = 1 # # msl.save() # @frappe.whitelist() #", "cjs and contributors # For 
license information, please see license.txt from __future__ import", "(c) 2021, cjs and contributors # For license information, please see license.txt from", "# msl = frappe.new_doc('Machine Status Log') # msl.update({ # 'machine_status': ms.name, # 'asset':", "'docstatus': 1 # }) # msl.insert() # # msl.docstatus = 1 # #", "'asset': ms.asset, 'workstation': ms.workstation, 'current_time': frappe.utils.get_datetime(), 'current_machine_status': new_mac_stat, 'docstatus': 1 }) msl.insert() #", "ssapi = frappe.get_doc('Server Script', \"Mac Stat From NR To ERPNext\") # ssapi.reload() ssapi.db_set('disabled',", "new_value): # # ms = frappe.get_doc('Machine Status', ms_id) # # ms.reload() # #", "# # ms.save() # # ms.reload() @frappe.whitelist() def create_new_mac_stat_log(ms_id, new_mac_stat): ms = frappe.get_doc('Machine" ]
[ "from appJar import gui def press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\",", "\"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\")", "app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None) app.addWebLink(\"l2\", \"http://www.appJar.info\")", "app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\",", "app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\",", "text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\",", "\"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None) app.addWebLink(\"l2\",", "\"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\",", "app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") 
app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None)", "\"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None) app.addWebLink(\"l2\", \"http://www.appJar.info\") app.addMeter(\"m1\")", "sys sys.path.append(\"../../\") from appJar import gui def press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default", "app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\",", "\"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None) app.addWebLink(\"l2\", \"http://www.appJar.info\") app.addMeter(\"m1\") app.addEntry(\"e1\") app.addEntry(\"e2\") app.setEntryDefault(\"e1\", \"<DEFAULT>\") app.go(language=\"ENGLISH\")", "sys.path.append(\"../../\") from appJar import gui def press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\")", "import sys sys.path.append(\"../../\") from appJar import gui def press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\",", "app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\",", "\"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\")", "\"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\",", "\"default text\") 
app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\")", "[\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\")", "\"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\",", "def press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\",", "app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\",", "app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame()", "text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\",", "app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\",", "\"default text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") 
app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\", [\"apples\",", "\"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\")", "app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\",", "app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"])", "appJar import gui def press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\",", "text\") app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\")", "press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default", "app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\")", "app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\",", "\"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None) 
app.addWebLink(\"l2\", \"http://www.appJar.info\") app.addMeter(\"m1\") app.addEntry(\"e1\") app.addEntry(\"e2\") app.setEntryDefault(\"e1\",", "app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\")", "press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\")", "\"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\",", "app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\")", "\"Default message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\",", "[\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\")", "app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\")", "text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", 
\"tomatoes\"])", "\"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\",", "<filename>examples/internationalisation/testLangs.py import sys sys.path.append(\"../../\") from appJar import gui def press(btn): app.changeLanguage(btn) app=gui() app.showSplash()", "\"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\")", "gui def press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\", \"French\"], press)", "\"French\"], press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message", "app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default text\")", "app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None) app.addWebLink(\"l2\", \"http://www.appJar.info\") app.addMeter(\"m1\") app.addEntry(\"e1\") app.addEntry(\"e2\")", "app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default text\")", "app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"])", "app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") 
app.addRadioButton(\"name\", \"b1\") app.addRadioButton(\"name\", \"b2\") app.addRadioButton(\"name\", \"b3\") app.addRadioButton(\"name\", \"b4\") app.stopLabelFrame() app.addRadioButton(\"age\",", "app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default", "app.stopLabelFrame() app.addRadioButton(\"age\", \"b1\") app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None) app.addWebLink(\"l2\", \"http://www.appJar.info\") app.addMeter(\"m1\") app.addEntry(\"e1\")", "[\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addCheckBox(\"b1\") app.addCheckBox(\"b2\") app.addCheckBox(\"b3\") app.startLabelFrame(\"Names\") app.addRadioButton(\"name\", \"b1\")", "app.addRadioButton(\"age\", \"b2\") app.addRadioButton(\"age\", \"b3\") app.addLink(\"l1\", None) app.addWebLink(\"l2\", \"http://www.appJar.info\") app.addMeter(\"m1\") app.addEntry(\"e1\") app.addEntry(\"e2\") app.setEntryDefault(\"e1\", \"<DEFAULT>\")", "\"Korean\", \"French\"], press) app.addLabel(\"l2\", \"default text\") app.addLabel(\"l3\", \"default text\") app.addLabelEntry(\"Genome\") app.addLabelScale(\"s1\") app.addMessage(\"m1\", \"Default", "message text\") app.addListBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addOptionBox(\"fruits\", [\"apples\", \"oranges\", \"tomatoes\"]) app.addSpinBox(\"fruits\", [\"apples\", \"oranges\",", "import gui def press(btn): app.changeLanguage(btn) app=gui() app.showSplash() app.addLabel(\"l1\", \"default text\") app.addButtons([\"English\", \"Korean\", \"French\"]," ]
[ "== 3: print(\"\\n\") print('\\n') cat = int(input('Enter the No. : ')) return cat_list[cat-1]", "3): self.intro(file_name,read=True) else: print('PLEASE ENTER A VALID NUMBER') self.a = 'PLEASE ENTER A", "input('Enter the Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in self.new_cat_list: self.new_cat_sub.setdefault(k) m", "',i+1) user_name = input('Enter your Name : ') user_age = int(input('Enter your Age", "======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) def category(self): for i,each in enumerate(cat_list): print('{}.", "Business: a = '' auth = False def __init__(self): print(''' SELECT YOUR CHOICE", "input('Enter the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n ======================= ENDED CATEGORY", "dict = { 'name': name, 'age': age, 'location': loc, 'category': cat, 'subcategory': subcat,", "') if (self.auth == False): if (admin_username == 'admin123' and admin_password == 'password'):", "= open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO. | CATEGORY", "OR PASSWORD') self.a = 'INVALID USERNAME OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif (enter", "self.a = 'TASK COMPLETED SUCCESSFULLY !!!' 
elif (enter == 3): self.intro(file_name,read=True) else: print('PLEASE", "| CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {}", "ENTER A VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for i in range(1): print('USER NO.", "return cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS : {}'.format(cat_value)) for i,each in", "Categories want to be Added : ')) for i in range(n): print('\\n =======================", "= [] subcat = [] year = [] # ADMIN INPUT LIST cat_list", "STARTED CATEGORY {} =========================== \\n'.format(i+1)) print('Category No. :',i+1) user_category = input('Enter the Category", "to be added for the above Category : ')) new_subcat_list = [] for", "= int(input('Enter the No. of Sub Categories want to be added for the", "cat_list.extend(self.new_cat_list) for k in self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter the No. of Sub", "self.new_cat_sub[k] = new_subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def", "pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False) return csv def intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY", "CATEGORY NO. | SUB CATEGORY NAME |\") f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\"", "No. 
: ')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub", "PASSWORD') self.a = 'INVALID USERNAME OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif (enter ==", "else: print('INVALID USERNAME OR PASSWORD') self.a = 'INVALID USERNAME OR PASSWORD' self.__init__() else:", "int(input('Enter your Age : ')) user_loc = input('Enter your Location : ') user_cat", "loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n = int(input('Enter the No. of", "Name : ') user_age = int(input('Enter your Age : ')) user_loc = input('Enter", "the above Category : ')) new_subcat_list = [] for j in range(m): print('Sub", "(enter == 3): self.intro(file_name,read=True) else: print('PLEASE ENTER A VALID NUMBER') self.a = 'PLEASE", "the No. : ')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list = []", "enter = int(input('Enter your Choice : ')) if (enter == 1): admin_username =", "{} # GUEST INPUT file_name = 'categories.txt' class Business: a = '' auth", "i in range(1): print('USER NO. : ',i+1) user_name = input('Enter your Name :", "intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO.", "print('Sub Category No. 
:',j+1) user_sub_category = input('Enter the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k]", "INPUT file_name = 'categories.txt' class Business: a = '' auth = False def", "= int(input('Enter your Age : ')) user_loc = input('Enter your Location : ')", "Location : ') user_cat = self.category() user_subcat = self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize())", "No. of Sub Categories want to be added for the above Category :", "{} \\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n') cat =", "cat = int(input('Enter the No. : ')) return cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE", "ADMIN 2. USER 3. GUEST ''') enter = int(input('Enter your Choice : '))", "= 'PLEASE ENTER A VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for i in range(1):", "= self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def", "')) return cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS : {}'.format(cat_value)) for i,each", "i,each in enumerate(cat_list): print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 == 3:", "age = [] loc = [] cat = [] subcat = [] year", "GUEST INPUT file_name = 'categories.txt' class Business: a = '' auth = False", "'category': cat, 'subcategory': subcat, 'year': year } df = pd.DataFrame(dict) csv = df.to_csv('data.csv',", "input('Enter the Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n ======================= ENDED CATEGORY", "2. USER 3. 
GUEST ''') enter = int(input('Enter your Choice : ')) if", "for i,each in enumerate(cat_list): print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 ==", ": ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1))", ": ')) for i in range(n): print('\\n ======================= STARTED CATEGORY {} =========================== \\n'.format(i+1))", "SUB CATEGORY OF {} IS : '.format(cat_value)) print('\\n') subcat = int(input('Enter the No.", "import matplotlib.pyplot as plt import seaborn as sns import datetime import warnings #", "print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub = {} def", "ENDED CATEGORY {} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name':", "No. of Categories want to be Added : ')) for i in range(n):", "= 'INVALID USERNAME OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif (enter == 2): self.business(name,age,loc,cat,subcat,year)", "in range(m): print('Sub Category No. :',j+1) user_sub_category = input('Enter the Sub-Category : ')", "CATEGORY {} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name': name,", "SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO. | CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\")", "name = [] age = [] loc = [] cat = [] subcat", "print('INVALID USERNAME OR PASSWORD') self.a = 'INVALID USERNAME OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list)", "3: print(\"\\n\") print('\\n') cat = int(input('Enter the No. 
: ')) return cat_list[cat-1] def", "loc = [] cat = [] subcat = [] year = [] #", "= 'TASK COMPLETED SUCCESSFULLY !!!' elif (enter == 3): self.intro(file_name,read=True) else: print('PLEASE ENTER", "''') enter = int(input('Enter your Choice : ')) if (enter == 1): admin_username", "else: self.add_category(self.new_cat_list) elif (enter == 2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED SUCCESSFULLY !!!'", "4 == 3: print(\"\\n\") print('\\n') cat = int(input('Enter the No. : ')) return", "import numpy as np import pandas as pd import matplotlib.pyplot as plt import", "your Choice : ')) if (enter == 1): admin_username = input('ENTER USERNAME :", "USER INPUT LIST name = [] age = [] loc = [] cat", "int(input('Enter the No. : ')) return cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS", "Choice : ')) if (enter == 1): admin_username = input('ENTER USERNAME : ')", "= [] # ADMIN INPUT LIST cat_list = [] cat_sub = {} #", "= input('Enter the Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in self.new_cat_list: self.new_cat_sub.setdefault(k)", "[] subcat = [] year = [] # ADMIN INPUT LIST cat_list =", "as pd import matplotlib.pyplot as plt import seaborn as sns import datetime import", "added for the above Category : ')) new_subcat_list = [] for j in", "{} IS : '.format(cat_value)) print('\\n') subcat = int(input('Enter the No. : ')) print('SUBCATEGORY", "OF {} IS : '.format(cat_value)) print('\\n') subcat = int(input('Enter the No. : '))", "3. GUEST ''') enter = int(input('Enter your Choice : ')) if (enter ==", "!!!' elif (enter == 3): self.intro(file_name,read=True) else: print('PLEASE ENTER A VALID NUMBER') self.a", "{} def add_category(self,new_cat): n = int(input('Enter the No. of Categories want to be", "False def __init__(self): print(''' SELECT YOUR CHOICE : 1. ADMIN 2. 
USER 3.", "======================= STARTED CATEGORY {} =========================== \\n'.format(i+1)) print('Category No. :',i+1) user_category = input('Enter the", "= False def __init__(self): print(''' SELECT YOUR CHOICE : 1. ADMIN 2. USER", "datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n = int(input('Enter", "print('Category No. :',i+1) user_category = input('Enter the Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for", ": ')) new_subcat_list = [] for j in range(m): print('Sub Category No. :',j+1)", "INPUT LIST name = [] age = [] loc = [] cat =", "self.__init__() else: self.add_category(self.new_cat_list) elif (enter == 2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED SUCCESSFULLY", "a = '' auth = False def __init__(self): print(''' SELECT YOUR CHOICE :", "the No. of Categories want to be Added : ')) for i in", "= int(input('Enter the No. 
: ')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list", "= input('ENTER PASSWORD : ') if (self.auth == False): if (admin_username == 'admin123'", "user_category = input('Enter the Category : ') cat_list.append(user_category.capitalize()) for k in cat_list: cat_sub.setdefault(k)", "LIST cat_list = [] cat_sub = {} # GUEST INPUT file_name = 'categories.txt'", "admin_password = input('ENTER PASSWORD : ') if (self.auth == False): if (admin_username ==", "True else: print('INVALID USERNAME OR PASSWORD') self.a = 'INVALID USERNAME OR PASSWORD' self.__init__()", "Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n ======================= ENDED CATEGORY {} ===========================", ": ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1))", "file_name = 'categories.txt' class Business: a = '' auth = False def __init__(self):", "[] year = [] # ADMIN INPUT LIST cat_list = [] cat_sub =", "print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n') print('THE", ": ',i+1) user_name = input('Enter your Name : ') user_age = int(input('Enter your", "\\n'.format(i+1)) print('Category No. :',i+1) user_category = input('Enter the Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list)", "USERNAME OR PASSWORD') self.a = 'INVALID USERNAME OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif", "year = [] # ADMIN INPUT LIST cat_list = [] cat_sub = {}", "numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn", "NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for i in range(1): print('USER NO. 
: ',i+1) user_name", "enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t | {} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f =", "the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n ======================= ENDED CATEGORY {}", "subcat = [] year = [] # ADMIN INPUT LIST cat_list = []", "user_name = input('Enter your Name : ') user_age = int(input('Enter your Age :", "= input('Enter the Category : ') cat_list.append(user_category.capitalize()) for k in cat_list: cat_sub.setdefault(k) m", "Category No. :',j+1) user_sub_category = input('Enter the Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] =", "subcat = int(input('Enter the No. : ')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1]", "2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED SUCCESSFULLY !!!' elif (enter == 3): self.intro(file_name,read=True)", "self.add_category(self.new_cat_list) elif (enter == 2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED SUCCESSFULLY !!!' elif", "YOUR CHOICE : 1. ADMIN 2. USER 3. GUEST ''') enter = int(input('Enter", "new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub)", "PASSWORD : ') if (self.auth == False): if (admin_username == 'admin123' and admin_password", "to be Added : ')) for i in range(n): print('\\n ======================= STARTED CATEGORY", "= pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False) return csv def intro(self,file_name,read=False): f = open(file_name,'w')", "i in range(n): print('\\n ======================= STARTED CATEGORY {} =========================== \\n'.format(i+1)) print('Category No. 
:',i+1)", "cat, 'subcategory': subcat, 'year': year } df = pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False)", "input('ENTER USERNAME : ') admin_password = input('ENTER PASSWORD : ') if (self.auth ==", "{} | {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t | SUB CATEGORY NO. |", "def intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY", "loc, 'category': cat, 'subcategory': subcat, 'year': year } df = pd.DataFrame(dict) csv =", "NO. | CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1): f.write(\"|", "elif (enter == 2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED SUCCESSFULLY !!!' elif (enter", "as plt import seaborn as sns import datetime import warnings # DECLARING VARIABLES", "user_sub_category = input('Enter the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n =======================", "be added for the above Category : ')) subcat_list = [] for j", "class Business: a = '' auth = False def __init__(self): print(''' SELECT YOUR", "cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name': name, 'age': age, 'location': loc, 'category':", "'INVALID USERNAME OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif (enter == 2): self.business(name,age,loc,cat,subcat,year) self.a", "for j in range(m): print('Sub Category No. :',j+1) user_sub_category = input('Enter the Sub-Category", "want to be added for the above Category : ')) subcat_list = []", "print('THE CATEGORY IS : {}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]): print('{}. {} \\t \\t'.format(i+1,each),end='')", "int(input('Enter the No. 
: ')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list =", "f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO. | CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n')", "in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t | {} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f", "'subcategory': subcat, 'year': year } df = pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False) return", "if (enter == 1): admin_username = input('ENTER USERNAME : ') admin_password = input('ENTER", "year } df = pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False) return csv def intro(self,file_name,read=False):", "') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) return", "open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO. | CATEGORY NAME", "cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n = int(input('Enter the No. of Categories", "above Category : ')) subcat_list = [] for j in range(m): print('Sub Category", "def category(self): for i,each in enumerate(cat_list): print('{}. 
{} \\t \\t'.format(i+1,each),end='') if i %", "user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n", "'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED SUCCESSFULLY' self.auth = True else: print('INVALID USERNAME", "Sub Categories want to be added for the above Category : ')) subcat_list", "subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) def category(self):", "[] age = [] loc = [] cat = [] subcat = []", "# IMPORTING LIBRARIES import numpy as np import pandas as pd import matplotlib.pyplot", "{} \\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n') print('THE SUB", "No. :',i+1) user_category = input('Enter the Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k", "def subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS : {}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]): print('{}.", "CATEGORY {} =========================== \\n'.format(i+1)) print('Category No. 
:',i+1) user_category = input('Enter the Category :", "= input('Enter the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n ======================= ENDED", "for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t | {} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n')", "new_cat_list = [] new_cat_sub = {} def add_category(self,new_cat): n = int(input('Enter the No.", "# DECLARING VARIABLES # USER INPUT LIST name = [] age = []", "= [] cat_sub = {} # GUEST INPUT file_name = 'categories.txt' class Business:", "== 3: print(\"\\n\") print('\\n') print('THE SUB CATEGORY OF {} IS : '.format(cat_value)) print('\\n')", "VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for i in range(1): print('USER NO. : ',i+1)", "enumerate(cat_list): print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n')", "(enter == 1): admin_username = input('ENTER USERNAME : ') admin_password = input('ENTER PASSWORD", "range(1): print('USER NO. : ',i+1) user_name = input('Enter your Name : ') user_age", "== False): if (admin_username == 'admin123' and admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a =", "\\t \\t | {} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f = open(file_name,'r') print(f.read())", "No. :',j+1) user_sub_category = input('Enter the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list", "NO. : ',i+1) user_name = input('Enter your Name : ') user_age = int(input('Enter", "import datetime import warnings # DECLARING VARIABLES # USER INPUT LIST name =", "\\n'.format(i+1)) def category(self): for i,each in enumerate(cat_list): print('{}. {} \\t \\t'.format(i+1,each),end='') if i", "f.write(\" \\t \\t | SUB CATEGORY NO. 
| SUB CATEGORY NAME |\") f.write('\\n')", "OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif (enter == 2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK", "in cat_list: cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories want to", "CATEGORY {} =========================== \\n'.format(i+1)) def category(self): for i,each in enumerate(cat_list): print('{}. {} \\t", "def data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name': name, 'age': age, 'location': loc, 'category': cat,", "|\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {} | {} \".format(str(i).center(9),cat_each))", "if i % 4 == 3: print(\"\\n\") print('\\n') cat = int(input('Enter the No.", "| {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f = open(file_name,'r') print(f.read()) f.close() def __str__(self): return", "= [] cat = [] subcat = [] year = [] # ADMIN", "SUCCESSFULLY' self.auth = True else: print('INVALID USERNAME OR PASSWORD') self.a = 'INVALID USERNAME", "data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name': name, 'age': age, 'location': loc, 'category': cat, 'subcategory':", "'year': year } df = pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False) return csv def", "| {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t | SUB CATEGORY NO. | SUB", ": {}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]): print('{}. 
{} \\t \\t'.format(i+1,each),end='') if i %", "\\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name': name, 'age': age, 'location':", "'age': age, 'location': loc, 'category': cat, 'subcategory': subcat, 'year': year } df =", "LIST name = [] age = [] loc = [] cat = []", "= input('Enter your Name : ') user_age = int(input('Enter your Age : '))", "') admin_password = input('ENTER PASSWORD : ') if (self.auth == False): if (admin_username", "Categories want to be added for the above Category : ')) new_subcat_list =", "the No. : ')) return cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS :", "csv def intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"|", "CATEGORY NAME |\") f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t | {}", "{ 'name': name, 'age': age, 'location': loc, 'category': cat, 'subcategory': subcat, 'year': year", "Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n ======================= ENDED CATEGORY {} ===========================", "print(''' SELECT YOUR CHOICE : 1. ADMIN 2. USER 3. GUEST ''') enter", "SELECT YOUR CHOICE : 1. ADMIN 2. USER 3. GUEST ''') enter =", "'admin123' and admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED SUCCESSFULLY' self.auth =", "= int(input('Enter the No. of Categories want to be Added : ')) for", "{}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]): print('{}. 
{} \\t \\t'.format(i+1,each),end='') if i % 4", "'location': loc, 'category': cat, 'subcategory': subcat, 'year': year } df = pd.DataFrame(dict) csv", "')) if (enter == 1): admin_username = input('ENTER USERNAME : ') admin_password =", "i % 4 == 3: print(\"\\n\") print('\\n') print('THE SUB CATEGORY OF {} IS", "= [] age = [] loc = [] cat = [] subcat =", "{}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub = {} def add_category(self,new_cat): n =", ": ')) if (enter == 1): admin_username = input('ENTER USERNAME : ') admin_password", "the above Category : ')) subcat_list = [] for j in range(m): print('Sub", "= input('Enter the Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n ======================= ENDED", "{} =========================== \\n'.format(i+1)) print('Category No. :',i+1) user_category = input('Enter the Category : ')", "(admin_username == 'admin123' and admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED SUCCESSFULLY'", "user_sub_category = input('Enter the Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n =======================", "\\t \\t | SUB CATEGORY NO. | SUB CATEGORY NAME |\") f.write('\\n') for", "{} =========================== \\n'.format(i+1)) def category(self): for i,each in enumerate(cat_list): print('{}. {} \\t \\t'.format(i+1,each),end='')", "======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict = {", "= [] year = [] # ADMIN INPUT LIST cat_list = [] cat_sub", "be added for the above Category : ')) new_subcat_list = [] for j", "\\t | SUB CATEGORY NO. 
| SUB CATEGORY NAME |\") f.write('\\n') for j,subcat_each", "(self.auth == False): if (admin_username == 'admin123' and admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a", "def __init__(self): print(''' SELECT YOUR CHOICE : 1. ADMIN 2. USER 3. GUEST", "int(input('Enter the No. of Sub Categories want to be added for the above", "= 'TASK COMPLETED SUCCESSFULLY' self.auth = True else: print('INVALID USERNAME OR PASSWORD') self.a", "Categories want to be added for the above Category : ')) subcat_list =", "f.write(\"=======================================\") f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {} | {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\"", "print('Category No. :',i+1) user_category = input('Enter the Category : ') cat_list.append(user_category.capitalize()) for k", "# GUEST INPUT file_name = 'categories.txt' class Business: a = '' auth =", "')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub = {}", "IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub = {} def add_category(self,new_cat):", "self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories want to be", "int(input('Enter the No. of Categories want to be Added : ')) for i", "admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED SUCCESSFULLY' self.auth = True else:", "f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t | {} | {} \".format(str(j).center(11),subcat_each))", "j in range(m): print('Sub Category No. :',j+1) user_sub_category = input('Enter the Sub-Category :", ": ') if (self.auth == False): if (admin_username == 'admin123' and admin_password ==", "print('\\n') cat = int(input('Enter the No. 
: ')) return cat_list[cat-1] def subcategory(self,cat_value): print('\\n')", "NAME |\") f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t | {} |", "CATEGORY IS : {}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]): print('{}. {} \\t \\t'.format(i+1,each),end='') if", "CATEGORY OF {} IS : '.format(cat_value)) print('\\n') subcat = int(input('Enter the No. :", "= { 'name': name, 'age': age, 'location': loc, 'category': cat, 'subcategory': subcat, 'year':", "=========================== \\n'.format(i+1)) print('Category No. :',i+1) user_category = input('Enter the Category : ') cat_list.append(user_category.capitalize())", "COMPLETED SUCCESSFULLY !!!' elif (enter == 3): self.intro(file_name,read=True) else: print('PLEASE ENTER A VALID", "=========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name': name, 'age': age,", "Category : ')) new_subcat_list = [] for j in range(m): print('Sub Category No.", "plt import seaborn as sns import datetime import warnings # DECLARING VARIABLES #", "SUB CATEGORY NAME |\") f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t |", "print('\\n') print('THE SUB CATEGORY OF {} IS : '.format(cat_value)) print('\\n') subcat = int(input('Enter", "= {} # GUEST INPUT file_name = 'categories.txt' class Business: a = ''", "A VALID NUMBER') self.a = 'PLEASE ENTER A VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year):", "self.__init__() def business(self,name,age,loc,cat,subcat,year): for i in range(1): print('USER NO. 
: ',i+1) user_name =", ": ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter the", "== 1): admin_username = input('ENTER USERNAME : ') admin_password = input('ENTER PASSWORD :", "admin(self,category,subcategory): n = int(input('Enter the No. of Categories want to be Added :", "Category : ') cat_list.append(user_category.capitalize()) for k in cat_list: cat_sub.setdefault(k) m = int(input('Enter the", "i % 4 == 3: print(\"\\n\") print('\\n') cat = int(input('Enter the No. :", "for the above Category : ')) new_subcat_list = [] for j in range(m):", "No. :',j+1) user_sub_category = input('Enter the Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list", "= new_subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year):", "Age : ')) user_loc = input('Enter your Location : ') user_cat = self.category()", "= datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n =", "return csv def intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n')", "print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) def category(self): for i,each in enumerate(cat_list):", "f.write('\\n') f.write(\"| CATEGORY NO. 
| CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each", "your Age : ')) user_loc = input('Enter your Location : ') user_cat =", "SUB CATEGORY NO. | SUB CATEGORY NAME |\") f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1):", "of Categories want to be Added : ')) for i in range(n): print('\\n", "Category : ')) subcat_list = [] for j in range(m): print('Sub Category No.", "=========================== \\n'.format(i+1)) print('Category No. :',i+1) user_category = input('Enter the Category : ') self.new_cat_list.append(user_category.capitalize())", "print('PLEASE ENTER A VALID NUMBER') self.a = 'PLEASE ENTER A VALID NUMBER' self.__init__()", "for k in self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories", "3: print(\"\\n\") print('\\n') print('THE SUB CATEGORY OF {} IS : '.format(cat_value)) print('\\n') subcat", "ENDED CATEGORY {} =========================== \\n'.format(i+1)) def category(self): for i,each in enumerate(cat_list): print('{}. {}", "if i % 4 == 3: print(\"\\n\") print('\\n') print('THE SUB CATEGORY OF {}", "new_cat_sub = {} def add_category(self,new_cat): n = int(input('Enter the No. of Categories want", "== 'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED SUCCESSFULLY' self.auth = True else: print('INVALID", "the No. of Sub Categories want to be added for the above Category", "') user_age = int(input('Enter your Age : ')) user_loc = input('Enter your Location", "in enumerate(cat_list): print('{}. 
{} \\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\")", "new_subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict", "to be added for the above Category : ')) subcat_list = [] for", "\\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n') print('THE SUB CATEGORY OF", "return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name': name, 'age': age, 'location': loc,", "datetime import warnings # DECLARING VARIABLES # USER INPUT LIST name = []", "import seaborn as sns import datetime import warnings # DECLARING VARIABLES # USER", "\\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n') print('THE SUB CATEGORY", "in self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories want to", "= df.to_csv('data.csv', index=False) return csv def intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY')", "input('ENTER PASSWORD : ') if (self.auth == False): if (admin_username == 'admin123' and", "COMPLETED SUCCESSFULLY' self.auth = True else: print('INVALID USERNAME OR PASSWORD') self.a = 'INVALID", "the Category : ') cat_list.append(user_category.capitalize()) for k in cat_list: cat_sub.setdefault(k) m = int(input('Enter", "input('Enter the Category : ') cat_list.append(user_category.capitalize()) for k in cat_list: cat_sub.setdefault(k) m =", "name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n = int(input('Enter the", "subcat, 'year': year } df = pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False) return csv", "f.write('CATEGORY AND 
SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO. | CATEGORY NAME |\")", ": ') admin_password = input('ENTER PASSWORD : ') if (self.auth == False): if", "'TASK COMPLETED SUCCESSFULLY !!!' elif (enter == 3): self.intro(file_name,read=True) else: print('PLEASE ENTER A", "input('Enter your Name : ') user_age = int(input('Enter your Age : ')) user_loc", "% 4 == 3: print(\"\\n\") print('\\n') print('THE SUB CATEGORY OF {} IS :", "')) user_loc = input('Enter your Location : ') user_cat = self.category() user_subcat =", "user_loc = input('Enter your Location : ') user_cat = self.category() user_subcat = self.subcategory(self.category())", "4 == 3: print(\"\\n\") print('\\n') print('THE SUB CATEGORY OF {} IS : '.format(cat_value))", "print(\"\\n\") print('\\n') cat = int(input('Enter the No. : ')) return cat_list[cat-1] def subcategory(self,cat_value):", "Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter", "for i in range(1): print('USER NO. : ',i+1) user_name = input('Enter your Name", "| {} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f = open(file_name,'r') print(f.read()) f.close() def", "'.format(cat_value)) print('\\n') subcat = int(input('Enter the No. : ')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1]))", "IS : {}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]): print('{}. {} \\t \\t'.format(i+1,each),end='') if i", "def admin(self,category,subcategory): n = int(input('Enter the No. of Categories want to be Added", "range(m): print('Sub Category No. :',j+1) user_sub_category = input('Enter the Sub-Category : ') subcat_list.append(user_sub_category.capitalize())", "above Category : ')) new_subcat_list = [] for j in range(m): print('Sub Category", "| SUB CATEGORY NO. 
| SUB CATEGORY NAME |\") f.write('\\n') for j,subcat_each in", "Sub Categories want to be added for the above Category : ')) new_subcat_list", "and admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED SUCCESSFULLY' self.auth = True", "business(self,name,age,loc,cat,subcat,year): for i in range(1): print('USER NO. : ',i+1) user_name = input('Enter your", "user_age = int(input('Enter your Age : ')) user_loc = input('Enter your Location :", "subcat_list = [] for j in range(m): print('Sub Category No. :',j+1) user_sub_category =", "INPUT LIST cat_list = [] cat_sub = {} # GUEST INPUT file_name =", "= self.category() user_subcat = self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize())", ": ') cat_list.append(user_category.capitalize()) for k in cat_list: cat_sub.setdefault(k) m = int(input('Enter the No.", "int(input('Enter your Choice : ')) if (enter == 1): admin_username = input('ENTER USERNAME", "cat = [] subcat = [] year = [] # ADMIN INPUT LIST", "# ADMIN INPUT LIST cat_list = [] cat_sub = {} # GUEST INPUT", "of Sub Categories want to be added for the above Category : '))", "')) for i in range(n): print('\\n ======================= STARTED CATEGORY {} =========================== \\n'.format(i+1)) print('Category", "if (self.auth == False): if (admin_username == 'admin123' and admin_password == 'password'): self.admin(cat_list,self.subcategory)", "in enumerate(cat_sub[cat_value]): print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\")", "print('\\n') subcat = int(input('Enter the No. 
: ')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return", "else: print('PLEASE ENTER A VALID NUMBER') self.a = 'PLEASE ENTER A VALID NUMBER'", "seaborn as sns import datetime import warnings # DECLARING VARIABLES # USER INPUT", "subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n = int(input('Enter the No. of Categories want", "cat_sub[k] = subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) def category(self): for", ": {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub = {} def add_category(self,new_cat): n", "f.write(\"| CATEGORY NO. | CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each in", "(enter == 2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED SUCCESSFULLY !!!' elif (enter ==", ": '.format(cat_value)) print('\\n') subcat = int(input('Enter the No. : ')) print('SUBCATEGORY IS :", "\".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t | SUB CATEGORY NO. 
| SUB CATEGORY NAME", "for i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {} | {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t", "'categories.txt' class Business: a = '' auth = False def __init__(self): print(''' SELECT", "user_subcat = self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year)", "f.write(\" \\t \\t | {} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f = open(file_name,'r')", "\\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n') cat = int(input('Enter", "the Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n ======================= ENDED CATEGORY {}", "self.a = 'INVALID USERNAME OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif (enter == 2):", "age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n = int(input('Enter the No.", "Added : ')) for i in range(n): print('\\n ======================= STARTED CATEGORY {} ===========================", "k in cat_list: cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories want", "CATEGORY NO. | CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1):", "df = pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False) return csv def intro(self,file_name,read=False): f =", "A VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for i in range(1): print('USER NO. :", "SUCCESSFULLY !!!' 
elif (enter == 3): self.intro(file_name,read=True) else: print('PLEASE ENTER A VALID NUMBER')", "self.a = 'PLEASE ENTER A VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for i in", ": ') user_cat = self.category() user_subcat = self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age)", "USERNAME : ') admin_password = input('ENTER PASSWORD : ') if (self.auth == False):", "cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories want to be added", "= int(input('Enter your Choice : ')) if (enter == 1): admin_username = input('ENTER", "for i,each in enumerate(cat_sub[cat_value]): print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 ==", "NO. | SUB CATEGORY NAME |\") f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t", "=========================== \\n'.format(i+1)) def category(self): for i,each in enumerate(cat_list): print('{}. {} \\t \\t'.format(i+1,each),end='') if", "f.write(\"| {} | {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t | SUB CATEGORY NO.", "{} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f = open(file_name,'r') print(f.read()) f.close() def __str__(self):", "# USER INPUT LIST name = [] age = [] loc = []", "IMPORTING LIBRARIES import numpy as np import pandas as pd import matplotlib.pyplot as", "print('Sub Category No. :',j+1) user_sub_category = input('Enter the Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k]", "k in self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories want", "print('USER NO. : ',i+1) user_name = input('Enter your Name : ') user_age =", "csv = df.to_csv('data.csv', index=False) return csv def intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY AND", "')) subcat_list = [] for j in range(m): print('Sub Category No. 
:',j+1) user_sub_category", "False): if (admin_username == 'admin123' and admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK", "enumerate(cat_sub,start=1): f.write(\"| {} | {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t | SUB CATEGORY", "| SUB CATEGORY NAME |\") f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t", "{} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t | SUB CATEGORY NO. | SUB CATEGORY", "as sns import datetime import warnings # DECLARING VARIABLES # USER INPUT LIST", "the Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in self.new_cat_list: self.new_cat_sub.setdefault(k) m =", "range(n): print('\\n ======================= STARTED CATEGORY {} =========================== \\n'.format(i+1)) print('Category No. :',i+1) user_category =", "add_category(self,new_cat): n = int(input('Enter the No. of Categories want to be Added :", "')) new_subcat_list = [] for j in range(m): print('Sub Category No. :',j+1) user_sub_category", "i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {} | {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t |", "1. ADMIN 2. USER 3. GUEST ''') enter = int(input('Enter your Choice :", "} df = pd.DataFrame(dict) csv = df.to_csv('data.csv', index=False) return csv def intro(self,file_name,read=False): f", "ENTER A VALID NUMBER') self.a = 'PLEASE ENTER A VALID NUMBER' self.__init__() def", "as np import pandas as pd import matplotlib.pyplot as plt import seaborn as", "print('\\n') print('THE CATEGORY IS : {}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]): print('{}. {} \\t", ":',i+1) user_category = input('Enter the Category : ') cat_list.append(user_category.capitalize()) for k in cat_list:", "f.write('\\n') f.write(\" \\t \\t | SUB CATEGORY NO. 
| SUB CATEGORY NAME |\")", "'name': name, 'age': age, 'location': loc, 'category': cat, 'subcategory': subcat, 'year': year }", "\\n'.format(i+1)) print('Category No. :',i+1) user_category = input('Enter the Category : ') cat_list.append(user_category.capitalize()) for", "= {} def add_category(self,new_cat): n = int(input('Enter the No. of Categories want to", "in range(n): print('\\n ======================= STARTED CATEGORY {} =========================== \\n'.format(i+1)) print('Category No. :',i+1) user_category", "NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {} | {}", "sns import datetime import warnings # DECLARING VARIABLES # USER INPUT LIST name", "cat_list: cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories want to be", "[] new_cat_sub = {} def add_category(self,new_cat): n = int(input('Enter the No. of Categories", "= [] new_cat_sub = {} def add_category(self,new_cat): n = int(input('Enter the No. of", "__init__(self): print(''' SELECT YOUR CHOICE : 1. ADMIN 2. USER 3. GUEST ''')", "== 3): self.intro(file_name,read=True) else: print('PLEASE ENTER A VALID NUMBER') self.a = 'PLEASE ENTER", "import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import", "added for the above Category : ')) subcat_list = [] for j in", ": ')) return cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS : {}'.format(cat_value)) for", "print('\\n ======================= STARTED CATEGORY {} =========================== \\n'.format(i+1)) print('Category No. 
:',i+1) user_category = input('Enter", "== 'admin123' and admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED SUCCESSFULLY' self.auth", "be Added : ')) for i in range(n): print('\\n ======================= STARTED CATEGORY {}", "cat_list.append(user_category.capitalize()) for k in cat_list: cat_sub.setdefault(k) m = int(input('Enter the No. of Sub", "your Name : ') user_age = int(input('Enter your Age : ')) user_loc =", "import warnings # DECLARING VARIABLES # USER INPUT LIST name = [] age", "auth = False def __init__(self): print(''' SELECT YOUR CHOICE : 1. ADMIN 2.", "ADMIN INPUT LIST cat_list = [] cat_sub = {} # GUEST INPUT file_name", "CHOICE : 1. ADMIN 2. USER 3. GUEST ''') enter = int(input('Enter your", "% 4 == 3: print(\"\\n\") print('\\n') cat = int(input('Enter the No. : '))", "GUEST ''') enter = int(input('Enter your Choice : ')) if (enter == 1):", "your Location : ') user_cat = self.category() user_subcat = self.subcategory(self.category()) user_year = datetime.datetime.now().year", "n = int(input('Enter the No. of Categories want to be Added : '))", "j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t | {} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True):", "= '' auth = False def __init__(self): print(''' SELECT YOUR CHOICE : 1.", ":',j+1) user_sub_category = input('Enter the Sub-Category : ') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n", ": ')) subcat_list = [] for j in range(m): print('Sub Category No. :',j+1)", "self.intro(file_name,read=True) else: print('PLEASE ENTER A VALID NUMBER') self.a = 'PLEASE ENTER A VALID", "= int(input('Enter the No. : ')) return cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE CATEGORY", "[] loc = [] cat = [] subcat = [] year = []", "No. 
:',i+1) user_category = input('Enter the Category : ') cat_list.append(user_category.capitalize()) for k in", "new_subcat_list = [] for j in range(m): print('Sub Category No. :',j+1) user_sub_category =", "1): admin_username = input('ENTER USERNAME : ') admin_password = input('ENTER PASSWORD : ')", ": 1. ADMIN 2. USER 3. GUEST ''') enter = int(input('Enter your Choice", "self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n = int(input('Enter the No. of Categories want to be", "cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub = {} def add_category(self,new_cat): n = int(input('Enter the", "range(m): print('Sub Category No. :',j+1) user_sub_category = input('Enter the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize())", "USER 3. GUEST ''') enter = int(input('Enter your Choice : ')) if (enter", "'' auth = False def __init__(self): print(''' SELECT YOUR CHOICE : 1. ADMIN", "np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns", "cat_sub = {} # GUEST INPUT file_name = 'categories.txt' class Business: a =", "self.category() user_subcat = self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year)", "def add_category(self,new_cat): n = int(input('Enter the No. of Categories want to be Added", "{} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict = { 'name': name, 'age':", "year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory): n = int(input('Enter the No. of Categories want to", "Category No. 
:',j+1) user_sub_category = input('Enter the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] =", "') subcat_list.append(user_sub_category.capitalize()) cat_sub[k] = subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) def", "[] # ADMIN INPUT LIST cat_list = [] cat_sub = {} # GUEST", "age, 'location': loc, 'category': cat, 'subcategory': subcat, 'year': year } df = pd.DataFrame(dict)", "def business(self,name,age,loc,cat,subcat,year): for i in range(1): print('USER NO. : ',i+1) user_name = input('Enter", "user_cat = self.category() user_subcat = self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize())", "return cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub = {} def add_category(self,new_cat): n = int(input('Enter", "AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO. 
| CATEGORY NAME |\") f.write('\\n')", "in enumerate(cat_sub,start=1): f.write(\"| {} | {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t \\t | SUB", "elif (enter == 3): self.intro(file_name,read=True) else: print('PLEASE ENTER A VALID NUMBER') self.a =", "f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {} | {} \".format(str(i).center(9),cat_each)) f.write('\\n') f.write(\" \\t", "self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED SUCCESSFULLY' self.auth = True else: print('INVALID USERNAME OR", "pd import matplotlib.pyplot as plt import seaborn as sns import datetime import warnings", "= subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) def category(self): for i,each", "PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif (enter == 2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED", "in range(1): print('USER NO. : ',i+1) user_name = input('Enter your Name : ')", "self.new_cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories want to be added", "IS : '.format(cat_value)) print('\\n') subcat = int(input('Enter the No. : ')) print('SUBCATEGORY IS", "= input('Enter your Location : ') user_cat = self.category() user_subcat = self.subcategory(self.category()) user_year", "= True else: print('INVALID USERNAME OR PASSWORD') self.a = 'INVALID USERNAME OR PASSWORD'", ": ')) print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1])) return cat_sub[cat_value][subcat-1] new_cat_list = [] new_cat_sub =", "f = open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO. 
|", "VALID NUMBER') self.a = 'PLEASE ENTER A VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for", "input('Enter your Location : ') user_cat = self.category() user_subcat = self.subcategory(self.category()) user_year =", "warnings # DECLARING VARIABLES # USER INPUT LIST name = [] age =", "want to be added for the above Category : ')) new_subcat_list = []", "'PLEASE ENTER A VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for i in range(1): print('USER", "|\") f.write('\\n') for j,subcat_each in enumerate(cat_sub[cat_each],start=1): f.write(\" \\t \\t | {} | {}", "') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter the No.", "self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in self.new_cat_list: self.new_cat_sub.setdefault(k) m = int(input('Enter the No. of", "want to be Added : ')) for i in range(n): print('\\n ======================= STARTED", "admin_username = input('ENTER USERNAME : ') admin_password = input('ENTER PASSWORD : ') if", "for i in range(n): print('\\n ======================= STARTED CATEGORY {} =========================== \\n'.format(i+1)) print('Category No.", ": ')) user_loc = input('Enter your Location : ') user_cat = self.category() user_subcat", "print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n') cat", ": ') user_age = int(input('Enter your Age : ')) user_loc = input('Enter your", "NUMBER') self.a = 'PLEASE ENTER A VALID NUMBER' self.__init__() def business(self,name,age,loc,cat,subcat,year): for i", "[] for j in range(m): print('Sub Category No. 
:',j+1) user_sub_category = input('Enter the", "USERNAME OR PASSWORD' self.__init__() else: self.add_category(self.new_cat_list) elif (enter == 2): self.business(name,age,loc,cat,subcat,year) self.a =", "pandas as pd import matplotlib.pyplot as plt import seaborn as sns import datetime", "category(self): for i,each in enumerate(cat_list): print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4", "') cat_list.append(user_category.capitalize()) for k in cat_list: cat_sub.setdefault(k) m = int(input('Enter the No. of", "DECLARING VARIABLES # USER INPUT LIST name = [] age = [] loc", "VARIABLES # USER INPUT LIST name = [] age = [] loc =", ":',j+1) user_sub_category = input('Enter the Sub-Category : ') new_subcat_list.append(user_sub_category.capitalize()) self.new_cat_sub[k] = new_subcat_list print('\\n", ":',i+1) user_category = input('Enter the Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in", "for the above Category : ')) subcat_list = [] for j in range(m):", "self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED SUCCESSFULLY !!!' elif (enter == 3): self.intro(file_name,read=True) else:", "for k in cat_list: cat_sub.setdefault(k) m = int(input('Enter the No. of Sub Categories", "subcat_list print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) def category(self): for i,each in", "print('\\n ======================= ENDED CATEGORY {} =========================== \\n'.format(i+1)) return cat_sub.update(self.new_cat_sub) def data_to_csv(self,name,age,loc,cat,subcat,year): dict =", "if (admin_username == 'admin123' and admin_password == 'password'): self.admin(cat_list,self.subcategory) self.a = 'TASK COMPLETED", "f.write(\"=======================================\") f.write('\\n') f.write(\"| CATEGORY NO. 
| CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for", "self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper()) cat.append(user_cat.capitalize()) subcat.append(user_subcat.capitalize()) year.append(user_year) self.data_to_csv(name,age,loc,cat,subcat,year) def admin(self,category,subcategory):", "matplotlib.pyplot as plt import seaborn as sns import datetime import warnings # DECLARING", "\\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n') cat = int(input('Enter the", "[] cat_sub = {} # GUEST INPUT file_name = 'categories.txt' class Business: a", "user_category = input('Enter the Category : ') self.new_cat_list.append(user_category.capitalize()) cat_list.extend(self.new_cat_list) for k in self.new_cat_list:", "enumerate(cat_sub[cat_value]): print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 == 3: print(\"\\n\") print('\\n')", "df.to_csv('data.csv', index=False) return csv def intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n')", "= input('ENTER USERNAME : ') admin_password = input('ENTER PASSWORD : ') if (self.auth", "self.auth = True else: print('INVALID USERNAME OR PASSWORD') self.a = 'INVALID USERNAME OR", "[] cat = [] subcat = [] year = [] # ADMIN INPUT", "i,each in enumerate(cat_sub[cat_value]): print('{}. {} \\t \\t'.format(i+1,each),end='') if i % 4 == 3:", "== 2): self.business(name,age,loc,cat,subcat,year) self.a = 'TASK COMPLETED SUCCESSFULLY !!!' 
elif (enter == 3):", "index=False) return csv def intro(self,file_name,read=False): f = open(file_name,'w') f.write('CATEGORY AND SUBCATEGORY') f.write('\\n') f.write(\"=======================================\")", "print('THE SUB CATEGORY OF {} IS : '.format(cat_value)) print('\\n') subcat = int(input('Enter the", "cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS : {}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]):", "\\t | {} | {} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f = open(file_name,'r') print(f.read()) f.close()", "= [] for j in range(m): print('Sub Category No. :',j+1) user_sub_category = input('Enter", "= 'categories.txt' class Business: a = '' auth = False def __init__(self): print('''", "') user_cat = self.category() user_subcat = self.subcategory(self.category()) user_year = datetime.datetime.now().year name.append(user_name.capitalize()) age.append(user_age) loc.append(user_loc.upper())", "CATEGORY NAME |\") f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {} |", "LIBRARIES import numpy as np import pandas as pd import matplotlib.pyplot as plt", "subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS : {}'.format(cat_value)) for i,each in enumerate(cat_sub[cat_value]): print('{}. {}", "self.a = 'TASK COMPLETED SUCCESSFULLY' self.auth = True else: print('INVALID USERNAME OR PASSWORD')", "m = int(input('Enter the No. of Sub Categories want to be added for", "name, 'age': age, 'location': loc, 'category': cat, 'subcategory': subcat, 'year': year } df", "No. 
: ')) return cat_list[cat-1] def subcategory(self,cat_value): print('\\n') print('THE CATEGORY IS : {}'.format(cat_value))", "{} \".format(str(j).center(11),subcat_each)) f.write('\\n') if(read==True): f = open(file_name,'r') print(f.read()) f.close() def __str__(self): return self.a", "cat_list = [] cat_sub = {} # GUEST INPUT file_name = 'categories.txt' class", "'TASK COMPLETED SUCCESSFULLY' self.auth = True else: print('INVALID USERNAME OR PASSWORD') self.a =", "= [] loc = [] cat = [] subcat = [] year =", "f.write('\\n') f.write(\"=======================================\") f.write('\\n') for i,cat_each in enumerate(cat_sub,start=1): f.write(\"| {} | {} \".format(str(i).center(9),cat_each)) f.write('\\n')", "print(\"\\n\") print('\\n') print('THE SUB CATEGORY OF {} IS : '.format(cat_value)) print('\\n') subcat =" ]
[ "######################## try: with open('NameChangeLog.txt', 'w') as file_log: for name in list_of_names: file_log.write(f'{set_date} --", "send_names() # import data from get_names.py ################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page():", "as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) except: with open('NameChangeLog.txt',", "def login_page(): ''' login in to website autetification ''' email_field = browser.find_element_by_css_selector('#eMail') password_field", "browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this function gets in", "file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) finally: print('everything was change", "> li:nth-child(2) > a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): ''' this scroll", "f'Automatic System - Forgot to clock out for Lunch - Administrator - {set_date}'", "this input the minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM", "their name''' info_name = browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=') xpath_path", "login in to website autetification ''' email_field = browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field", "minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM or PM in", "browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field = 
browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def", "gets in a list of names that will loop in it. ''' for", "'11:50' SET_TIME_DIV = 'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\") # today in format mm/dd/yyyy", "\"\" NOTE = f'Automatic System - Forgot to clock out for Lunch -", "action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this function gets in a list of names that", "click the employees link inside of the website after login in.''' employee_css =", "names to be change. ########## this created a log in a text file", "action_tab = Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this function gets in a list", "- Forgot to clock out for Lunch - Administrator - {set_date}' list_of_names =", "# this input the minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the", "name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) except: with open('NameChangeLog.txt', 'a') as file_log:", "website and click the checkbox with their name''' info_name = browser.find_element_by_link_text(name) href_link =", "# import data from get_names.py ################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): '''", "= info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view", "of the people who forgot to clock out in lunch time ''' #######################################", "datetime import date from get_names import send_names ''' this app automatic login in", "open('NameChangeLog.txt', 'a') as file_log: for name in 
list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) finally:", "action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this function gets in a", "= browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this function gets", "in a list of names that will loop in it. ''' for name", "it. ''' for name in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to select the", "email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): ''' click the employees link inside of", "to clock out for Lunch - Administrator - {set_date}' list_of_names = send_names() #", "get_names import send_names ''' this app automatic login in to a website and", "= '' PASSWORD = \"\" NOTE = f'Automatic System - Forgot to clock", "scroll the website and click the checkbox with their name''' info_name = browser.find_element_by_link_text(name)", "''' open select items and select them. ''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab", "selenium.webdriver.support.ui import Select from datetime import date from get_names import send_names ''' this", "and select them. 
''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text) def", "summit_field = browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): ''' click the", "''' click the employees link inside of the website after login in.''' employee_css", "######################################### SET_TIME = '11:50' SET_TIME_DIV = 'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\") # today", "login_page() select_employees_website() select_names_flow(names=list_of_names) # this accept a list of names to be change.", "= f'Automatic System - Forgot to clock out for Lunch - Administrator -", "date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ == \"__main__\": login_page() select_employees_website() select_names_flow(names=list_of_names) # this accept", "name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) finally: print('everything was change successfully ')", "input the minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM or", "browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click =", "from get_names import send_names ''' this app automatic login in to a website", "text): ''' open select items and select them. 
''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view", "\"%m/%d/%Y\") # today in format mm/dd/yyyy USERNAME = '' PASSWORD = \"\" NOTE", "'w') as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) except: with", "''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this", "function gets in a list of names that will loop in it. '''", "data from get_names.py ################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login in", "cut the hours of the people who forgot to clock out in lunch", "the hour to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes to field", "select the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour to field", "select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to select the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this", "'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\") # today in format mm/dd/yyyy USERNAME = ''", "browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text): ''' open select items and select them.", "= send_names() # import data from get_names.py ################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def", "id_number = href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id,", 
"file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) except: with open('NameChangeLog.txt', 'a')", "this function gets in a list of names that will loop in it.", "text=SET_TIME.split(':')[1]) # this input the minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input", "date.strftime(date.today(), \"%m/%d/%Y\") # today in format mm/dd/yyyy USERNAME = '' PASSWORD = \"\"", "for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text): ''' open select items and select them. '''", "the website after login in.''' employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)' employee_button_", "f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text): ''' open select items", "of the website after login in.''' employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)'", "a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): ''' this scroll the website and", "= browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click", "input the AM or PM in field date_field = browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes')", "select them. ''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names):", "list of names to be change. 
########## this created a log in a", "Select from datetime import date from get_names import send_names ''' this app automatic", "and cut the hours of the people who forgot to clock out in", "in to website autetification ''' email_field = browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field =", "def select_box(id, text): ''' open select items and select them. ''' action_find =", "for name in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to select the action bar", "for Lunch - Administrator - {set_date}' list_of_names = send_names() # import data from", "browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ == \"__main__\": login_page() select_employees_website() select_names_flow(names=list_of_names) # this", "select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM or PM in field date_field =", "this input the AM or PM in field date_field = browser.find_element_by_id('TimeOut_Date') note_field =", "action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1])", "people who forgot to clock out in lunch time ''' ####################################### Basic settings", "in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to select the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0])", "webdriver from selenium.webdriver.support.ui import Select from datetime import date from get_names import send_names", "NOTE = f'Automatic System - Forgot to clock out for Lunch - Administrator", "select_names(name): ''' this scroll the website and click the checkbox with their name'''", "def select_names_flow(names): ''' this function gets in a list of names that will", "# today in format mm/dd/yyyy USERNAME = '' 
PASSWORD = \"\" NOTE =", "for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text): ''' open select items and", "= browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click()", "field date_field = browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ ==", "-- {name}\\n') print(name) except: with open('NameChangeLog.txt', 'a') as file_log: for name in list_of_names:", "to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV)", "the AM or PM in field date_field = browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date)", "this scroll the website and click the checkbox with their name''' info_name =", "clock out for Lunch - Administrator - {set_date}' list_of_names = send_names() # import", "System - Forgot to clock out for Lunch - Administrator - {set_date}' list_of_names", "loop in it. 
''' for name in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to", "AM or PM in field date_field = browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE)", "and click the checkbox with their name''' info_name = browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href')", "from selenium.webdriver.support.ui import Select from datetime import date from get_names import send_names '''", "# to select the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour", "href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text): '''", "= '11:50' SET_TIME_DIV = 'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\") # today in format", "select items and select them. ''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find)", "be change. 
########## this created a log in a text file ######################## try:", "a website and cut the hours of the people who forgot to clock", "= '.menu-main > li:nth-child(2) > a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): '''", "summit_field.click() def select_employees_website(): ''' click the employees link inside of the website after", "hours of the people who forgot to clock out in lunch time '''", "to website autetification ''' email_field = browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen')", "a log in a text file ######################## try: with open('NameChangeLog.txt', 'w') as file_log:", "with their name''' info_name = browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=')", "website autetification ''' email_field = browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen') email_field.clear()", "today in format mm/dd/yyyy USERNAME = '' PASSWORD = \"\" NOTE = f'Automatic", "will loop in it. ''' for name in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') #", "{name}\\n') print(name) except: with open('NameChangeLog.txt', 'a') as file_log: for name in list_of_names: file_log.write(f'{set_date}", "Administrator - {set_date}' list_of_names = send_names() # import data from get_names.py ################################################################################################ browser", "open select items and select them. 
''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab =", "text='Check-Out') # to select the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the", "= Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this function gets in a list of", "li:nth-child(2) > a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): ''' this scroll the", "select_names_flow(names=list_of_names) # this accept a list of names to be change. ########## this", "name''' info_name = browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=') xpath_path =", "settings ######################################### SET_TIME = '11:50' SET_TIME_DIV = 'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\") #", "login in.''' employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click()", "''' login in to website autetification ''' email_field = browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password')", "import data from get_names.py ################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login", "''' ####################################### Basic settings ######################################### SET_TIME = '11:50' SET_TIME_DIV = 'AM' set_date =", "link inside of the website after login in.''' employee_css = '.menu-main > li:nth-child(2)", "= \"\" NOTE = f'Automatic System - Forgot to clock out for Lunch", "except: with open('NameChangeLog.txt', 'a') as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n')", 
"= browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): '''", "browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): ''' click", "select_box(id, text): ''' open select items and select them. ''' action_find = browser.find_element_by_id(id)", "the people who forgot to clock out in lunch time ''' ####################################### Basic", "employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name):", "items and select them. 
''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text)", "in lunch time ''' ####################################### Basic settings ######################################### SET_TIME = '11:50' SET_TIME_DIV =", "the hours of the people who forgot to clock out in lunch time", "'a') as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) finally: print('everything", "email_field = browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD)", "login_page(): ''' login in to website autetification ''' email_field = browser.find_element_by_css_selector('#eMail') password_field =", "out for Lunch - Administrator - {set_date}' list_of_names = send_names() # import data", "app automatic login in to a website and cut the hours of the", "open('NameChangeLog.txt', 'w') as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) except:", "USERNAME = '' PASSWORD = \"\" NOTE = f'Automatic System - Forgot to", "SET_TIME = '11:50' SET_TIME_DIV = 'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\") # today in", "list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) except: with open('NameChangeLog.txt', 'a') as file_log: for name", "PM in field date_field = browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if", "a list of names that will loop in it. 
''' for name in", "def select_names(name): ''' this scroll the website and click the checkbox with their", "for_click.click() def select_box(id, text): ''' open select items and select them. ''' action_find", "'.menu-main > li:nth-child(2) > a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): ''' this", "# this input the hour to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the", "name in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to select the action bar select_box(id='TimeOut_Hour',", "in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) finally: print('everything was change successfully ') browser.close()", "who forgot to clock out in lunch time ''' ####################################### Basic settings #########################################", "website after login in.''' employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)' employee_button_ =", "bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) #", "that will loop in it. 
''' for name in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out')", "''' for name in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to select the action", "action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this function", "select_employees_website(): ''' click the employees link inside of the website after login in.'''", "with open('NameChangeLog.txt', 'w') as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name)", "text=SET_TIME_DIV) # this input the AM or PM in field date_field = browser.find_element_by_id('TimeOut_Date')", "in field date_field = browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__", "file_log.write(f'{set_date} -- {name}\\n') print(name) except: with open('NameChangeLog.txt', 'a') as file_log: for name in", "employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): ''' this scroll the website and click", "Basic settings ######################################### SET_TIME = '11:50' SET_TIME_DIV = 'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\")", "== \"__main__\": login_page() select_employees_website() select_names_flow(names=list_of_names) # this accept a list of names to", "employee_button_.click() def select_names(name): ''' this scroll the website and click the checkbox with", "to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM or PM in field", "send_names ''' this app automatic login in to a website and cut the", "as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) finally: print('everything was", "hour 
to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes to field select_box(id='TimeOut_AMPM',", "= browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): ''' this scroll the website and click the", "################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login in to website autetification", "get_names.py ################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login in to website", "= browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ == \"__main__\": login_page()", "browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ == \"__main__\": login_page() select_employees_website()", "browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): ''' click the employees link", "from datetime import date from get_names import send_names ''' this app automatic login", "from selenium import webdriver from selenium.webdriver.support.ui import Select from datetime import date from", "in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) except: with open('NameChangeLog.txt', 'a') as file_log: for", "Lunch - Administrator - {set_date}' list_of_names = send_names() # import data from get_names.py", "list of names that will loop in 
it. ''' for name in names:", "- {set_date}' list_of_names = send_names() # import data from get_names.py ################################################################################################ browser =", "from get_names.py ################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login in to", "email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): ''' click the employees link inside", "import Select from datetime import date from get_names import send_names ''' this app", "mm/dd/yyyy USERNAME = '' PASSWORD = \"\" NOTE = f'Automatic System - Forgot", "if __name__ == \"__main__\": login_page() select_employees_website() select_names_flow(names=list_of_names) # this accept a list of", "in to a website and cut the hours of the people who forgot", "in format mm/dd/yyyy USERNAME = '' PASSWORD = \"\" NOTE = f'Automatic System", "names that will loop in it. ''' for name in names: select_names(name=name) select_box(id='employeeAction',", "inside of the website after login in.''' employee_css = '.menu-main > li:nth-child(2) >", "time ''' ####################################### Basic settings ######################################### SET_TIME = '11:50' SET_TIME_DIV = 'AM' set_date", "the checkbox with their name''' info_name = browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=') id_number", "xpath_path = f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text): ''' open", "select_employees_website() select_names_flow(names=list_of_names) # this accept a list of names to be change. 
##########", "automatic login in to a website and cut the hours of the people", "for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) except: with open('NameChangeLog.txt', 'a') as", "the website and click the checkbox with their name''' info_name = browser.find_element_by_link_text(name) href_link", "website and cut the hours of the people who forgot to clock out", "selenium import webdriver from selenium.webdriver.support.ui import Select from datetime import date from get_names", "this created a log in a text file ######################## try: with open('NameChangeLog.txt', 'w')", "select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this", "select_names_flow(names): ''' this function gets in a list of names that will loop", "list_of_names = send_names() # import data from get_names.py ################################################################################################ browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp')", "log in a text file ######################## try: with open('NameChangeLog.txt', 'w') as file_log: for", "to select the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour to", "of names that will loop in it. 
''' for name in names: select_names(name=name)", "> a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): ''' this scroll the website", "lunch time ''' ####################################### Basic settings ######################################### SET_TIME = '11:50' SET_TIME_DIV = 'AM'", "{set_date}' list_of_names = send_names() # import data from get_names.py ################################################################################################ browser = webdriver.Firefox()", "def select_employees_website(): ''' click the employees link inside of the website after login", "import send_names ''' this app automatic login in to a website and cut", "''' email_field = browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear()", "forgot to clock out in lunch time ''' ####################################### Basic settings ######################################### SET_TIME", "this accept a list of names to be change. ########## this created a", "''' this app automatic login in to a website and cut the hours", "change. ########## this created a log in a text file ######################## try: with", "print(name) except: with open('NameChangeLog.txt', 'a') as file_log: for name in list_of_names: file_log.write(f'{set_date} --", "file ######################## try: with open('NameChangeLog.txt', 'w') as file_log: for name in list_of_names: file_log.write(f'{set_date}", "to be change. ########## this created a log in a text file ########################", "after login in.''' employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css)", "of names to be change. 
########## this created a log in a text", "= browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): ''' click the employees", "href_link.split('=') id_number = href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def", "checkbox with their name''' info_name = browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=') id_number =", "select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this", "or PM in field date_field = browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click()", "date_field = browser.find_element_by_id('TimeOut_Date') note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ == \"__main__\":", "select_box(id='employeeAction', text='Check-Out') # to select the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input", "employees link inside of the website after login in.''' employee_css = '.menu-main >", "PASSWORD = \"\" NOTE = f'Automatic System - Forgot to clock out for", "out in lunch time ''' ####################################### Basic settings ######################################### SET_TIME = '11:50' SET_TIME_DIV", "autetification ''' email_field = browser.find_element_by_css_selector('#eMail') password_field = browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen') 
email_field.clear() email_field.send_keys(USERNAME)", "in it. ''' for name in names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to select", "import webdriver from selenium.webdriver.support.ui import Select from datetime import date from get_names import", "password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): ''' click the employees link inside of the", "accept a list of names to be change. ########## this created a log", "to a website and cut the hours of the people who forgot to", "browser = webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login in to website autetification '''", "webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login in to website autetification ''' email_field =", "text=SET_TIME.split(':')[0]) # this input the hour to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input", "browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login in to website autetification ''' email_field = browser.find_element_by_css_selector('#eMail')", "the employees link inside of the website after login in.''' employee_css = '.menu-main", "field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM or PM in field date_field", "= browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text): ''' open select items and select", "href_link = info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path)", "field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) #", "\"__main__\": login_page() select_employees_website() 
select_names_flow(names=list_of_names) # this accept a list of names to be", "'' PASSWORD = \"\" NOTE = f'Automatic System - Forgot to clock out", "= webdriver.Firefox() browser.get('https://www.timestation.com/Login.asp') def login_page(): ''' login in to website autetification ''' email_field", "created a log in a text file ######################## try: with open('NameChangeLog.txt', 'w') as", "import date from get_names import send_names ''' this app automatic login in to", "''' this scroll the website and click the checkbox with their name''' info_name", "this input the hour to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes", "info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click()", "click the checkbox with their name''' info_name = browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=')", "info_name = browser.find_element_by_link_text(name) href_link = info_name.get_attribute('href') href_link.split('=') id_number = href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\"", "browser.find_element_by_name('Submit').click() if __name__ == \"__main__\": login_page() select_employees_website() select_names_flow(names=list_of_names) # this accept a list", "this app automatic login in to a website and cut the hours of", "with open('NameChangeLog.txt', 'a') as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name)", "= date.strftime(date.today(), \"%m/%d/%Y\") # today in format mm/dd/yyyy USERNAME = '' PASSWORD =", "SET_TIME_DIV = 'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\") # today in format mm/dd/yyyy USERNAME", "note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ == \"__main__\": 
login_page() select_employees_website() select_names_flow(names=list_of_names) # this accept a", "note_field = browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ == \"__main__\": login_page() select_employees_website() select_names_flow(names=list_of_names)", "list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) finally: print('everything was change successfully ') browser.close() ##########################################################################", "= f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text): ''' open select", "# this input the AM or PM in field date_field = browser.find_element_by_id('TimeOut_Date') note_field", "to clock out in lunch time ''' ####################################### Basic settings ######################################### SET_TIME =", "= href_link.split('=') xpath_path = f\".//input[@value={id_number[1]}]\" for_click = browser.find_element_by_xpath(xpath_path) for_click.location_once_scrolled_into_view for_click.click() def select_box(id, text):", "format mm/dd/yyyy USERNAME = '' PASSWORD = \"\" NOTE = f'Automatic System -", "the minutes to field select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM or PM", "login in to a website and cut the hours of the people who", "names: select_names(name=name) select_box(id='employeeAction', text='Check-Out') # to select the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) #", "for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n') print(name) finally: print('everything was change successfully", "Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): ''' this function gets in a list of names", "text file ######################## try: with open('NameChangeLog.txt', 
'w') as file_log: for name in list_of_names:", "= 'AM' set_date = date.strftime(date.today(), \"%m/%d/%Y\") # today in format mm/dd/yyyy USERNAME =", "set_date = date.strftime(date.today(), \"%m/%d/%Y\") # today in format mm/dd/yyyy USERNAME = '' PASSWORD", "####################################### Basic settings ######################################### SET_TIME = '11:50' SET_TIME_DIV = 'AM' set_date = date.strftime(date.today(),", "# this accept a list of names to be change. ########## this created", "in a text file ######################## try: with open('NameChangeLog.txt', 'w') as file_log: for name", "Forgot to clock out for Lunch - Administrator - {set_date}' list_of_names = send_names()", "password_field = browser.find_element_by_css_selector('#Password') summit_field = browser.find_element_by_css_selector('.ButtonGreen') email_field.clear() email_field.send_keys(USERNAME) password_field.clear() password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website():", "''' this function gets in a list of names that will loop in", "clock out in lunch time ''' ####################################### Basic settings ######################################### SET_TIME = '11:50'", "= browser.find_element_by_id('Notes') date_field.send_keys(set_date) note_field.send_keys(NOTE) browser.find_element_by_name('Submit').click() if __name__ == \"__main__\": login_page() select_employees_website() select_names_flow(names=list_of_names) #", "a list of names to be change. ########## this created a log in", "them. 
''' action_find = browser.find_element_by_id(id) action_find.location_once_scrolled_into_view action_tab = Select(action_find) action_tab.select_by_visible_text(text) def select_names_flow(names): '''", "try: with open('NameChangeLog.txt', 'w') as file_log: for name in list_of_names: file_log.write(f'{set_date} -- {name}\\n')", "########## this created a log in a text file ######################## try: with open('NameChangeLog.txt',", "- Administrator - {set_date}' list_of_names = send_names() # import data from get_names.py ################################################################################################", "password_field.send_keys(PASSWORD) summit_field.click() def select_employees_website(): ''' click the employees link inside of the website", "browser.find_element_by_css_selector(employee_css) employee_button_.click() def select_names(name): ''' this scroll the website and click the checkbox", "input the hour to field select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes to", "in.''' employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)' employee_button_ = browser.find_element_by_css_selector(employee_css) employee_button_.click() def", "__name__ == \"__main__\": login_page() select_employees_website() select_names_flow(names=list_of_names) # this accept a list of names", "a text file ######################## try: with open('NameChangeLog.txt', 'w') as file_log: for name in", "date from get_names import send_names ''' this app automatic login in to a", "the action bar select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour to field select_box(id='TimeOut_Minute'," ]
[ "MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test masking proportions masking_proportions = [.2] +", "being masked by comparing row length before and after dropping rows with null", "known values correspond to the data at the original index\"\"\" for key, value", "masked for first site only if masking proportion provided for only that sample\"\"\"", "self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test saved known values correspond to the data at", "only if masking proportion provided for only that sample\"\"\" for key, value in", "setUp(self): pass def test_sites_masked(self): \"\"\" Test values are being masked by comparing row", "get_bsb_matrix from tests.TestHelpers import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data)", "masking sites test the same sites are masked when rerun\"\"\" for key in", "the data at the original index\"\"\" for key, value in random_masking.masking_sites.items(): row_index, column_index", "= (int(x) for x in key.split('_')) known_value = test_matrix[row_index, column_index] self.assertEqual(value, known_value) def", "masking proportions masking_proportions = [.2] + [0 for _ in range(9)] masking_proportion_test =", "random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test saved known values correspond to the data at the", "value in masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_')) self.assertEqual(column_index, 0)", "in random_masking.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_')) known_value = test_matrix[row_index,", "for key in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test 
saved known values correspond", "in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test saved known values correspond to the", "list of masking sites test the same sites are masked when rerun\"\"\" for", "for key, value in random_masking.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_'))", "in masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_')) self.assertEqual(column_index, 0) if", "= MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test masking known sites new_masking = MaskImputationValues(methylation_array=test_matrix,", "new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test masking proportions masking_proportions =", "range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self): pass def", "[.2] + [0 for _ in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites()", "MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix,", "saved known values correspond to the data at the original index\"\"\" for key,", "bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples", 
"+ [0 for _ in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class", "and after dropping rows with null values \"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count", "column_index = (int(x) for x in key.split('_')) known_value = test_matrix[row_index, column_index] self.assertEqual(value, known_value)", "proportions masking_proportions = [.2] + [0 for _ in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix,", "site only if masking proportion provided for only that sample\"\"\" for key, value", "given a list of masking sites test the same sites are masked when", "_ in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self):", "index\"\"\" for key, value in random_masking.masking_sites.items(): row_index, column_index = (int(x) for x in", "with null values \"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count)", "def test_masking_proportions(self): \"\"\"Test sites masked for first site only if masking proportion provided", "# test masking proportions masking_proportions = [.2] + [0 for _ in range(9)]", "rerun\"\"\" for key in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test saved known values", "= f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data) # test standard masking 
random_masking =", "from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites,", "are being masked by comparing row length before and after dropping rows with", "value in random_masking.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_')) known_value =", "from tests.TestHelpers import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data) #", "length before and after dropping rows with null values \"\"\" masked_row_count = random_masking.methylation_array[", "rows with null values \"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count,", "test_same_sites_masked(self): \"\"\"If given a list of masking sites test the same sites are", "of masking sites test the same sites are masked when rerun\"\"\" for key", "masking_proportions = [.2] + [0 for _ in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions,", "test_known_value_save(self): \"\"\"Test saved known values correspond to the data at the original index\"\"\"", "\"\"\"If given a list of masking sites test the same sites are masked", "for _ in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def", "only that sample\"\"\" for key, value in masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x) for", "x in key.split('_')) known_value = test_matrix[row_index, column_index] 
self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test sites", "masked by comparing row length before and after dropping rows with null values", "from bsbolt.Impute.Validation.MaskValues import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers import test_directory test_methylation_data", "verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self): pass def test_sites_masked(self): \"\"\" Test values are", "random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test masking known sites new_masking =", "row_index, column_index = (int(x) for x in key.split('_')) known_value = test_matrix[row_index, column_index] self.assertEqual(value,", "(int(x) for x in key.split('_')) known_value = test_matrix[row_index, column_index] self.assertEqual(value, known_value) def test_masking_proportions(self):", "null values \"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def", "masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self): pass def test_sites_masked(self):", "for key, value in masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_'))", "masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self): pass def test_sites_masked(self): \"\"\" Test values are being", "sites test the same sites are masked when rerun\"\"\" for key in new_masking.masking_sites:", "input_row_count) 
def test_same_sites_masked(self): \"\"\"If given a list of masking sites test the same", "known sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test masking proportions", "test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self): \"\"\"If given a list of masking sites test", "masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self): pass def test_sites_masked(self): \"\"\" Test values", "[0 for _ in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase):", "masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_')) self.assertEqual(column_index, 0) if __name__", "f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data) # test standard masking random_masking = MaskImputationValues(methylation_array=test_matrix,", "import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data) # test standard", "masking known sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test masking", "row length before and after dropping rows with null values \"\"\" masked_row_count =", "comparing row length before and after dropping rows with null values \"\"\" masked_row_count", "bsbolt.Impute.Validation.MaskValues import MaskImputationValues from 
bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers import test_directory test_methylation_data =", "test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data) # test standard masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05,", "verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test masking proportions masking_proportions = [.2] + [0 for", "in key.split('_')) known_value = test_matrix[row_index, column_index] self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test sites masked", "at the original index\"\"\" for key, value in random_masking.masking_sites.items(): row_index, column_index = (int(x)", "to the data at the original index\"\"\" for key, value in random_masking.masking_sites.items(): row_index,", "masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test masking known sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True,", "known_value) def test_masking_proportions(self): \"\"\"Test sites masked for first site only if masking proportion", "key.split('_')) known_value = test_matrix[row_index, column_index] self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test sites masked for", "TestSiteMasking(unittest.TestCase): def setUp(self): pass def test_sites_masked(self): \"\"\" Test values are being masked by", "same sites are masked when rerun\"\"\" for key in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def", "values are being masked by comparing row length before and after dropping rows", "get_bsb_matrix(test_methylation_data) # test standard masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test", "are masked when rerun\"\"\" for key in 
new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test", "= [.2] + [0 for _ in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True)", "proportion provided for only that sample\"\"\" for key, value in masking_proportion_test.masking_sites.items(): row_index, column_index", "\"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self): \"\"\"If", "in range(9)] masking_proportion_test = MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self): pass", "input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self): \"\"\"If given a list of masking", "# test masking known sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() #", "test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data) # test standard masking", "import get_bsb_matrix from tests.TestHelpers import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples =", "= random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def 
test_same_sites_masked(self): \"\"\"If given a", "original index\"\"\" for key, value in random_masking.masking_sites.items(): row_index, column_index = (int(x) for x", "key, value in random_masking.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_')) known_value", "test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data) # test standard masking random_masking", "# test standard masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test masking", "new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test saved known values correspond to the data", "masking proportion provided for only that sample\"\"\" for key, value in masking_proportion_test.masking_sites.items(): row_index,", "test_samples = get_bsb_matrix(test_methylation_data) # test standard masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites()", "MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test masking known sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05,", "by comparing row length before and after dropping rows with null values \"\"\"", "sample\"\"\" for key, value in masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x) for x in", "masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test masking proportions masking_proportions = [.2] + [0", "masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test masking proportions masking_proportions = [.2] + [0 for _", 
"random_masking.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_')) known_value = test_matrix[row_index, column_index]", "def setUp(self): pass def test_sites_masked(self): \"\"\" Test values are being masked by comparing", "if masking proportion provided for only that sample\"\"\" for key, value in masking_proportion_test.masking_sites.items():", "row_index, column_index = (int(x) for x in key.split('_')) self.assertEqual(column_index, 0) if __name__ ==", "unittest import numpy as np from bsbolt.Impute.Validation.MaskValues import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix", "column_index] self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test sites masked for first site only if", "\"\"\"Test sites masked for first site only if masking proportion provided for only", "= get_bsb_matrix(test_methylation_data) # test standard masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() #", "random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self): \"\"\"If given a list", "sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test masking proportions masking_proportions", "= (int(x) for x in key.split('_')) self.assertEqual(column_index, 0) if __name__ == '__main__': unittest.main()", "test standard masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test masking known", "values correspond to the data at the original index\"\"\" for key, value 
in", "<filename>tests/test_methylation_masking.py import unittest import numpy as np from bsbolt.Impute.Validation.MaskValues import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions", "masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self): \"\"\"If given", "masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test masking known sites new_masking", "test the same sites are masked when rerun\"\"\" for key in new_masking.masking_sites: self.assertIn(key,", "after dropping rows with null values \"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count =", "the same sites are masked when rerun\"\"\" for key in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites)", "verbose=True) random_masking.mask_random_sites() # test masking known sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites)", "new_masking.mask_known_sites() # test masking proportions masking_proportions = [.2] + [0 for _ in", "values \"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self):", "for only that sample\"\"\" for key, value in masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x)", "sites masked for first site only if masking proportion provided for only that", "pass def test_sites_masked(self): 
\"\"\" Test values are being masked by comparing row length", "MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self): pass def test_sites_masked(self): \"\"\" Test", "test masking known sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() # test", "test_sites, test_samples = get_bsb_matrix(test_methylation_data) # test standard masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True)", "key in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test saved known values correspond to", "def test_sites_masked(self): \"\"\" Test values are being masked by comparing row length before", "for x in key.split('_')) known_value = test_matrix[row_index, column_index] self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test", "a list of masking sites test the same sites are masked when rerun\"\"\"", "~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self): \"\"\"If given a list of", "\"\"\" Test values are being masked by comparing row length before and after", "def test_known_value_save(self): \"\"\"Test saved known values correspond to the data at the original", "test_matrix[row_index, column_index] self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test sites masked for first site only", "= MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites() 
# test masking proportions masking_proportions = [.2]", "first site only if masking proportion provided for only that sample\"\"\" for key,", "import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt'", "when rerun\"\"\" for key in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test saved known", "np from bsbolt.Impute.Validation.MaskValues import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers import test_directory", "standard masking random_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True) random_masking.mask_random_sites() # test masking known sites", "tests.TestHelpers import test_directory test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt' test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data) # test", "self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self): \"\"\"If given a list of masking sites test the", "masked when rerun\"\"\" for key in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self): \"\"\"Test saved", "known_value = test_matrix[row_index, column_index] self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test sites masked for first", "data at the original index\"\"\" for key, value in random_masking.masking_sites.items(): row_index, column_index =", "provided for only that sample\"\"\" for key, value in masking_proportion_test.masking_sites.items(): row_index, column_index =", "= test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0] self.assertLess(masked_row_count, input_row_count) def test_same_sites_masked(self): \"\"\"If given a list of masking 
sites", "test_sites_masked(self): \"\"\" Test values are being masked by comparing row length before and", "def test_same_sites_masked(self): \"\"\"If given a list of masking sites test the same sites", "key, value in masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x) for x in key.split('_')) self.assertEqual(column_index,", "\"\"\"Test saved known values correspond to the data at the original index\"\"\" for", "column_index = (int(x) for x in key.split('_')) self.assertEqual(column_index, 0) if __name__ == '__main__':", "correspond to the data at the original index\"\"\" for key, value in random_masking.masking_sites.items():", "import unittest import numpy as np from bsbolt.Impute.Validation.MaskValues import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import", "dropping rows with null values \"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0] input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0]", "self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test sites masked for first site only if masking", "that sample\"\"\" for key, value in masking_proportion_test.masking_sites.items(): row_index, column_index = (int(x) for x", "= MaskImputationValues(methylation_array=test_matrix, masking_proportion=masking_proportions, verbose=True) masking_proportion_test.mask_random_sites() class TestSiteMasking(unittest.TestCase): def setUp(self): pass def test_sites_masked(self): \"\"\"", "test_masking_proportions(self): \"\"\"Test sites masked for first site only if masking proportion provided for", "sites are masked when rerun\"\"\" for key in new_masking.masking_sites: self.assertIn(key, random_masking.masking_sites) def test_known_value_save(self):", "import numpy as np from bsbolt.Impute.Validation.MaskValues import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import 
get_bsb_matrix from", "before and after dropping rows with null values \"\"\" masked_row_count = random_masking.methylation_array[ ~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0]", "= test_matrix[row_index, column_index] self.assertEqual(value, known_value) def test_masking_proportions(self): \"\"\"Test sites masked for first site", "the original index\"\"\" for key, value in random_masking.masking_sites.items(): row_index, column_index = (int(x) for", "random_masking.mask_random_sites() # test masking known sites new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True, masking_sites=random_masking.masking_sites) new_masking.mask_known_sites()", "Test values are being masked by comparing row length before and after dropping", "numpy as np from bsbolt.Impute.Validation.MaskValues import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers", "as np from bsbolt.Impute.Validation.MaskValues import MaskImputationValues from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix from tests.TestHelpers import", "for first site only if masking proportion provided for only that sample\"\"\" for", "class TestSiteMasking(unittest.TestCase): def setUp(self): pass def test_sites_masked(self): \"\"\" Test values are being masked", "test masking proportions masking_proportions = [.2] + [0 for _ in range(9)] masking_proportion_test" ]
[ "D_i). spatial_dims: number of spatial dimensions of the images, 2 or 3. size_divisible:", "KIND, either express or implied. # See the License for the specific language", "of type Tensor, got {type(boxes)}.\") if len(boxes.shape) != 2 or boxes.shape[-1] != 2", "Unless required by applicable law or agreed to in writing, software # distributed", "of the inputs - pad the inputs so that the output spatial sizes", "tensor \" f\"of shape [N, {2* spatial_dims}], got {boxes.shape}.\" ) return def pad_images(", "targets during training.\") if len(input_images) != len(targets): raise ValueError(f\"len(input_images) should equal to len(targets),", "size_divisible = ensure_tuple_rep(size_divisible, spatial_dims) # If input_images: Tensor if isinstance(input_images, Tensor): orig_size =", "= compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible) # allocate memory for the padded images images = torch.zeros([len(image_sizes),", "an int, the same `size_divisible` will be applied to all the input spatial", "(B, C, H, W) or (B, C, H, W, D) Tensor - image_sizes,", "input_images[0].shape[0] dtype = input_images[0].dtype device = input_images[0].device # compute max_spatial_size image_sizes_t = torch.tensor(image_sizes)", "Tensor shape {img.shape}.\" ) else: raise ValueError(\"input_images needs to be a List[Tensor] or", "Use `SpatialPad` to match sizes, padding in the end will not affect boxes", "the input spatial dimensions. mode: available modes for PyTorch Tensor: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``,", "a (B, C, H, W) or (B, C, H, W, D) Tensor. Padded", "images, 2 or 3. target_label_key: the expected key of target labels. 
target_box_key: the", "Tensor]], None], spatial_dims: int, target_label_key: str, target_box_key: str, ) -> None: \"\"\" Validate", "a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable", "ValueError( \"When input_images is a Tensor, its need to be (spatial_dims + 2)-D.\"", "input_images[0].dtype device = input_images[0].device # compute max_spatial_size image_sizes_t = torch.tensor(image_sizes) max_spatial_size_t, _ =", "Tensor shape {input_images.shape}.\" ) elif isinstance(input_images, List): for img in input_images: if len(img.shape)", "orig_size[i], 0)) for i, sp_i in enumerate(new_size)] pt_pad_width = [val for sublist in", "got {len(input_images)}, {len(targets)}.\") for target in targets: if (target_label_key not in target.keys()) or", "or 3. target_label_key: the expected key of target labels. target_box_key: the expected key", "return def pad_images( input_images: Union[List[Tensor], Tensor], spatial_dims: int, size_divisible: Union[int, Sequence[int]], mode: Union[PytorchPadMode,", "the License. from typing import Dict, List, Sequence, Tuple, Union import torch import", "of each image \"\"\" size_divisible = ensure_tuple_rep(size_divisible, spatial_dims) # If input_images: Tensor if", "Defaults to ``\"constant\"``. See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html kwargs: other arguments for `torch.pad` function. Return:", "sp_i in enumerate(new_size)] pt_pad_width = [val for sublist in all_pad_width for val in", "this file except in compliance with the License. # You may obtain a", "are expected keys in targets. Got {target.keys()}.\" ) boxes = target[target_box_key] if not", "f\"In this case, it should be a {(spatial_dims + 2)}-D Tensor, got Tensor", "2 or 3. 
\"\"\" if isinstance(input_images, Tensor): if len(input_images.shape) != spatial_dims + 2:", "is a List[Tensor], each element should have be (spatial_dims + 1)-D.\" f\"In this", "if (target_label_key not in target.keys()) or (target_box_key not in target.keys()): raise ValueError( f\"{target_label_key}", "target boxes to be a tensor \" f\"of shape [N, {2* spatial_dims}], got", "of target boxes. \"\"\" if targets is None: raise ValueError(\"Please provide ground truth", "Return: - images, a (B, C, H, W) or (B, C, H, W,", "0: # if there is no need to pad return input_images, [orig_size] *", "check_input_images(input_images: Union[List[Tensor], Tensor], spatial_dims: int) -> None: \"\"\" Validate the input dimensionality (raise", "input image shape. If an int, the same `size_divisible` will be applied to", "= [img.shape[-spatial_dims:] for img in input_images] in_channels = input_images[0].shape[0] dtype = input_images[0].dtype device", "the images, 2 or 3. target_label_key: the expected key of target labels. target_box_key:", "List[Tensor], each element should have be (spatial_dims + 1)-D.\" f\"In this case, it", "{``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}. One of the listed string values or a user", "# http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing,", "ANY KIND, either express or implied. # See the License for the specific", "``\"constant\"``. See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html kwargs: other arguments for `torch.pad` function. Return: - images,", "affect boxes padder = SpatialPad(spatial_size=max_spatial_size, method=\"end\", mode=mode, **kwargs) for idx, img in enumerate(input_images):", "= padder(img) # type: ignore return images, [list(ss) for ss in image_sizes] def", "or (B, C, H, W, D) Tensor - image_sizes, the original spatial size", "\"\"\" Validate the input images/targets during training (raise a `ValueError` if invalid). 
Args:", ") boxes = target[target_box_key] if not isinstance(boxes, torch.Tensor): raise ValueError(f\"Expected target boxes to", "function. Return: - images, a (B, C, H, W) or (B, C, H,", "F from torch import Tensor from monai.transforms.croppad.array import SpatialPad from monai.transforms.utils import compute_divisible_spatial_size,", "raise ValueError( f\"Expected target boxes to be a tensor \" f\"of shape [N,", "be a tensor \" f\"of shape [N, {2* spatial_dims}], got {boxes.shape}.\" ) return", "images, a (B, C, H, W) or (B, C, H, W, D) Tensor", "typing import Dict, List, Sequence, Tuple, Union import torch import torch.nn.functional as F", "`torch.pad` function. Return: - images, a (B, C, H, W) or (B, C,", "# If input_images: List[Tensor]) image_sizes = [img.shape[-spatial_dims:] for img in input_images] in_channels =", "input images, including - validate of the inputs - pad the inputs so", "f\"In this case, it should be a {(spatial_dims + 1)}-D Tensor, got Tensor", "different size (C, H_i, W_i) or (C, H_i, W_i, D_i). targets: a list", "input_images: Union[List[Tensor], Tensor], targets: Union[List[Dict[str, Tensor]], None], spatial_dims: int, target_label_key: str, target_box_key: str,", "Union import torch import torch.nn.functional as F from torch import Tensor from monai.transforms.croppad.array", "images, so that the output spatial sizes are divisible by `size_divisible`. It pads", "images/targets during training (raise a `ValueError` if invalid). Args: input_images: It can be", "or 3. 
\"\"\" if isinstance(input_images, Tensor): if len(input_images.shape) != spatial_dims + 2: raise", "input_images is a Tensor, its need to be (spatial_dims + 2)-D.\" f\"In this", "image_sizes] def preprocess_images( input_images: Union[List[Tensor], Tensor], spatial_dims: int, size_divisible: Union[int, Sequence[int]], mode: Union[PytorchPadMode,", "Copyright (c) MONAI Consortium # Licensed under the Apache License, Version 2.0 (the", "spatial size of each image \"\"\" size_divisible = ensure_tuple_rep(size_divisible, spatial_dims) # If input_images:", "input_images: List[Tensor]) image_sizes = [img.shape[-spatial_dims:] for img in input_images] in_channels = input_images[0].shape[0] dtype", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "permissions and # limitations under the License. from typing import Dict, List, Sequence,", "Sequence, Tuple, Union import torch import torch.nn.functional as F from torch import Tensor", "spatial_dims}], got {boxes.shape}.\" ) return def pad_images( input_images: Union[List[Tensor], Tensor], spatial_dims: int, size_divisible:", "of dict. 
Each dict with two keys: target_box_key and target_label_key, ground-truth boxes present", "spatial_dims or len(size_divisible) != spatial_dims: raise ValueError(\" Require len(max_spatial_size_t) == spatial_dims ==len(size_divisible).\") max_spatial_size", "Require len(max_spatial_size_t) == spatial_dims ==len(size_divisible).\") max_spatial_size = compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible) # allocate memory for", "pad_images( input_images: Union[List[Tensor], Tensor], spatial_dims: int, size_divisible: Union[int, Sequence[int]], mode: Union[PytorchPadMode, str] =", "can be 1) a tensor sized (B, C, H, W) or (B, C,", "all_pad_width = [(0, max(sp_i - orig_size[i], 0)) for i, sp_i in enumerate(new_size)] pt_pad_width", "in enumerate(new_size)] pt_pad_width = [val for sublist in all_pad_width for val in sublist[::-1]][::-1]", "img in enumerate(input_images): images[idx, ...] = padder(img) # type: ignore return images, [list(ss)", "spatial dimensions of the images, 2 or 3. \"\"\" if isinstance(input_images, Tensor): if", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "number of spatial dimensions of the images, 2 or 3. \"\"\" if isinstance(input_images,", "the original spatial size of each image \"\"\" size_divisible = ensure_tuple_rep(size_divisible, spatial_dims) #", "3D. size_divisible: int or Sequence[int], is the expected pattern on the input image", "language governing permissions and # limitations under the License. 
from typing import Dict,", "int, the same `size_divisible` will be applied to all the input spatial dimensions.", "from monai.transforms.croppad.array import SpatialPad from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode from monai.utils import PytorchPadMode,", "torch.tensor(image_sizes) max_spatial_size_t, _ = torch.max(image_sizes_t, dim=0) if len(max_spatial_size_t) != spatial_dims or len(size_divisible) !=", "Tuple[Tensor, List[List[int]]]: \"\"\" Preprocess the input images, including - validate of the inputs", "OF ANY KIND, either express or implied. # See the License for the", "a `ValueError` if invalid). Args: input_images: It can be 1) a tensor sized", "invalid). Args: input_images: It can be 1) a tensor sized (B, C, H,", "tensor sized (B, C, H, W) or (B, C, H, W, D), or", "should be a {(spatial_dims + 1)}-D Tensor, got Tensor shape {img.shape}.\" ) else:", "ground truth targets during training.\") if len(input_images) != len(targets): raise ValueError(f\"len(input_images) should equal", "MONAI Consortium # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "input images, so that the output spatial sizes are divisible by `size_divisible`. It", "D) is divisible by size_divisible. Default padding uses constant padding with value 0.0", "in the end will not affect boxes padder = SpatialPad(spatial_size=max_spatial_size, method=\"end\", mode=mode, **kwargs)", "isinstance(boxes, torch.Tensor): raise ValueError(f\"Expected target boxes to be of type Tensor, got {type(boxes)}.\")", "(H, W, D) is divisible by size_divisible. 
Default padding uses constant padding with", "in targets: if (target_label_key not in target.keys()) or (target_box_key not in target.keys()): raise", "+ max_spatial_size, dtype=dtype, device=device) # Use `SpatialPad` to match sizes, padding in the", "or Tensor.\") return def check_training_targets( input_images: Union[List[Tensor], Tensor], targets: Union[List[Dict[str, Tensor]], None], spatial_dims:", "len(max_spatial_size_t) == spatial_dims ==len(size_divisible).\") max_spatial_size = compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible) # allocate memory for the", "input images/targets during training (raise a `ValueError` if invalid). Args: input_images: It can", "convert_pad_mode from monai.utils import PytorchPadMode, ensure_tuple_rep def check_input_images(input_images: Union[List[Tensor], Tensor], spatial_dims: int) ->", "return F.pad(input_images, pt_pad_width, mode=mode_, **kwargs), [orig_size] * input_images.shape[0] # If input_images: List[Tensor]) image_sizes", "to match sizes, padding in the end will not affect boxes padder =", "(C, H_i, W_i, D_i). spatial_dims: number of spatial dimensions of the images, 2", "compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible) # allocate memory for the padded images images = torch.zeros([len(image_sizes), in_channels]", "H, W) or (B, C, H, W, D) Tensor. Padded size (H, W)", "import SpatialPad from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode from monai.utils import PytorchPadMode, ensure_tuple_rep def", "W_i, D_i). targets: a list of dict. Each dict with two keys: target_box_key", "the expected pattern on the input image shape. If an int, the same", "D) Tensor - image_sizes, the original spatial size of each image \"\"\" size_divisible", "may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i). 
spatial_dims:", "from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode from monai.utils import PytorchPadMode, ensure_tuple_rep def check_input_images(input_images: Union[List[Tensor],", "expected pattern on the input image shape. If an int, the same `size_divisible`", "If input_images: List[Tensor]) image_sizes = [img.shape[-spatial_dims:] for img in input_images] in_channels = input_images[0].shape[0]", "a tensor \" f\"of shape [N, {2* spatial_dims}], got {boxes.shape}.\" ) return def", "ValueError(\" Require len(max_spatial_size_t) == spatial_dims ==len(size_divisible).\") max_spatial_size = compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible) # allocate memory", "3. target_label_key: the expected key of target labels. target_box_key: the expected key of", "need to be (spatial_dims + 2)-D.\" f\"In this case, it should be a", "List): for img in input_images: if len(img.shape) != spatial_dims + 1: raise ValueError(", "to be a List[Tensor] or Tensor.\") return def check_training_targets( input_images: Union[List[Tensor], Tensor], targets:", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "is the expected pattern on the input image shape. If an int, the", "in sublist[::-1]][::-1] if max(pt_pad_width) == 0: # if there is no need to", "padding in the end will not affect boxes padder = SpatialPad(spatial_size=max_spatial_size, method=\"end\", mode=mode,", "target in targets: if (target_label_key not in target.keys()) or (target_box_key not in target.keys()):", "size_divisible: int or Sequence[int], is the expected pattern on the input image shape.", "to len(targets), got {len(input_images)}, {len(targets)}.\") for target in targets: if (target_label_key not in", "- images, a (B, C, H, W) or (B, C, H, W, D)", "D), or 2) a list of image tensors, each image i may have", "keys: target_box_key and target_label_key, ground-truth boxes present in the image. 
spatial_dims: number of", "key of target boxes. \"\"\" if targets is None: raise ValueError(\"Please provide ground", "torch.nn.functional as F from torch import Tensor from monai.transforms.croppad.array import SpatialPad from monai.transforms.utils", "max_spatial_size image_sizes_t = torch.tensor(image_sizes) max_spatial_size_t, _ = torch.max(image_sizes_t, dim=0) if len(max_spatial_size_t) != spatial_dims", "as F from torch import Tensor from monai.transforms.croppad.array import SpatialPad from monai.transforms.utils import", "to ``\"constant\"``. See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html kwargs: other arguments for `torch.pad` function. Return: -", "input_images: It can be 1) a tensor sized (B, C, H, W) or", "1)-D.\" f\"In this case, it should be a {(spatial_dims + 1)}-D Tensor, got", "or (B, C, H, W, D), or 2) a list of image tensors,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "(B, C, H, W) or (B, C, H, W, D), or 2) a", "of the images, 2 or 3. size_divisible: int or Sequence[int], is the expected", "f\"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}.\" ) boxes =", "-> Tuple[Tensor, List[List[int]]]: \"\"\" Pad the input images, so that the output spatial", "enumerate(new_size)] pt_pad_width = [val for sublist in all_pad_width for val in sublist[::-1]][::-1] if", "k=size_divisible) # allocate memory for the padded images images = torch.zeros([len(image_sizes), in_channels] +", "return images, [list(ss) for ss in image_sizes] def preprocess_images( input_images: Union[List[Tensor], Tensor], spatial_dims:", "``\"replicate\"``, ``\"circular\"``}. 
One of the listed string values or a user supplied function.", "Consortium # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "max_spatial_size, dtype=dtype, device=device) # Use `SpatialPad` to match sizes, padding in the end", "list of dict. Each dict with two keys: target_box_key and target_label_key, ground-truth boxes", "pad the inputs so that the output spatial sizes are divisible by `size_divisible`.", "match sizes, padding in the end will not affect boxes padder = SpatialPad(spatial_size=max_spatial_size,", "spatial_dims: int) -> None: \"\"\" Validate the input dimensionality (raise a `ValueError` if", "there is no need to pad return input_images, [orig_size] * input_images.shape[0] mode_: str", "ValueError(f\"Expected target boxes to be of type Tensor, got {type(boxes)}.\") if len(boxes.shape) !=", "in_channels = input_images[0].shape[0] dtype = input_images[0].dtype device = input_images[0].device # compute max_spatial_size image_sizes_t", "allocate memory for the padded images images = torch.zeros([len(image_sizes), in_channels] + max_spatial_size, dtype=dtype,", "Validate the input images/targets during training (raise a `ValueError` if invalid). Args: input_images:", "required by applicable law or agreed to in writing, software # distributed under", "image. spatial_dims: number of spatial dimensions of the images, 2 or 3. target_label_key:", "is None: raise ValueError(\"Please provide ground truth targets during training.\") if len(input_images) !=", "mode=mode_, **kwargs), [orig_size] * input_images.shape[0] # If input_images: List[Tensor]) image_sizes = [img.shape[-spatial_dims:] for", "key of target labels. target_box_key: the expected key of target boxes. 
def check_input_images(input_images: Union[List[Tensor], Tensor], spatial_dims: int) -> None:
    """
    Verify that ``input_images`` carries the expected number of dimensions.

    Args:
        input_images: either 1) a batched tensor sized (B, C, H, W) or (B, C, H, W, D),
            or 2) a list of image tensors, where image i may have a different size
            (C, H_i, W_i) or (C, H_i, W_i, D_i).
        spatial_dims: number of spatial dimensions of the images, 2 or 3.

    Raises:
        ValueError: when the input is neither a Tensor nor a List[Tensor], or when
            any tensor does not have ``spatial_dims`` plus the batch/channel dims.
    """
    if isinstance(input_images, Tensor):
        # a batched tensor adds batch + channel dims on top of the spatial dims
        expected_ndim = spatial_dims + 2
        if len(input_images.shape) != expected_ndim:
            raise ValueError(
                "When input_images is a Tensor, its need to be (spatial_dims + 2)-D."
                f"In this case, it should be a {expected_ndim}-D Tensor, got Tensor shape {input_images.shape}."
            )
        return
    if isinstance(input_images, List):
        # list elements are unbatched: channel dim + spatial dims only
        expected_ndim = spatial_dims + 1
        for image in input_images:
            if len(image.shape) != expected_ndim:
                raise ValueError(
                    "When input_images is a List[Tensor], each element should have be (spatial_dims + 1)-D."
                    f"In this case, it should be a {expected_ndim}-D Tensor, got Tensor shape {image.shape}."
                )
        return
    raise ValueError("input_images needs to be a List[Tensor] or Tensor.")
def check_training_targets(
    input_images: Union[List[Tensor], Tensor],
    targets: Union[List[Dict[str, Tensor]], None],
    spatial_dims: int,
    target_label_key: str,
    target_box_key: str,
) -> None:
    """
    Validate the input images/targets during training (raise a `ValueError` if invalid).

    Args:
        input_images: either 1) a batched tensor sized (B, C, H, W) or (B, C, H, W, D),
            or 2) a list of image tensors, where image i may have a different size
            (C, H_i, W_i) or (C, H_i, W_i, D_i).
        targets: a list of dict, one per image; each dict must contain
            ``target_box_key`` and ``target_label_key`` for the ground-truth boxes.
        spatial_dims: number of spatial dimensions of the images, 2 or 3.
        target_label_key: the expected key of target labels.
        target_box_key: the expected key of target boxes.

    Raises:
        ValueError: when targets is missing, mismatched in length, lacks the expected
            keys, or contains boxes that are not [N, 2 * spatial_dims] tensors.
    """
    if targets is None:
        raise ValueError("Please provide ground truth targets during training.")
    if len(input_images) != len(targets):
        raise ValueError(f"len(input_images) should equal to len(targets), got {len(input_images)}, {len(targets)}.")
    for record in targets:
        if (target_label_key not in record.keys()) or (target_box_key not in record.keys()):
            raise ValueError(
                f"{target_label_key} and {target_box_key} are expected keys in targets. Got {record.keys()}."
            )
        boxes = record[target_box_key]
        if not isinstance(boxes, torch.Tensor):
            raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
        # boxes are corner-format: 2 coordinates per spatial dimension
        if boxes.ndim != 2 or boxes.shape[-1] != 2 * spatial_dims:
            raise ValueError(
                f"Expected target boxes to be a tensor of shape [N, {2 * spatial_dims}], got {boxes.shape}."
            )
    return
def pad_images(
    input_images: Union[List[Tensor], Tensor],
    spatial_dims: int,
    size_divisible: Union[int, Sequence[int]],
    mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
    **kwargs,
) -> Tuple[Tensor, List[List[int]]]:
    """
    Pad the input images so that the output spatial sizes are divisible by
    ``size_divisible``, padding at the end to produce a single (B, C, H, W) or
    (B, C, H, W, D) Tensor. Default padding is constant padding with value 0.0.

    Args:
        input_images: either 1) a batched tensor sized (B, C, H, W) or (B, C, H, W, D),
            or 2) a list of image tensors, where image i may have a different size
            (C, H_i, W_i) or (C, H_i, W_i, D_i).
        spatial_dims: number of spatial dimensions of the images, 2 or 3.
        size_divisible: int or Sequence[int], the expected pattern on the input image
            shape. An int is broadcast to all spatial dimensions.
        mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``,
            ``"replicate"``, ``"circular"``}. One of the listed string values or a user
            supplied function. Defaults to ``"constant"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for `torch.pad` function.

    Return:
        - images, a (B, C, H, W) or (B, C, H, W, D) Tensor
        - image_sizes, the original spatial size of each image
    """
    size_divisible = ensure_tuple_rep(size_divisible, spatial_dims)

    # Case 1: already a batched Tensor — pad the whole batch in one F.pad call.
    if isinstance(input_images, Tensor):
        orig_size = list(input_images.shape[-spatial_dims:])
        new_size = compute_divisible_spatial_size(spatial_shape=orig_size, k=size_divisible)
        # end-padding amount per spatial dimension, in spatial order
        pad_tail = [max(target - current, 0) for target, current in zip(new_size, orig_size)]
        # F.pad expects (before, after) pairs starting from the LAST dimension
        pt_pad_width: List[int] = []
        for amount in reversed(pad_tail):
            pt_pad_width.extend((0, amount))
        original_sizes = [orig_size] * input_images.shape[0]
        if max(pt_pad_width) == 0:
            # every dimension is already divisible; nothing to pad
            return input_images, original_sizes
        mode_: str = convert_pad_mode(dst=input_images, mode=mode).value
        return F.pad(input_images, pt_pad_width, mode=mode_, **kwargs), original_sizes

    # Case 2: a List[Tensor] — pad every image up to a shared divisible size.
    image_sizes = [img.shape[-spatial_dims:] for img in input_images]
    in_channels = input_images[0].shape[0]
    dtype = input_images[0].dtype
    device = input_images[0].device

    # common target size: per-dimension maximum, rounded up to size_divisible
    max_spatial_size_t, _ = torch.max(torch.tensor(image_sizes), dim=0)
    if len(max_spatial_size_t) != spatial_dims or len(size_divisible) != spatial_dims:
        raise ValueError(" Require len(max_spatial_size_t) == spatial_dims ==len(size_divisible).")
    max_spatial_size = compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible)

    # allocate the batched output once, then fill it image by image
    images = torch.zeros([len(image_sizes), in_channels] + max_spatial_size, dtype=dtype, device=device)

    # `SpatialPad` with method="end" pads only at the end, so box coordinates stay valid
    padder = SpatialPad(spatial_size=max_spatial_size, method="end", mode=mode, **kwargs)
    for idx, img in enumerate(input_images):
        images[idx, ...] = padder(img)  # type: ignore

    return images, [list(ss) for ss in image_sizes]
def preprocess_images(
    input_images: Union[List[Tensor], Tensor],
    spatial_dims: int,
    size_divisible: Union[int, Sequence[int]],
    mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
    **kwargs,
) -> Tuple[Tensor, List[List[int]]]:
    """
    Preprocess the input images:
    - validate the inputs,
    - pad them so the output spatial sizes are divisible by ``size_divisible``,
      producing a (B, C, H, W) or (B, C, H, W, D) Tensor.

    Padded size (H, W) or (H, W, D) is divisible by size_divisible.
    Default padding uses constant padding with value 0.0.

    Args:
        input_images: either 1) a batched tensor sized (B, C, H, W) or (B, C, H, W, D),
            or 2) a list of image tensors, where image i may have a different size
            (C, H_i, W_i) or (C, H_i, W_i, D_i).
        spatial_dims: number of spatial dimensions of the images, 2 or 3.
        size_divisible: int or Sequence[int], the expected pattern on the input image
            shape. An int is broadcast to all spatial dimensions.
        mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``,
            ``"replicate"``, ``"circular"``}. One of the listed string values or a user
            supplied function. Defaults to ``"constant"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for `torch.pad` function.

    Return:
        - images, a (B, C, H, W) or (B, C, H, W, D) Tensor
        - image_sizes, the original spatial size of each image
    """
    check_input_images(input_images, spatial_dims)
    divisible_pattern = ensure_tuple_rep(size_divisible, spatial_dims)
    return pad_images(input_images, spatial_dims, divisible_pattern, mode, **kwargs)
\"\"\" if isinstance(input_images, Tensor): if len(input_images.shape) !=", "ignore return images, [list(ss) for ss in image_sizes] def preprocess_images( input_images: Union[List[Tensor], Tensor],", "0.0 Args: input_images: It can be 1) a tensor sized (B, C, H,", "Tuple[Tensor, List[List[int]]]: \"\"\" Pad the input images, so that the output spatial sizes", "i, sp_i in enumerate(new_size)] pt_pad_width = [val for sublist in all_pad_width for val", "of image tensors, each image i may have different size (C, H_i, W_i)", "different size (C, H_i, W_i) or (C, H_i, W_i, D_i). spatial_dims: number of", "either express or implied. # See the License for the specific language governing", "sizes, padding in the end will not affect boxes padder = SpatialPad(spatial_size=max_spatial_size, method=\"end\",", "images, including - validate of the inputs - pad the inputs so that", "_ = torch.max(image_sizes_t, dim=0) if len(max_spatial_size_t) != spatial_dims or len(size_divisible) != spatial_dims: raise", "None: raise ValueError(\"Please provide ground truth targets during training.\") if len(input_images) != len(targets):", "boxes. \"\"\" if targets is None: raise ValueError(\"Please provide ground truth targets during", "H, W) or (B, C, H, W, D) Tensor - image_sizes, the original", "labels. target_box_key: the expected key of target boxes. \"\"\" if targets is None:", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "be applied to all the input spatial dimensions. mode: available modes for PyTorch", "It can be 1) a tensor sized (B, C, H, W) or (B,", "\"\"\" Preprocess the input images, including - validate of the inputs - pad", "the output spatial sizes are divisible by `size_divisible`. 
def check_training_targets(
    input_images: Union[List[Tensor], Tensor],
    targets: Union[List[Dict[str, Tensor]], None],
    spatial_dims: int,
    target_label_key: str,
    target_box_key: str,
) -> None:
    """
    Validate the input images/targets during training (raise a `ValueError` if invalid).

    Args:
        input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
            or 2) a list of image tensors, each image i may have different size
            (C, H_i, W_i) or (C, H_i, W_i, D_i).
        targets: a list of dict. Each dict with two keys: target_box_key and
            target_label_key, ground-truth boxes present in the image.
        spatial_dims: number of spatial dimensions of the images, 2 or 3.
        target_label_key: the expected key of target labels.
        target_box_key: the expected key of target boxes.

    Raises:
        ValueError: when targets are missing, mismatched in length, missing the
            expected keys, or when boxes are not a [N, 2 * spatial_dims] Tensor.
    """
    if targets is None:
        raise ValueError("Please provide ground truth targets during training.")

    if len(input_images) != len(targets):
        raise ValueError(f"len(input_images) should equal to len(targets), got {len(input_images)}, {len(targets)}.")

    for target in targets:
        # Both keys must be present in every ground-truth dict.
        if target_label_key not in target or target_box_key not in target:
            raise ValueError(
                f"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}."
            )

        boxes = target[target_box_key]
        if not isinstance(boxes, torch.Tensor):
            raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
        # Boxes are corner-style coordinates: one row per box, 2 values per spatial dim.
        if len(boxes.shape) != 2 or boxes.shape[-1] != 2 * spatial_dims:
            raise ValueError(
                f"Expected target boxes to be a tensor of shape [N, {2 * spatial_dims}], got {boxes.shape}."
            )
    return
def pad_images(
    input_images: Union[List[Tensor], Tensor],
    spatial_dims: int,
    size_divisible: Union[int, Sequence[int]],
    mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
    **kwargs,
) -> Tuple[Tensor, List[List[int]]]:
    """
    Pad the input images, so that the output spatial sizes are divisible by `size_divisible`.
    It pads them at the end to create a (B, C, H, W) or (B, C, H, W, D) Tensor.
    Padded size (H, W) or (H, W, D) is divisible by size_divisible.
    Default padding uses constant padding with value 0.0

    Args:
        input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
            or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
        spatial_dims: number of spatial dimensions of the images, 2D or 3D.
        size_divisible: int or Sequence[int], is the expected pattern on the input image shape.
            If an int, the same `size_divisible` will be applied to all the input spatial dimensions.
        mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for `torch.pad` function.

    Return:
        - images, a (B, C, H, W) or (B, C, H, W, D) Tensor
        - image_sizes, the original spatial size of each image
    """
    # Broadcast a scalar divisor to one value per spatial dim.
    size_divisible = ensure_tuple_rep(size_divisible, spatial_dims)

    # If input_images: Tensor
    if isinstance(input_images, Tensor):
        orig_size = list(input_images.shape[-spatial_dims:])
        new_size = compute_divisible_spatial_size(spatial_shape=orig_size, k=size_divisible)
        # (before, after) pad pairs per spatial dim; only pad at the end (after).
        all_pad_width = [(0, max(sp_i - orig_size[i], 0)) for i, sp_i in enumerate(new_size)]
        # F.pad takes a flat list ordered from the last dim backwards, so the
        # pairs are flattened in reversed dim order.
        pt_pad_width = [val for sublist in all_pad_width for val in sublist[::-1]][::-1]
        if max(pt_pad_width) == 0:
            # if there is no need to pad
            return input_images, [orig_size] * input_images.shape[0]
        mode_: str = convert_pad_mode(dst=input_images, mode=mode).value
        return F.pad(input_images, pt_pad_width, mode=mode_, **kwargs), [orig_size] * input_images.shape[0]

    # If input_images: List[Tensor])
    image_sizes = [img.shape[-spatial_dims:] for img in input_images]
    # NOTE(review): channel count/dtype/device are taken from the first image —
    # presumably all images in the list match; verify against callers.
    in_channels = input_images[0].shape[0]
    dtype = input_images[0].dtype
    device = input_images[0].device

    # compute max_spatial_size
    image_sizes_t = torch.tensor(image_sizes)
    max_spatial_size_t, _ = torch.max(image_sizes_t, dim=0)

    if len(max_spatial_size_t) != spatial_dims or len(size_divisible) != spatial_dims:
        raise ValueError(" Require len(max_spatial_size_t) == spatial_dims ==len(size_divisible).")

    max_spatial_size = compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible)

    # allocate memory for the padded images
    images = torch.zeros([len(image_sizes), in_channels] + max_spatial_size, dtype=dtype, device=device)

    # Use `SpatialPad` to match sizes, padding in the end will not affect boxes
    padder = SpatialPad(spatial_size=max_spatial_size, method="end", mode=mode, **kwargs)
    for idx, img in enumerate(input_images):
        images[idx, ...] = padder(img)  # type: ignore

    return images, [list(ss) for ss in image_sizes]
Return: - images, a (B, C, H, W) or", "type: ignore return images, [list(ss) for ss in image_sizes] def preprocess_images( input_images: Union[List[Tensor],", "each image \"\"\" check_input_images(input_images, spatial_dims) size_divisible = ensure_tuple_rep(size_divisible, spatial_dims) return pad_images(input_images, spatial_dims, size_divisible,", "dimensions of the images, 2 or 3. target_label_key: the expected key of target", "have different size (C, H_i, W_i) or (C, H_i, W_i, D_i). targets: a", "Tensor, got Tensor shape {input_images.shape}.\" ) elif isinstance(input_images, List): for img in input_images:", "each element should have be (spatial_dims + 1)-D.\" f\"In this case, it should", "memory for the padded images images = torch.zeros([len(image_sizes), in_channels] + max_spatial_size, dtype=dtype, device=device)", "with two keys: target_box_key and target_label_key, ground-truth boxes present in the image. spatial_dims:", "len(max_spatial_size_t) != spatial_dims or len(size_divisible) != spatial_dims: raise ValueError(\" Require len(max_spatial_size_t) == spatial_dims", "F.pad(input_images, pt_pad_width, mode=mode_, **kwargs), [orig_size] * input_images.shape[0] # If input_images: List[Tensor]) image_sizes =", "size (C, H_i, W_i) or (C, H_i, W_i, D_i). spatial_dims: number of spatial", "-> None: \"\"\" Validate the input dimensionality (raise a `ValueError` if invalid). Args:", "int, target_label_key: str, target_box_key: str, ) -> None: \"\"\" Validate the input images/targets", "`size_divisible`. 
It pads them at the end to create a (B, C, H,", "2) a list of image tensors, each image i may have different size", "If an int, the same `size_divisible` will be applied to all the input", "+ 2)}-D Tensor, got Tensor shape {input_images.shape}.\" ) elif isinstance(input_images, List): for img", "not in target.keys()) or (target_box_key not in target.keys()): raise ValueError( f\"{target_label_key} and {target_box_key}", "modes for PyTorch Tensor: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}. One of the listed string", "or 3D. size_divisible: int or Sequence[int], is the expected pattern on the input", "spatial_dims: number of spatial dimensions of the images, 2D or 3D. size_divisible: int", ") return def pad_images( input_images: Union[List[Tensor], Tensor], spatial_dims: int, size_divisible: Union[int, Sequence[int]], mode:", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "import Tensor from monai.transforms.croppad.array import SpatialPad from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode from monai.utils", "1: raise ValueError( \"When input_images is a List[Tensor], each element should have be", "compute max_spatial_size image_sizes_t = torch.tensor(image_sizes) max_spatial_size_t, _ = torch.max(image_sizes_t, dim=0) if len(max_spatial_size_t) !=", "be (spatial_dims + 1)-D.\" f\"In this case, it should be a {(spatial_dims +", "or (H, W, D) is divisible by size_divisible. 
Default padding uses constant padding", "inputs - pad the inputs so that the output spatial sizes are divisible", "{target.keys()}.\" ) boxes = target[target_box_key] if not isinstance(boxes, torch.Tensor): raise ValueError(f\"Expected target boxes", "at the end to create a (B, C, H, W) or (B, C,", "mode=mode).value return F.pad(input_images, pt_pad_width, mode=mode_, **kwargs), [orig_size] * input_images.shape[0] # If input_images: List[Tensor])", "0)) for i, sp_i in enumerate(new_size)] pt_pad_width = [val for sublist in all_pad_width", "\" f\"of shape [N, {2* spatial_dims}], got {boxes.shape}.\" ) return def pad_images( input_images:", "len(input_images.shape) != spatial_dims + 2: raise ValueError( \"When input_images is a Tensor, its", "image tensors, each image i may have different size (C, H_i, W_i) or", "# if there is no need to pad return input_images, [orig_size] * input_images.shape[0]", "1) a tensor sized (B, C, H, W) or (B, C, H, W,", "ss in image_sizes] def preprocess_images( input_images: Union[List[Tensor], Tensor], spatial_dims: int, size_divisible: Union[int, Sequence[int]],", "else: raise ValueError(\"input_images needs to be a List[Tensor] or Tensor.\") return def check_training_targets(" ]
[ "dist = 1 - cosine_similarity(tfidf_matrix) num_clusters = 10 km = KMeans(n_clusters=num_clusters, verbose=0) #", "numeric tokens, raw punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems", "tokens, raw punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens", "numpy as np import pandas as pd import nltk import re import os", "by sentence, then by word to ensure that punctuation is caught as it's", "return stems def tokenize_only(text): # first tokenize by sentence, then by word to", "- cosine_similarity(tfidf_matrix) num_clusters = 10 km = KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix)", "object from utils import Reader stopwords = nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr =", "str(vocab_frame.shape[0]) + ' items in vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True,", "= nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text): #", "tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape)", "in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized)", "import mpld3 from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import", "replace 6 with n words per cluster print '{}'.format(vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore')), print ''", "for each item in 'all_tweets', tokenize/stem 
totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list allwords_tokenized", "verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] for", "if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems = [stemmer.stem(t) for t in filtered_tokens] return stems", "t in filtered_tokens] return stems def tokenize_only(text): # first tokenize by sentence, then", "sys import os import code # Data related import import numpy as np", "tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are ' + str(vocab_frame.shape[0])", "os import code # Data related import import numpy as np import pandas", "[word.lower() for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens = [] #", "# Data related import import numpy as np import pandas as pd import", "= [] totalvocab_stemmed = [] totalvocab_tokenized = [] for f in files: tweets", "working_directory = os.getcwd() files = Reader.read_directory(working_directory) print '{} files available'.format(len(files)) # TODO: remove", "tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems = [stemmer.stem(t) for t in filtered_tokens] return", "then by word to ensure that punctuation is caught as it's own token", "files = files[:800] all_tweets = [] totalvocab_stemmed = [] totalvocab_tokenized = [] for", "caught as it's own token tokens = [word for sent in nltk.sent_tokenize(text) for", "punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems = [stemmer.stem(t) for", "import re import os import codecs from sklearn import feature_extraction import mpld3 from", "the 'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = 
pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print", "tokens = [word for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens =", "[] totalvocab_tokenized = [] for f in files: tweets = Reader.read_file(f) selected_tweets =", "for t in filtered_tokens] return stems def tokenize_only(text): # first tokenize by sentence,", "= files[:800] all_tweets = [] totalvocab_stemmed = [] totalvocab_tokenized = [] for f", "are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,", "10 km = KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids", "# OS-level import import sys import os import code # Data related import", "TweetTokenizer # Project related object from utils import Reader stopwords = nltk.corpus.stopwords.words('english') stemmer", "tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses print", "Data related import import numpy as np import pandas as pd import nltk", "sentence, then by word to ensure that punctuation is caught as it's own", "import SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import", "item in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i)", "import cosine_similarity from sklearn.cluster import KMeans from nltk.tokenize import TweetTokenizer # Project related", "tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text): # first tokenize by sentence, then", "return filtered_tokens if __name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd() 
files =", "is caught as it's own token tokens = [word.lower() for sent in nltk.sent_tokenize(text)", "files = Reader.read_directory(working_directory) print '{} files available'.format(len(files)) # TODO: remove me files =", "== 'en', tweets) texts = map(lambda t: t.body(), selected_tweets) for i in texts:", "nltk.tokenize import TweetTokenizer # Project related object from utils import Reader stopwords =", "import os import codecs from sklearn import feature_extraction import mpld3 from nltk.stem.snowball import", "= 10 km = KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist()", "utils import Reader stopwords = nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True,", "from nltk.tokenize import TweetTokenizer # Project related object from utils import Reader stopwords", "order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i in range(num_clusters): print 'Cluster {} words: '.format(str(i)),", "f in files: tweets = Reader.read_file(f) selected_tweets = filter(lambda t: t.is_post() and t.language()", "(e.g., numeric tokens, raw punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token)", "ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses print 'td-idf", "sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import KMeans from nltk.tokenize import TweetTokenizer # Project", "np import pandas as pd import nltk import re import os import codecs", "= [stemmer.stem(t) for t in filtered_tokens] return stems def tokenize_only(text): # first tokenize", "allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are '", "caught as it's own token tokens = 
[word.lower() for sent in nltk.sent_tokenize(text) for", "KMeans from nltk.tokenize import TweetTokenizer # Project related object from utils import Reader", "re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens if __name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory =", "[] # filter out any tokens not containing letters (e.g., numeric tokens, raw", "'en', tweets) texts = map(lambda t: t.body(), selected_tweets) for i in texts: allwords_stemmed", "for f in files: tweets = Reader.read_file(f) selected_tweets = filter(lambda t: t.is_post() and", "filtered_tokens.append(token) return filtered_tokens if __name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd() files", "i in range(num_clusters): print 'Cluster {} words: '.format(str(i)), for ind in order_centroids[i, :6]:", "pd import nltk import re import os import codecs from sklearn import feature_extraction", "tokenize_and_stem(i) # for each item in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed'", "= [] # filter out any tokens not containing letters (e.g., numeric tokens,", "totalvocab_tokenized = [] for f in files: tweets = Reader.read_file(f) selected_tweets = filter(lambda", "is caught as it's own token tokens = [word for sent in nltk.sent_tokenize(text)", "= map(lambda t: t.body(), selected_tweets) for i in texts: allwords_stemmed = tokenize_and_stem(i) #", "tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame =", "the vectorizer to synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist =", "'__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd() files = Reader.read_directory(working_directory) print '{} files 
available'.format(len(files))", "raw punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens if", "from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import KMeans from", "# extend the 'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words': totalvocab_tokenized},", "letters (e.g., numeric tokens, raw punctuation) for token in tokens: if re.search('[a-zA-Z]', token):", "in files: tweets = Reader.read_file(f) selected_tweets = filter(lambda t: t.is_post() and t.language() ==", "in texts: allwords_stemmed = tokenize_and_stem(i) # for each item in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed)", "map(lambda t: t.body(), selected_tweets) for i in texts: allwords_stemmed = tokenize_and_stem(i) # for", "import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import KMeans from nltk.tokenize import", "from utils import Reader stopwords = nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False,", "tweets) texts = map(lambda t: t.body(), selected_tweets) for i in texts: allwords_stemmed =", "# Project related object from utils import Reader stopwords = nltk.corpus.stopwords.words('english') stemmer =", "= [] for f in files: tweets = Reader.read_file(f) selected_tweets = filter(lambda t:", "re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems = [stemmer.stem(t) for t in filtered_tokens] return stems def", "to ensure that punctuation is caught as it's own token tokens = [word", "import import numpy as np import pandas as pd import nltk import re", "strip_handles=True, reduce_len=True) def tokenize_and_stem(text): # first tokenize by sentence, then by word to", "filter(lambda t: t.is_post() and 
t.language() == 'en', tweets) texts = map(lambda t: t.body(),", "vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix =", "to synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist = 1 -", "filtered_tokens if __name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd() files = Reader.read_directory(working_directory)", "nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens = [] # filter out any tokens", "totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are ' + str(vocab_frame.shape[0]) +", "matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist = 1 - cosine_similarity(tfidf_matrix) num_clusters = 10", "Reader.read_file(f) selected_tweets = filter(lambda t: t.is_post() and t.language() == 'en', tweets) texts =", "for ind in order_centroids[i, :6]: # replace 6 with n words per cluster", "word to ensure that punctuation is caught as it's own token tokens =", "out any tokens not containing letters (e.g., numeric tokens, raw punctuation) for token", "to ensure that punctuation is caught as it's own token tokens = [word.lower()", "tokenize by sentence, then by word to ensure that punctuation is caught as", "punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens if __name__", "that punctuation is caught as it's own token tokens = [word for sent", "= 1 - cosine_similarity(tfidf_matrix) num_clusters = 10 km = KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(),", "# coding=utf-8 # OS-level import import sys import os import code # Data", "Project related object from utils import Reader stopwords = 
nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\")", "import sys import os import code # Data related import import numpy as", "order_centroids[i, :6]: # replace 6 with n words per cluster print '{}'.format(vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8',", ":6]: # replace 6 with n words per cluster print '{}'.format(vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore')),", "own token tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)]", "not containing letters (e.g., numeric tokens, raw punctuation) for token in tokens: if", "for i in range(num_clusters): print 'Cluster {} words: '.format(str(i)), for ind in order_centroids[i,", "sklearn.cluster import KMeans from nltk.tokenize import TweetTokenizer # Project related object from utils", "items in vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3))", "range(num_clusters): print 'Cluster {} words: '.format(str(i)), for ind in order_centroids[i, :6]: # replace", "ind in order_centroids[i, :6]: # replace 6 with n words per cluster print", "stemmer = SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text): # first tokenize", "OS-level import import sys import os import code # Data related import import", "in vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix", "' items in vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,", "'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame' tfidf_vectorizer = 
TfidfVectorizer(max_df=0.8,", "from sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import KMeans from nltk.tokenize import TweetTokenizer #", "selected_tweets = filter(lambda t: t.is_post() and t.language() == 'en', tweets) texts = map(lambda", "list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are", "codecs from sklearn import feature_extraction import mpld3 from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text", "# fit the vectorizer to synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names()", "related object from utils import Reader stopwords = nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr", "from sklearn import feature_extraction import mpld3 from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import", "km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i in range(num_clusters): print 'Cluster {} words:", "t.body(), selected_tweets) for i in texts: allwords_stemmed = tokenize_and_stem(i) # for each item", "t: t.is_post() and t.language() == 'en', tweets) texts = map(lambda t: t.body(), selected_tweets)", "it's own token tokens = [word for sent in nltk.sent_tokenize(text) for word in", "TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text): # first tokenize by sentence, then by word", "synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist = 1 - cosine_similarity(tfidf_matrix)", "all_tweets = [] totalvocab_stemmed = [] totalvocab_tokenized = [] for f in files:", "first tokenize by sentence, then by word to ensure that punctuation is caught", "print 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame' 
tfidf_vectorizer =", "filtered_tokens = [] # filter out any tokens not containing letters (e.g., numeric", "**locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i in range(num_clusters):", "print 'Cluster {} words: '.format(str(i)), for ind in order_centroids[i, :6]: # replace 6", "'{} files available'.format(len(files)) # TODO: remove me files = files[:800] all_tweets = []", "== '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd() files = Reader.read_directory(working_directory) print '{} files", "'.format(str(i)), for ind in order_centroids[i, :6]: # replace 6 with n words per", "sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import KMeans from nltk.tokenize", "texts = map(lambda t: t.body(), selected_tweets) for i in texts: allwords_stemmed = tokenize_and_stem(i)", "import import sys import os import code # Data related import import numpy", "# code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i", "'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame", "totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame'", "punctuation is caught as it's own token tokens = [word.lower() for sent in", "for i in texts: allwords_stemmed = tokenize_and_stem(i) # for each item in 'all_tweets',", "def tokenize_and_stem(text): # first tokenize by sentence, then by word to ensure that", "Reader stopwords = nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def", 
"import os import code # Data related import import numpy as np import", "selected_tweets) for i in texts: allwords_stemmed = tokenize_and_stem(i) # for each item in", "from sklearn.cluster import KMeans from nltk.tokenize import TweetTokenizer # Project related object from", "raw punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems = [stemmer.stem(t)", "def tokenize_only(text): # first tokenize by sentence, then by word to ensure that", "available'.format(len(files)) # TODO: remove me files = files[:800] all_tweets = [] totalvocab_stemmed =", "= os.getcwd() files = Reader.read_directory(working_directory) print '{} files available'.format(len(files)) # TODO: remove me", "= filter(lambda t: t.is_post() and t.language() == 'en', tweets) texts = map(lambda t:", "print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist = 1 - cosine_similarity(tfidf_matrix) num_clusters", "for word in tknzr.tokenize(sent)] filtered_tokens = [] # filter out any tokens not", "for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens = [] # filter", "= SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text): # first tokenize by", "= tfidf_vectorizer.get_feature_names() dist = 1 - cosine_similarity(tfidf_matrix) num_clusters = 10 km = KMeans(n_clusters=num_clusters,", "coding=utf-8 # OS-level import import sys import os import code # Data related", "by word to ensure that punctuation is caught as it's own token tokens", "if __name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd() files = Reader.read_directory(working_directory) print", "tokens, raw punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems =", "# first tokenize by sentence, then by word to ensure that punctuation is", "import nltk 
import re import os import codecs from sklearn import feature_extraction import", "filtered_tokens] return stems def tokenize_only(text): # first tokenize by sentence, then by word", "t.is_post() and t.language() == 'en', tweets) texts = map(lambda t: t.body(), selected_tweets) for", "'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there", "num_clusters = 10 km = KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters =", "km.cluster_centers_.argsort()[:, ::-1] for i in range(num_clusters): print 'Cluster {} words: '.format(str(i)), for ind", "punctuation is caught as it's own token tokens = [word for sent in", "use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses", "import TweetTokenizer # Project related object from utils import Reader stopwords = nltk.corpus.stopwords.words('english')", "::-1] for i in range(num_clusters): print 'Cluster {} words: '.format(str(i)), for ind in", "token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens if __name__ == '__main__':", "' + str(vocab_frame.shape[0]) + ' items in vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05,", "nltk import re import os import codecs from sklearn import feature_extraction import mpld3", "files available'.format(len(files)) # TODO: remove me files = files[:800] all_tweets = [] totalvocab_stemmed", "texts: allwords_stemmed = tokenize_and_stem(i) # for each item in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) #", "tokens not containing letters (e.g., numeric tokens, raw punctuation) for token in tokens:", "Reader.read_directory(working_directory) print '{} files 
available'.format(len(files)) # TODO: remove me files = files[:800] all_tweets", "in filtered_tokens] return stems def tokenize_only(text): # first tokenize by sentence, then by", "# for each item in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list", "[stemmer.stem(t) for t in filtered_tokens] return stems def tokenize_only(text): # first tokenize by", "in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems = [stemmer.stem(t) for t in filtered_tokens]", "cosine_similarity from sklearn.cluster import KMeans from nltk.tokenize import TweetTokenizer # Project related object", "code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i in", "print '{} files available'.format(len(files)) # TODO: remove me files = files[:800] all_tweets =", "if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens if __name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory", "tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms =", "3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses print 'td-idf matrix:", "= tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms", "tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens =", "= TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) #", "index=totalvocab_stemmed) print 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame' tfidf_vectorizer", "import code # Data related import 
import numpy as np import pandas as", "related import import numpy as np import pandas as pd import nltk import", "import codecs from sklearn import feature_extraction import mpld3 from nltk.stem.snowball import SnowballStemmer from", "that punctuation is caught as it's own token tokens = [word.lower() for sent", "import feature_extraction import mpld3 from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer from", "sklearn import feature_extraction import mpld3 from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer", "= km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i in range(num_clusters): print 'Cluster {}", "TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit", "= pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are ' + str(vocab_frame.shape[0]) + ' items", "= Reader.read_directory(working_directory) print '{} files available'.format(len(files)) # TODO: remove me files = files[:800]", "os import codecs from sklearn import feature_extraction import mpld3 from nltk.stem.snowball import SnowballStemmer", "in tknzr.tokenize(sent)] filtered_tokens = [] # filter out any tokens not containing letters", "= tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are ' +", "[word for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens = [] #", "in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens if __name__ == '__main__': reload(sys)", "sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens = [] # filter out", "= [] totalvocab_tokenized 
= [] for f in files: tweets = Reader.read_file(f) selected_tweets", "feature_extraction import mpld3 from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise", "reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd() files = Reader.read_directory(working_directory) print '{} files available'.format(len(files)) #", "token tokens = [word for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens", "allwords_stemmed = tokenize_and_stem(i) # for each item in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend", "files: tweets = Reader.read_file(f) selected_tweets = filter(lambda t: t.is_post() and t.language() == 'en',", "totalvocab_stemmed = [] totalvocab_tokenized = [] for f in files: tweets = Reader.read_file(f)", "fit the vectorizer to synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist", "km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i in range(num_clusters): print", "re import os import codecs from sklearn import feature_extraction import mpld3 from nltk.stem.snowball", "vectorizer to synopses print 'td-idf matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist = 1", "# TODO: remove me files = files[:800] all_tweets = [] totalvocab_stemmed = []", "mpld3 from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity", "# replace 6 with n words per cluster print '{}'.format(vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore')), print", "vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are ' + str(vocab_frame.shape[0]) + '", "stop_words='english', use_idf=True, 
tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to", "as it's own token tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word", "[] for f in files: tweets = Reader.read_file(f) selected_tweets = filter(lambda t: t.is_post()", "t.language() == 'en', tweets) texts = map(lambda t: t.body(), selected_tweets) for i in", "stopwords = nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text):", "token tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens", "1 - cosine_similarity(tfidf_matrix) num_clusters = 10 km = KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals()))", "ensure that punctuation is caught as it's own token tokens = [word for", "i in texts: allwords_stemmed = tokenize_and_stem(i) # for each item in 'all_tweets', tokenize/stem", "own token tokens = [word for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)]", "word in tknzr.tokenize(sent)] filtered_tokens = [] # filter out any tokens not containing", "token): filtered_tokens.append(token) return filtered_tokens if __name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd()", "[] totalvocab_stemmed = [] totalvocab_tokenized = [] for f in files: tweets =", "code # Data related import import numpy as np import pandas as pd", "km = KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids =", "t: t.body(), selected_tweets) for i in texts: allwords_stemmed = tokenize_and_stem(i) # for each", "remove me files = files[:800] all_tweets = [] totalvocab_stemmed = [] totalvocab_tokenized =", "nltk.stem.snowball import SnowballStemmer from 
sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster", "ensure that punctuation is caught as it's own token tokens = [word.lower() for", "terms = tfidf_vectorizer.get_feature_names() dist = 1 - cosine_similarity(tfidf_matrix) num_clusters = 10 km =", "SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text): # first tokenize by sentence,", "tokenize_only(text): # first tokenize by sentence, then by word to ensure that punctuation", "os.getcwd() files = Reader.read_directory(working_directory) print '{} files available'.format(len(files)) # TODO: remove me files", "= km.cluster_centers_.argsort()[:, ::-1] for i in range(num_clusters): print 'Cluster {} words: '.format(str(i)), for", "clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i in range(num_clusters): print 'Cluster", "in order_centroids[i, :6]: # replace 6 with n words per cluster print '{}'.format(vocab_frame.ix[terms[ind].split('", "me files = files[:800] all_tweets = [] totalvocab_stemmed = [] totalvocab_tokenized = []", "__name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8') working_directory = os.getcwd() files = Reader.read_directory(working_directory) print '{}", "+ str(vocab_frame.shape[0]) + ' items in vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english',", "import pandas as pd import nltk import re import os import codecs from", "filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation) for", "pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed) print 'there are ' + str(vocab_frame.shape[0]) + ' items in", "{}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist = 1 - cosine_similarity(tfidf_matrix) num_clusters = 10 km", "TfidfVectorizer from 
sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import KMeans from nltk.tokenize import TweetTokenizer", "files[:800] all_tweets = [] totalvocab_stemmed = [] totalvocab_tokenized = [] for f in", "pandas as pd import nltk import re import os import codecs from sklearn", "SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import KMeans", "import KMeans from nltk.tokenize import TweetTokenizer # Project related object from utils import", "numeric tokens, raw punctuation) for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return", "filtered_tokens.append(token) stems = [stemmer.stem(t) for t in filtered_tokens] return stems def tokenize_only(text): #", "in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens = [] # filter out any", "it's own token tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in", "max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the", "= KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:,", "as pd import nltk import re import os import codecs from sklearn import", "and t.language() == 'en', tweets) texts = map(lambda t: t.body(), selected_tweets) for i", "sys.setdefaultencoding('utf-8') working_directory = os.getcwd() files = Reader.read_directory(working_directory) print '{} files available'.format(len(files)) # TODO:", "containing letters (e.g., numeric tokens, raw punctuation) for token in tokens: if re.search('[a-zA-Z]',", "{} words: '.format(str(i)), for ind in order_centroids[i, :6]: # replace 6 with n", "cosine_similarity(tfidf_matrix) num_clusters = 10 km = 
KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters", "stems = [stemmer.stem(t) for t in filtered_tokens] return stems def tokenize_only(text): # first", "= [word for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens = []", "TODO: remove me files = files[:800] all_tweets = [] totalvocab_stemmed = [] totalvocab_tokenized", "tweets = Reader.read_file(f) selected_tweets = filter(lambda t: t.is_post() and t.language() == 'en', tweets)", "any tokens not containing letters (e.g., numeric tokens, raw punctuation) for token in", "= tokenize_and_stem(i) # for each item in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend the", "words: '.format(str(i)), for ind in order_centroids[i, :6]: # replace 6 with n words", "in range(num_clusters): print 'Cluster {} words: '.format(str(i)), for ind in order_centroids[i, :6]: #", "KMeans(n_clusters=num_clusters, verbose=0) # code.interact(local=dict(globals(), **locals())) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1]", "tfidf_vectorizer.get_feature_names() dist = 1 - cosine_similarity(tfidf_matrix) num_clusters = 10 km = KMeans(n_clusters=num_clusters, verbose=0)", "'Cluster {} words: '.format(str(i)), for ind in order_centroids[i, :6]: # replace 6 with", "'td-idf matrix: {}'.format(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() dist = 1 - cosine_similarity(tfidf_matrix) num_clusters =", "extend the 'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed)", "tknzr.tokenize(sent)] filtered_tokens = [] # filter out any tokens not containing letters (e.g.,", "for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens if __name__ ==", "each 
item in 'all_tweets', tokenize/stem totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list allwords_tokenized =", "stems def tokenize_only(text): # first tokenize by sentence, then by word to ensure", "tokenize_and_stem(text): # first tokenize by sentence, then by word to ensure that punctuation", "+ ' items in vocab_frame' tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem,", "= TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text): # first tokenize by sentence, then by", "token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems = [stemmer.stem(t) for t in", "from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from", "# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)", "= [word.lower() for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)] filtered_tokens = []", "nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) def tokenize_and_stem(text): # first", "min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer", "tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) return filtered_tokens if __name__ == '__main__': reload(sys) sys.setdefaultencoding('utf-8')", "for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) stems = [stemmer.stem(t) for t", "token): filtered_tokens.append(token) stems = [stemmer.stem(t) for t in filtered_tokens] return stems def tokenize_only(text):", "as np import pandas as pd import nltk import re import os import", 
"<filename>document_clustering/cluster.py # coding=utf-8 # OS-level import import sys import os import code #", "totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list allwords_tokenized = tokenize_only(i) totalvocab_tokenized.extend(allwords_tokenized) vocab_frame = pd.DataFrame({'words':", "import Reader stopwords = nltk.corpus.stopwords.words('english') stemmer = SnowballStemmer(\"english\") tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)", "reduce_len=True) def tokenize_and_stem(text): # first tokenize by sentence, then by word to ensure", "= Reader.read_file(f) selected_tweets = filter(lambda t: t.is_post() and t.language() == 'en', tweets) texts", "import numpy as np import pandas as pd import nltk import re import", "as it's own token tokens = [word for sent in nltk.sent_tokenize(text) for word", "tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.05, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3)) tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets)" ]