ngram
listlengths
0
67.8k
[ "mock from sast_controller.drivers.cx import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class", "import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop)", "cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL", "self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher =", "from unittest import mock from sast_controller.drivers.cx import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher", "'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url())", "'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn = 
CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK',", "self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url'", "def setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class", "1).ServiceURL = 'service_url' client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client = self.zeep_client_class() zeep_client.service.Login.assert_called_with({'User':", "= 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2',", "self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password')", "'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', 
transport=self.zeep_transport_class()) def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password')", "zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username',", "zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self):", "test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url',", "= zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname',", "setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') 
self.zeep_client_class =", "def test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client =", "client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client = self.zeep_client_class() zeep_client.service.Login.assert_called_with({'User': 'username', 'Pass': 'password'},", "self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self): cx_conn", "= mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop)", "'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL =", "cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url())", 
"mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl',", "= requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport')", "self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL", "unittest import mock from sast_controller.drivers.cx import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher =", "unittest from unittest import mock from sast_controller.drivers.cx import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def setUp(self):", "cx_conn.get_client_url()) def test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client", "import mock from sast_controller.drivers.cx import CheckmarxConnection class 
TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session')", "self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn", "CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL", "= zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def", "self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self):", "= CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) 
cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2',", "class TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher =", "'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2'", "requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class", "self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url'", "zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called()", "mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = 
mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher", "self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL =", "def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self): cx_conn =", "'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK',", "cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def", "TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') 
self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client')", "requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start()", "CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start() self.addCleanup(requests_session_patcher.stop) zeep_client_patcher", "from sast_controller.drivers.cx import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class =", "CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username',", "cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn =", "'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client = cx_conn.get_client() 
self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client =", "transport=self.zeep_transport_class()) def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK',", "= cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client = self.zeep_client_class() zeep_client.service.Login.assert_called_with({'User': 'username', 'Pass': 'password'}, 1033)", "= CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport,", "cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2',", "test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class()) def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname',", "cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client = 
self.zeep_client_class() zeep_client.service.Login.assert_called_with({'User': 'username', 'Pass': 'password'}, 1033) self.assertEqual(client,", "zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start()", "cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname',", "<filename>sast_controller/tests/drivers/test_checkmarx_connection.py import unittest from unittest import mock from sast_controller.drivers.cx import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase):", "'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client", "CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False)", "mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def", "= 
mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop)", "def test_client_url(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1)", "1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self):", "self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client = self.zeep_client_class() zeep_client.service.Login.assert_called_with({'User': 'username', 'Pass': 'password'}, 1033) self.assertEqual(client, cx_conn.clients['SDK'])", "1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username',", "test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client = cx_conn.get_client()", "= 'service_url' client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client = self.zeep_client_class() 
zeep_client.service.Login.assert_called_with({'User': 'username',", "= mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class())", "import unittest from unittest import mock from sast_controller.drivers.cx import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def", "= 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password')", "1).ServiceURL = 'service_url' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1) self.assertEqual('service_url', cx_conn.get_client_url()) cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2' cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1)", "cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client = self.zeep_client_class()", "cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url' client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl',", "sast_controller.drivers.cx import CheckmarxConnection class TestCheckmarxConnection(unittest.TestCase): def setUp(self): requests_session_patcher = 
mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session') self.requests_session_class = requests_session_patcher.start()", "self.addCleanup(requests_session_patcher.stop) zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client') self.zeep_client_class = zeep_client_patcher.start() self.addCleanup(zeep_client_patcher.stop) zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport') self.zeep_transport_class =", "'service_url' client = cx_conn.get_client() self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False) zeep_client = self.zeep_client_class() zeep_client.service.Login.assert_called_with({'User': 'username', 'Pass':", "1) self.assertEqual('service_url_2', cx_conn.get_client_url()) def test_get_client(self): cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL =", "self.zeep_transport_class = zeep_transport_patcher.start() self.addCleanup(zeep_transport_patcher.stop) def test_checkmarx_connection(self): CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password') self.requests_session_class.assert_called() self.zeep_transport_class.assert_called_with(session=self.requests_session_class()) self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl', transport=self.zeep_transport_class())" ]
[ "os.path.getsize(path) if os.path.isdir(path): for fileName in os.listdir(path): childPath = os.path.join(path,fileName) total += disk_usage(childPath)", "os def disk_usage(path): total = os.path.getsize(path) if os.path.isdir(path): for fileName in os.listdir(path): childPath", "total = os.path.getsize(path) if os.path.isdir(path): for fileName in os.listdir(path): childPath = os.path.join(path,fileName) total", "import os def disk_usage(path): total = os.path.getsize(path) if os.path.isdir(path): for fileName in os.listdir(path):", "os.path.isdir(path): for fileName in os.listdir(path): childPath = os.path.join(path,fileName) total += disk_usage(childPath) print('0:<7'.format(total),path) return", "= os.path.getsize(path) if os.path.isdir(path): for fileName in os.listdir(path): childPath = os.path.join(path,fileName) total +=", "for fileName in os.listdir(path): childPath = os.path.join(path,fileName) total += disk_usage(childPath) print('0:<7'.format(total),path) return total", "def disk_usage(path): total = os.path.getsize(path) if os.path.isdir(path): for fileName in os.listdir(path): childPath =", "disk_usage(path): total = os.path.getsize(path) if os.path.isdir(path): for fileName in os.listdir(path): childPath = os.path.join(path,fileName)", "if os.path.isdir(path): for fileName in os.listdir(path): childPath = os.path.join(path,fileName) total += disk_usage(childPath) print('0:<7'.format(total),path)" ]
[]
[ "= \"Distance (nm)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = ( \"The average", "os.remove(outfile) # Test 2: When input data is a time series x_label =", "line_3 = \"The minimum of distance occurs at 0.000 ns.\\n\" line_4 = \"The", "readable by np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file) # Case 3: Non-default col_idx x3,", "7, 8] # Below we test from reading the file to cleaning the", "Non-default col_idx x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4) # Here we only compare the", "2, 3, 4, 5, 6, 7, 8] # Below we test from reading", "cleaning the data x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output x3, y3 =", "np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2)", "of free energy: 100.000 kT, which occurs at 0.000 deg.\\n\" texts = [line_1,", "True assert texts == lines os.remove(outfile) # Test 2: When input data is", "occurs at 0.000 ns.\\n\" line_4 = \"The distance (149.000 nm) at 49.000 ns", "1501 assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1 assert int(np.sum(np.diff(x3))) == (len(x3)", "is a time series x_label = \"Time (ns)\" y_label = \"Distance (nm)\" data_processing.analyze_data(x,", "1) * 2 def test_scale_data(): f = 2 T = 300 c1 =", "# Written by <NAME> <<EMAIL>> # # Copyright (c) 2021 University of Colorado", "4, 5, 6, 7, 8] assert len(x2) == 3000 assert len(y2) == 3000", "= np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355])", "2, 7, 8, 4, 3] # not the x-data for a typical time", "100.000)\\n\" ) line_2 = \"The maximum of distance occurs at 99.000 ns.\\n\" line_3", "-19909.130859, -20057.402344, -19812.580078] ) xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, 
-2.89026524]) yy3 =", "\"sample_outputs\") fes_file = input_path + \"/fes.dat\" potential_file = input_path + \"/potential.xvg\" hills_corrupted =", "line_4] infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is True", "Below we test from reading the file to cleaning the data x2, y2", "= infile.readlines() infile.close() assert os.path.isfile(outfile) is True assert texts == lines os.remove(outfile) #", "data_unchaged = data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20) data_3 =", "Test 1: When input data is not a time series x_label = \"Dihedral", "3] # not the x-data for a typical time seris y1 = [1,", "to radian\": c2, \"radian to degree\": 1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for", "= \"Free energy (kT)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = \"Maximum of", "data_processing.read_2d_data(potential_file) # Case 3: Non-default col_idx x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4) # Here", "for i in conversion_dict: expected = data * conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data,", "occurs at 0.000 deg.\\n\" texts = [line_1, line_2] infile = open(outfile, \"r\") lines", "data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = \"Maximum of free energy: 199.000 kT,", "of Colorado Boulder # # # #################################################################### \"\"\" Unit tests for the module", "-2.95309709, -2.89026524]) yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2 = np.array([0, 2,", "dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case 1: readable by np.loadtxt", "the first 5 elements to save up some space x1, y1 = x1[:5],", "the x-data for a typical time seris y1 = [1, 2, 3, 4,", "len(y2) == 
3000 assert len(x3) == 1501 assert len(y3) == 1501 assert int(np.sum(np.diff(x2)))", "( \"The average of distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\\n\" )", "line_2, line_3, line_4] infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile)", "open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is True assert texts ==", "#################################################################### \"\"\" Unit tests for the module `MD_plotting_toolkit.data_processing`. \"\"\" import os import numpy", "6, 2, 7, 8, 4, 3] # not the x-data for a typical", "of distance occurs at 0.000 ns.\\n\" line_4 = \"The distance (149.000 nm) at", "c1 * c3, \"kcal/mol to kT\": 1 / (c1 * c3), \"kJ/mol to", "np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T), expected ) def test_slice_data(): data = np.arange(100) data_unchaged", "= output_path + \"/test_output.txt\" # Test 1: When input data is not a", "4, 3] assert list(y1) == [3, 4, 5, 6, 7, 8] assert len(x2)", "2 def test_scale_data(): f = 2 T = 300 c1 = 1.38064852 *", "fes_file = input_path + \"/fes.dat\" potential_file = input_path + \"/potential.xvg\" hills_corrupted = input_path", "== 79 def test_analyze_data(): x = np.arange(100) y = np.arange(100, 200) outfile =", "np.testing.assert_equal(data, data_unchaged) assert data_1[0] == 20 assert data_2[-1] == 19 assert data_3[0] ==", "col_idx x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4) # Here we only compare the first", "2: not readable by np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file) # Case 3: Non-default", "-8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2)", "-8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, 
xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3)", "== 20 assert data_3[-1] == 79 def test_analyze_data(): x = np.arange(100) y =", "\"kcal/mol to kJ/mol\": 1 / c3, \"degree to radian\": c2, \"radian to degree\":", "the file to cleaning the data x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output", "assert texts == lines os.remove(outfile) # Test 2: When input data is a", "series x_label = \"Time (ns)\" y_label = \"Distance (nm)\" data_processing.analyze_data(x, y, x_label, y_label,", "-20057.402344, -19812.580078] ) xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy3 = np.array(", "line_2 = \"The maximum of distance occurs at 99.000 ns.\\n\" line_3 = \"The", "== 19 assert data_3[0] == 20 assert data_3[-1] == 79 def test_analyze_data(): x", "# # Copyright (c) 2021 University of Colorado Boulder # # # ####################################################################", "= input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case 1: readable by np.loadtxt x1,", "def test_deduplicate_data(): x1 = [2, 4, 6, 2, 7, 8, 4, 3] #", "* c3, \"kcal/mol to kT\": 1 / (c1 * c3), \"kJ/mol to kcal/mol\":", "== 1501 assert len(y3) == 1501 assert int(np.sum(np.diff(x2))) == (len(x2) - 1) *", "data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0] == 20 assert data_2[-1]", "# # #################################################################### \"\"\" Unit tests for the module `MD_plotting_toolkit.data_processing`. 
\"\"\" import os", "data) for i in conversion_dict: expected = data * conversion_dict[i] * f np.testing.assert_array_almost_equal(", "\"/corrupted_HILLS\" dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case 1: readable by", "compare the first 5 elements to save up some space x1, y1 =", "not a time series x_label = \"Dihedral (deg)\" y_label = \"Free energy (kT)\"", "line_2] infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is True", "yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1 = [2, 4, 6, 2,", "-8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3,", "nm) at 49.000 ns is closet to the average.\\n\" texts = [line_1, line_2,", "6, 8]) yy2 = np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078] ) xx3 =", "xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1 = [2, 4, 6, 2, 7, 8,", "outfile = output_path + \"/test_output.txt\" # Test 1: When input data is not", "c3), \"kJ/mol to kcal/mol\": c3, \"kcal/mol to kJ/mol\": 1 / c3, \"degree to", "ns.\\n\" line_4 = \"The distance (149.000 nm) at 49.000 ns is closet to", "y1) x2, y2 = data_processing.deduplicate_data(x2, y2) x3, y3 = data_processing.deduplicate_data(x3, y3) assert list(x1)", "line_4 = \"The distance (149.000 nm) at 49.000 ns is closet to the", "numpy as np import MD_plotting_toolkit.data_processing as data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path,", "yy2 = np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078] ) xx3 = np.array([-3.14159265, -3.0787608,", 
"2, 7, 8, 4, 3] assert list(y1) == [3, 4, 5, 6, 7,", "= input_path + \"/fes.dat\" potential_file = input_path + \"/potential.xvg\" hills_corrupted = input_path +", "= data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data,", "x3, y3 = data_processing.deduplicate_data(x3, y3) assert list(x1) == [6, 2, 7, 8, 4,", "np.pi / 180 c3 = 0.239005736 data = np.random.rand(100) conversion_dict = { \"ns", "c2 = np.pi / 180 c3 = 0.239005736 data = np.random.rand(100) conversion_dict =", "a python package to visualize the results obtained from MD # # #", "c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in conversion_dict: expected = data * conversion_dict[i]", "data = np.arange(100) data_unchaged = data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data,", "20 assert data_3[-1] == 79 def test_analyze_data(): x = np.arange(100) y = np.arange(100,", "energy (kT)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = \"Maximum of free energy:", "input_path + \"/fes.dat\" potential_file = input_path + \"/potential.xvg\" hills_corrupted = input_path + \"/corrupted_HILLS\"", "Test 2: When input data is a time series x_label = \"Time (ns)\"", "x3[:5], y3[:5] # Expected results xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1", "assert len(x3) == 1501 assert len(y3) == 1501 assert int(np.sum(np.diff(x2))) == (len(x2) -", "= data_processing.read_2d_data(potential_file) # Case 3: Non-default col_idx x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4) #", "200) outfile = output_path + \"/test_output.txt\" # Test 1: When input data is", "# Copyright (c) 2021 University of Colorado Boulder # # # #################################################################### \"\"\"", "== 
1501 assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1 assert int(np.sum(np.diff(x3))) ==", "not readable by np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file) # Case 3: Non-default col_idx", "assert os.path.isfile(outfile) is True assert texts == lines os.remove(outfile) # Test 2: When", "3: Non-default col_idx x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4) # Here we only compare", "-8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3,", "\"/potential.xvg\" hills_corrupted = input_path + \"/corrupted_HILLS\" dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data():", "infile.readlines() infile.close() assert os.path.isfile(outfile) is True assert texts == lines os.remove(outfile) # Test", "assert len(x2) == 3000 assert len(y2) == 3000 assert len(x3) == 1501 assert", "truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0] == 20 assert", "np.random.rand(100) conversion_dict = { \"ns to ps\": 1000, \"ps to ns\": 1 /", "test_analyze_data(): x = np.arange(100) y = np.arange(100, 200) outfile = output_path + \"/test_output.txt\"", "np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy3 = np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338]", "= np.arange(100) y = np.arange(100, 200) outfile = output_path + \"/test_output.txt\" # Test", "data x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output x3, y3 = data_processing.read_2d_data(dhdl_corrupted) #", "T = 300 c1 = 1.38064852 * 6.022 * T / 1000 c2", "current_path = os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\") output_path = 
os.path.join(current_path, \"sample_outputs\") fes_file =", "* 2 def test_scale_data(): f = 2 T = 300 c1 = 1.38064852", "\"Distance (nm)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = ( \"The average of", "the module `MD_plotting_toolkit.data_processing`. \"\"\" import os import numpy as np import MD_plotting_toolkit.data_processing as", "xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data():", "xx2) np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1 = [2, 4,", "data_processing.deduplicate_data(x1, y1) x2, y2 = data_processing.deduplicate_data(x2, y2) x3, y3 = data_processing.deduplicate_data(x3, y3) assert", "\"Maximum of free energy: 199.000 kT, which occurs at 99.000 deg.\\n\" line_2 =", "obtained from MD # # # # Written by <NAME> <<EMAIL>> # #", "free energy: 100.000 kT, which occurs at 0.000 deg.\\n\" texts = [line_1, line_2]", "# PLUMED output x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1, y1 =", "to kT\": 1 / c1, \"kT to kcal/mol\": c1 * c3, \"kcal/mol to", "a time series x_label = \"Time (ns)\" y_label = \"Distance (nm)\" data_processing.analyze_data(x, y,", "/ c3, \"degree to radian\": c2, \"radian to degree\": 1 / c2, }", "def test_analyze_data(): x = np.arange(100) y = np.arange(100, 200) outfile = output_path +", "line_2 = \"Minimum of free energy: 100.000 kT, which occurs at 0.000 deg.\\n\"", "0.000 deg.\\n\" texts = [line_1, line_2] infile = open(outfile, \"r\") lines = infile.readlines()", "= os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\") output_path = os.path.join(current_path, \"sample_outputs\") fes_file = input_path", 
"len(x3) == 1501 assert len(y3) == 1501 assert int(np.sum(np.diff(x2))) == (len(x2) - 1)", "= data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0] == 20 assert data_2[-1] ==", "180 c3 = 0.239005736 data = np.random.rand(100) conversion_dict = { \"ns to ps\":", "free energy: 199.000 kT, which occurs at 99.000 deg.\\n\" line_2 = \"Minimum of", "texts = [line_1, line_2, line_3, line_4] infile = open(outfile, \"r\") lines = infile.readlines()", "199.000 kT, which occurs at 99.000 deg.\\n\" line_2 = \"Minimum of free energy:", "x1[:5], y1[:5] x2, y2 = x2[:5], y2[:5] x3, y3 = x3[:5], y3[:5] #", "np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def", "output x1, y1 = data_processing.deduplicate_data(x1, y1) x2, y2 = data_processing.deduplicate_data(x2, y2) x3, y3", "1 assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2 def test_scale_data(): f =", "1000 c2 = np.pi / 180 c3 = 0.239005736 data = np.random.rand(100) conversion_dict", "\"radian to degree\": 1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in conversion_dict:", "conversion_dict: expected = data * conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T),", "kT\": 1 / c1, \"kT to kcal/mol\": c1 * c3, \"kcal/mol to kT\":", "os.path.isfile(outfile) is True assert texts == lines os.remove(outfile) # Test 2: When input", "up some space x1, y1 = x1[:5], y1[:5] x2, y2 = x2[:5], y2[:5]", "2 T = 300 c1 = 1.38064852 * 6.022 * T / 1000", "np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in conversion_dict: expected = data * conversion_dict[i] * f", "20 assert data_2[-1] == 19 assert data_3[0] == 20 assert 
data_3[-1] == 79", "truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0] == 20 assert data_2[-1] == 19 assert", "\"The distance (149.000 nm) at 49.000 ns is closet to the average.\\n\" texts", "+ \"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case 1: readable by np.loadtxt x1, y1 =", "MD # # # # Written by <NAME> <<EMAIL>> # # Copyright (c)", "f = 2 T = 300 c1 = 1.38064852 * 6.022 * T", ") def test_slice_data(): data = np.arange(100) data_unchaged = data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20)", "(c1 * c3), \"kJ/mol to kcal/mol\": c3, \"kcal/mol to kJ/mol\": 1 / c3,", "79 def test_analyze_data(): x = np.arange(100) y = np.arange(100, 200) outfile = output_path", "output_path = os.path.join(current_path, \"sample_outputs\") fes_file = input_path + \"/fes.dat\" potential_file = input_path +", "= ( \"The average of distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\\n\"", "y1 = data_processing.deduplicate_data(x1, y1) x2, y2 = data_processing.deduplicate_data(x2, y2) x3, y3 = data_processing.deduplicate_data(x3,", "= \"Minimum of free energy: 100.000 kT, which occurs at 0.000 deg.\\n\" texts", "{ \"ns to ps\": 1000, \"ps to ns\": 1 / 1000, \"kT to", "99.000 deg.\\n\" line_2 = \"Minimum of free energy: 100.000 kT, which occurs at", "at 49.000 ns is closet to the average.\\n\" texts = [line_1, line_2, line_3,", "1: When input data is not a time series x_label = \"Dihedral (deg)\"", "data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = ( \"The average of distance: 149.500", "1 / 1000, \"kT to kJ/mol\": c1, \"kJ/mol to kT\": 1 / c1,", "output_path + \"/test_output.txt\" # Test 1: When input data is not a time", "\"\"\" Unit tests for the module `MD_plotting_toolkit.data_processing`. 
\"\"\" import os import numpy as", "/ c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in conversion_dict: expected = data *", "/ 1000 c2 = np.pi / 180 c3 = 0.239005736 data = np.random.rand(100)", "= os.path.join(current_path, \"sample_outputs\") fes_file = input_path + \"/fes.dat\" potential_file = input_path + \"/potential.xvg\"", "Case 1: readable by np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file) # Case 2: not", "max: 199.000, min: 100.000)\\n\" ) line_2 = \"The maximum of distance occurs at", "c3 = 0.239005736 data = np.random.rand(100) conversion_dict = { \"ns to ps\": 1000,", "input_path = os.path.join(current_path, \"sample_inputs\") output_path = os.path.join(current_path, \"sample_outputs\") fes_file = input_path + \"/fes.dat\"", "* T / 1000 c2 = np.pi / 180 c3 = 0.239005736 data", "deg.\\n\" texts = [line_1, line_2] infile = open(outfile, \"r\") lines = infile.readlines() infile.close()", "= \"The distance (149.000 nm) at 49.000 ns is closet to the average.\\n\"", "= data_processing.deduplicate_data(x3, y3) assert list(x1) == [6, 2, 7, 8, 4, 3] assert", "only compare the first 5 elements to save up some space x1, y1", "Here we only compare the first 5 elements to save up some space", "4, 6, 8]) yy2 = np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078] ) xx3", "19 assert data_3[0] == 20 assert data_3[-1] == 79 def test_analyze_data(): x =", "data_2 = data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0]", "= [line_1, line_2] infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile)", "= \"Time (ns)\" y_label = \"Distance (nm)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1", "= x3[:5], y3[:5] # Expected results xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, 
-2.89026524])", "== (len(x3) - 1) * 2 def test_scale_data(): f = 2 T =", "by <NAME> <<EMAIL>> # # Copyright (c) 2021 University of Colorado Boulder #", "os.path.join(current_path, \"sample_inputs\") output_path = os.path.join(current_path, \"sample_outputs\") fes_file = input_path + \"/fes.dat\" potential_file =", "= data_processing.read_2d_data(hills_corrupted) # PLUMED output x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1,", "some space x1, y1 = x1[:5], y1[:5] x2, y2 = x2[:5], y2[:5] x3,", "module `MD_plotting_toolkit.data_processing`. \"\"\" import os import numpy as np import MD_plotting_toolkit.data_processing as data_processing", "kJ/mol\": 1 / c3, \"degree to radian\": c2, \"radian to degree\": 1 /", "} np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in conversion_dict: expected = data * conversion_dict[i] *", "infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is True assert", "(kT)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = \"Maximum of free energy: 199.000", "y2 = data_processing.read_2d_data(potential_file) # Case 3: Non-default col_idx x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4)", "line_1 = ( \"The average of distance: 149.500 (RMSF: 0.193, max: 199.000, min:", "reading the file to cleaning the data x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED", "y3) assert list(x1) == [6, 2, 7, 8, 4, 3] assert list(y1) ==", "of distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\\n\" ) line_2 = \"The", "\"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is True assert texts == lines", "distance occurs at 0.000 ns.\\n\" line_4 = \"The distance (149.000 nm) at 49.000", "results xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1 = np.array([-0.00035355, -0.00035355, -0.00035355,", "y1[:5] x2, y2 = x2[:5], y2[:5] x3, y3 = 
x3[:5], y3[:5] # Expected", "# MD_plotting_toolkit, # # a python package to visualize the results obtained from", "data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged)", "#################################################################### # # # MD_plotting_toolkit, # # a python package to visualize the", "\"kJ/mol to kcal/mol\": c3, \"kcal/mol to kJ/mol\": 1 / c3, \"degree to radian\":", "which occurs at 0.000 deg.\\n\" texts = [line_1, line_2] infile = open(outfile, \"r\")", "# Expected results xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1 = np.array([-0.00035355,", "# # # MD_plotting_toolkit, # # a python package to visualize the results", "kT\": 1 / (c1 * c3), \"kJ/mol to kcal/mol\": c3, \"kcal/mol to kJ/mol\":", "input_path + \"/potential.xvg\" hills_corrupted = input_path + \"/corrupted_HILLS\" dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\"", "distance (149.000 nm) at 49.000 ns is closet to the average.\\n\" texts =", "= input_path + \"/corrupted_HILLS\" dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case", "by np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file) # Case 3: Non-default col_idx x3, y3", "deg.\\n\" line_2 = \"Minimum of free energy: 100.000 kT, which occurs at 0.000", "y = np.arange(100, 200) outfile = output_path + \"/test_output.txt\" # Test 1: When", "np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1 = [2, 4, 6, 2, 7, 8, 4,", ") xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy3 = np.array( [-8778.4411543, -8765.49326731,", "= [line_1, line_2, line_3, line_4] infile = open(outfile, \"r\") lines = infile.readlines() infile.close()", "space x1, y1 = x1[:5], y1[:5] x2, y2 = x2[:5], y2[:5] x3, y3", 
"np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1 = [2, 4, 6,", "min: 100.000)\\n\" ) line_2 = \"The maximum of distance occurs at 99.000 ns.\\n\"", "1.38064852 * 6.022 * T / 1000 c2 = np.pi / 180 c3", "radian\": c2, \"radian to degree\": 1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i", "data_1[0] == 20 assert data_2[-1] == 19 assert data_3[0] == 20 assert data_3[-1]", "y1 = x1[:5], y1[:5] x2, y2 = x2[:5], y2[:5] x3, y3 = x3[:5],", "1 / c3, \"degree to radian\": c2, \"radian to degree\": 1 / c2,", "(c) 2021 University of Colorado Boulder # # # #################################################################### \"\"\" Unit tests", "= np.random.rand(100) conversion_dict = { \"ns to ps\": 1000, \"ps to ns\": 1", "1 / (c1 * c3), \"kJ/mol to kcal/mol\": c3, \"kcal/mol to kJ/mol\": 1", "[2, 4, 6, 2, 7, 8, 4, 3] # not the x-data for", "PLUMED output x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1, y1 = data_processing.deduplicate_data(x1,", "to kJ/mol\": c1, \"kJ/mol to kT\": 1 / c1, \"kT to kcal/mol\": c1", "= data_processing.deduplicate_data(x2, y2) x3, y3 = data_processing.deduplicate_data(x3, y3) assert list(x1) == [6, 2,", "4, 6, 2, 7, 8, 4, 3] # not the x-data for a", "GROMACS output x1, y1 = data_processing.deduplicate_data(x1, y1) x2, y2 = data_processing.deduplicate_data(x2, y2) x3,", "# Case 3: Non-default col_idx x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4) # Here we", "data is a time series x_label = \"Time (ns)\" y_label = \"Distance (nm)\"", "\"/test_output.txt\" # Test 1: When input data is not a time series x_label", "of distance occurs at 99.000 ns.\\n\" line_3 = \"The minimum of distance occurs", "6, 7, 8] # Below we test from reading the file to cleaning", "# Case 1: readable by np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file) # Case 2:", 
"col_idx=4) # Here we only compare the first 5 elements to save up", "[line_1, line_2] infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is", "4, 3] # not the x-data for a typical time seris y1 =", "# # Written by <NAME> <<EMAIL>> # # Copyright (c) 2021 University of", "# #################################################################### \"\"\" Unit tests for the module `MD_plotting_toolkit.data_processing`. \"\"\" import os import", "6.022 * T / 1000 c2 = np.pi / 180 c3 = 0.239005736", "len(y3) == 1501 assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1 assert int(np.sum(np.diff(x3)))", "# GROMACS output x1, y1 = data_processing.deduplicate_data(x1, y1) x2, y2 = data_processing.deduplicate_data(x2, y2)", "T), expected ) def test_slice_data(): data = np.arange(100) data_unchaged = data_processing.slice_data(data) data_1 =", "Unit tests for the module `MD_plotting_toolkit.data_processing`. \"\"\" import os import numpy as np", "x1, y1 = data_processing.read_2d_data(fes_file) # Case 2: not readable by np.loadtxt x2, y2", "results obtained from MD # # # # Written by <NAME> <<EMAIL>> #", "outfile) line_1 = \"Maximum of free energy: 199.000 kT, which occurs at 99.000", "x1 = [2, 4, 6, 2, 7, 8, 4, 3] # not the", "When input data is not a time series x_label = \"Dihedral (deg)\" y_label", "as np import MD_plotting_toolkit.data_processing as data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\")", "300 c1 = 1.38064852 * 6.022 * T / 1000 c2 = np.pi", "def test_slice_data(): data = np.arange(100) data_unchaged = data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20) data_2", "test_read_2d_data(): # Case 1: readable by np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file) # Case", "at 0.000 deg.\\n\" texts = [line_1, line_2] infile = open(outfile, \"r\") lines =", "7, 8] assert len(x2) == 3000 assert 
len(y2) == 3000 assert len(x3) ==", "assert data_3[-1] == 79 def test_analyze_data(): x = np.arange(100) y = np.arange(100, 200)", "energy: 199.000 kT, which occurs at 99.000 deg.\\n\" line_2 = \"Minimum of free", "np.array([0, 2, 4, 6, 8]) yy2 = np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078]", "= 1.38064852 * 6.022 * T / 1000 c2 = np.pi / 180", "8]) yy2 = np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078] ) xx3 = np.array([-3.14159265,", "c1, \"kJ/mol to kT\": 1 / c1, \"kT to kcal/mol\": c1 * c3,", "i in conversion_dict: expected = data * conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i,", ") line_2 = \"The maximum of distance occurs at 99.000 ns.\\n\" line_3 =", "yy3 = np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1)", "kJ/mol\": c1, \"kJ/mol to kT\": 1 / c1, \"kT to kcal/mol\": c1 *", "np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1 = [2, 4, 6, 2, 7,", "data = np.random.rand(100) conversion_dict = { \"ns to ps\": 1000, \"ps to ns\":", "c3, \"degree to radian\": c2, \"radian to degree\": 1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data),", "to kJ/mol\": 1 / c3, \"degree to radian\": c2, \"radian to degree\": 1", "\"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case 1: readable by np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file)", "== 20 assert data_2[-1] == 19 assert data_3[0] == 20 assert data_3[-1] ==", "data * conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T), expected ) def", "1 / c1, \"kT to kcal/mol\": c1 * c3, \"kcal/mol to kT\": 1", "infile.close() assert os.path.isfile(outfile) is True assert texts == lines os.remove(outfile) # Test 
2:", "f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T), expected ) def test_slice_data(): data = np.arange(100)", "kcal/mol\": c3, \"kcal/mol to kJ/mol\": 1 / c3, \"degree to radian\": c2, \"radian", "python package to visualize the results obtained from MD # # # #", "import MD_plotting_toolkit.data_processing as data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\") output_path =", "to ns\": 1 / 1000, \"kT to kJ/mol\": c1, \"kJ/mol to kT\": 1", "from reading the file to cleaning the data x2, y2 = data_processing.read_2d_data(hills_corrupted) #", "data_processing.scale_data(data, i, f, T), expected ) def test_slice_data(): data = np.arange(100) data_unchaged =", "y3 = data_processing.deduplicate_data(x3, y3) assert list(x1) == [6, 2, 7, 8, 4, 3]", "[-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2,", "to kT\": 1 / (c1 * c3), \"kJ/mol to kcal/mol\": c3, \"kcal/mol to", "# not the x-data for a typical time seris y1 = [1, 2,", "= 0.239005736 data = np.random.rand(100) conversion_dict = { \"ns to ps\": 1000, \"ps", "data_3[0] == 20 assert data_3[-1] == 79 def test_analyze_data(): x = np.arange(100) y", "y_label, outfile) line_1 = \"Maximum of free energy: 199.000 kT, which occurs at", "\"kT to kJ/mol\": c1, \"kJ/mol to kT\": 1 / c1, \"kT to kcal/mol\":", "input_path + \"/corrupted_HILLS\" dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case 1:", "= np.pi / 180 c3 = 0.239005736 data = np.random.rand(100) conversion_dict = {", "assert data_1[0] == 20 assert data_2[-1] == 19 assert data_3[0] == 20 assert", "data is not a time series x_label = \"Dihedral (deg)\" y_label = \"Free", "i, f, T), expected ) def test_slice_data(): 
data = np.arange(100) data_unchaged = data_processing.slice_data(data)", "y1 = data_processing.read_2d_data(fes_file) # Case 2: not readable by np.loadtxt x2, y2 =", "lines os.remove(outfile) # Test 2: When input data is a time series x_label", "line_1 = \"Maximum of free energy: 199.000 kT, which occurs at 99.000 deg.\\n\"", "a typical time seris y1 = [1, 2, 3, 4, 5, 6, 7,", "= data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0] ==", "+ \"/corrupted_HILLS\" dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case 1: readable", "\"\"\" import os import numpy as np import MD_plotting_toolkit.data_processing as data_processing current_path =", "8, 4, 3] # not the x-data for a typical time seris y1", "tests for the module `MD_plotting_toolkit.data_processing`. \"\"\" import os import numpy as np import", "np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1", "as data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\") output_path = os.path.join(current_path, \"sample_outputs\")", "x-data for a typical time seris y1 = [1, 2, 3, 4, 5,", "y_label, outfile) line_1 = ( \"The average of distance: 149.500 (RMSF: 0.193, max:", "y2 = data_processing.deduplicate_data(x2, y2) x3, y3 = data_processing.deduplicate_data(x3, y3) assert list(x1) == [6,", "expected ) def test_slice_data(): data = np.arange(100) data_unchaged = data_processing.slice_data(data) data_1 = data_processing.slice_data(data,", "<<EMAIL>> # # Copyright (c) 2021 University of Colorado Boulder # # #", "assert data_2[-1] == 19 assert data_3[0] == 20 assert data_3[-1] == 79 def", 
"2: When input data is a time series x_label = \"Time (ns)\" y_label", "int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2 def test_scale_data(): f = 2 T", "1000, \"kT to kJ/mol\": c1, \"kJ/mol to kT\": 1 / c1, \"kT to", "def test_scale_data(): f = 2 T = 300 c1 = 1.38064852 * 6.022", "-3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2 =", "-0.00035355]) xx2 = np.array([0, 2, 4, 6, 8]) yy2 = np.array( [-20045.462891, -19989.603516,", "average of distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\\n\" ) line_2 =", "data_3[-1] == 79 def test_analyze_data(): x = np.arange(100) y = np.arange(100, 200) outfile", "x3, y3 = x3[:5], y3[:5] # Expected results xx1 = np.array([-3.14159265, -3.0787608, -3.01592895,", "3000 assert len(x3) == 1501 assert len(y3) == 1501 assert int(np.sum(np.diff(x2))) == (len(x2)", "a time series x_label = \"Dihedral (deg)\" y_label = \"Free energy (kT)\" data_processing.analyze_data(x,", "y2) x3, y3 = data_processing.deduplicate_data(x3, y3) assert list(x1) == [6, 2, 7, 8,", "(149.000 nm) at 49.000 ns is closet to the average.\\n\" texts = [line_1,", "x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4) # Here we only compare the first 5", "hills_corrupted = input_path + \"/corrupted_HILLS\" dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data(): #", "np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2 = np.array([0, 2, 4, 6, 8]) yy2", "\"kJ/mol to kT\": 1 / c1, \"kT to kcal/mol\": c1 * c3, \"kcal/mol", "= 300 c1 = 1.38064852 * 6.022 * T / 1000 c2 =", "-2.95309709, -2.89026524]) yy3 = np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1)", "data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0] 
== 20", "x2, y2 = x2[:5], y2[:5] x3, y3 = x3[:5], y3[:5] # Expected results", "-0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2 = np.array([0, 2, 4, 6, 8]) yy2 =", "== [6, 2, 7, 8, 4, 3] assert list(y1) == [3, 4, 5,", "ns\": 1 / 1000, \"kT to kJ/mol\": c1, \"kJ/mol to kT\": 1 /", "assert len(y3) == 1501 assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1 assert", "\"Dihedral (deg)\" y_label = \"Free energy (kT)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1", "the data x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output x3, y3 = data_processing.read_2d_data(dhdl_corrupted)", "Written by <NAME> <<EMAIL>> # # Copyright (c) 2021 University of Colorado Boulder", "* 1 assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2 def test_scale_data(): f", "xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355,", "== lines os.remove(outfile) # Test 2: When input data is a time series", "to kcal/mol\": c3, \"kcal/mol to kJ/mol\": 1 / c3, \"degree to radian\": c2,", "len(x2) == 3000 assert len(y2) == 3000 assert len(x3) == 1501 assert len(y3)", "to cleaning the data x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output x3, y3", "np.arange(100) y = np.arange(100, 200) outfile = output_path + \"/test_output.txt\" # Test 1:", "readable by np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file) # Case 2: not readable by", "at 99.000 ns.\\n\" line_3 = \"The minimum of distance occurs at 0.000 ns.\\n\"", "data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\") output_path = os.path.join(current_path, \"sample_outputs\") fes_file", "for the module `MD_plotting_toolkit.data_processing`. 
\"\"\" import os import numpy as np import MD_plotting_toolkit.data_processing", "np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078] ) xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709,", "== 3000 assert len(y2) == 3000 assert len(x3) == 1501 assert len(y3) ==", "int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1 assert int(np.sum(np.diff(x3))) == (len(x3) - 1)", "Copyright (c) 2021 University of Colorado Boulder # # # #################################################################### \"\"\" Unit", "c3, \"kcal/mol to kT\": 1 / (c1 * c3), \"kJ/mol to kcal/mol\": c3,", "7, 8, 4, 3] # not the x-data for a typical time seris", "= np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2 = np.array([0, 2, 4, 6, 8])", "/ c1, \"kT to kcal/mol\": c1 * c3, \"kcal/mol to kT\": 1 /", "visualize the results obtained from MD # # # # Written by <NAME>", "np.arange(100) data_unchaged = data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20) data_3", "maximum of distance occurs at 99.000 ns.\\n\" line_3 = \"The minimum of distance", "x1, y1 = data_processing.deduplicate_data(x1, y1) x2, y2 = data_processing.deduplicate_data(x2, y2) x3, y3 =", "5, 6, 7, 8] # Below we test from reading the file to", "1000, \"ps to ns\": 1 / 1000, \"kT to kJ/mol\": c1, \"kJ/mol to", "= input_path + \"/potential.xvg\" hills_corrupted = input_path + \"/corrupted_HILLS\" dhdl_corrupted = input_path +", "expected = data * conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T), expected", "0.000 ns.\\n\" line_4 = \"The distance (149.000 nm) at 49.000 ns is closet", "[3, 4, 5, 6, 7, 8] assert len(x2) == 3000 assert len(y2) ==", "[6, 2, 7, 8, 4, 3] assert list(y1) == [3, 4, 5, 6,", "at 0.000 ns.\\n\" line_4 = \"The distance (149.000 nm) at 49.000 ns is", "+ \"/test_output.txt\" # Test 1: When input data 
is not a time series", "y2 = x2[:5], y2[:5] x3, y3 = x3[:5], y3[:5] # Expected results xx1", "x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1, y1 = data_processing.deduplicate_data(x1, y1) x2,", "y2[:5] x3, y3 = x3[:5], y3[:5] # Expected results xx1 = np.array([-3.14159265, -3.0787608,", "to degree\": 1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in conversion_dict: expected", "= \"Maximum of free energy: 199.000 kT, which occurs at 99.000 deg.\\n\" line_2", "of free energy: 199.000 kT, which occurs at 99.000 deg.\\n\" line_2 = \"Minimum", "`MD_plotting_toolkit.data_processing`. \"\"\" import os import numpy as np import MD_plotting_toolkit.data_processing as data_processing current_path", "\"The minimum of distance occurs at 0.000 ns.\\n\" line_4 = \"The distance (149.000", "test_slice_data(): data = np.arange(100) data_unchaged = data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20) data_2 =", "2, 4, 6, 8]) yy2 = np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078] )", "c1 = 1.38064852 * 6.022 * T / 1000 c2 = np.pi /", "= np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy3 = np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623,", "-3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy3 = np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] )", "= x2[:5], y2[:5] x3, y3 = x3[:5], y3[:5] # Expected results xx1 =", "(ns)\" y_label = \"Distance (nm)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = (", "time series x_label = \"Time (ns)\" y_label = \"Distance (nm)\" data_processing.analyze_data(x, y, x_label,", "list(y1) == [3, 4, 5, 6, 7, 8] assert len(x2) == 3000 assert", "== (len(x2) - 1) * 1 assert int(np.sum(np.diff(x3))) == (len(x3) - 1) *", "f, T), expected ) def test_slice_data(): data = np.arange(100) data_unchaged = 
data_processing.slice_data(data) data_1", "# Below we test from reading the file to cleaning the data x2,", "we test from reading the file to cleaning the data x2, y2 =", "= data_processing.read_2d_data(fes_file) # Case 2: not readable by np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file)", "= np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078] ) xx3 = np.array([-3.14159265, -3.0787608, -3.01592895,", "np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file) # Case 2: not readable by np.loadtxt x2,", "-2.89026524]) yy3 = np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1,", "# Test 2: When input data is a time series x_label = \"Time", "= data_processing.read_2d_data(fes_file, col_idx=4) # Here we only compare the first 5 elements to", "np import MD_plotting_toolkit.data_processing as data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\") output_path", "assert len(y2) == 3000 assert len(x3) == 1501 assert len(y3) == 1501 assert", "ns is closet to the average.\\n\" texts = [line_1, line_2, line_3, line_4] infile", "line_3, line_4] infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is", "\"The average of distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\\n\" ) line_2", "/ (c1 * c3), \"kJ/mol to kcal/mol\": c3, \"kcal/mol to kJ/mol\": 1 /", "package to visualize the results obtained from MD # # # # Written", "(len(x3) - 1) * 2 def test_scale_data(): f = 2 T = 300", "which occurs at 99.000 deg.\\n\" line_2 = \"Minimum of free energy: 100.000 kT,", "elements to save up some space x1, y1 = x1[:5], y1[:5] x2, y2", "y_label = \"Free energy (kT)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = \"Maximum", "+ \"/fes.dat\" potential_file = 
input_path + \"/potential.xvg\" hills_corrupted = input_path + \"/corrupted_HILLS\" dhdl_corrupted", "np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file) # Case 3: Non-default col_idx x3, y3 =", "== [3, 4, 5, 6, 7, 8] assert len(x2) == 3000 assert len(y2)", "= \"Dihedral (deg)\" y_label = \"Free energy (kT)\" data_processing.analyze_data(x, y, x_label, y_label, outfile)", "= np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2,", "data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20,", "= np.arange(100, 200) outfile = output_path + \"/test_output.txt\" # Test 1: When input", "list(x1) == [6, 2, 7, 8, 4, 3] assert list(y1) == [3, 4,", "= data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data,", "x2, y2 = data_processing.read_2d_data(potential_file) # Case 3: Non-default col_idx x3, y3 = data_processing.read_2d_data(fes_file,", "= open(outfile, \"r\") lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is True assert texts", "\"ps to ns\": 1 / 1000, \"kT to kJ/mol\": c1, \"kJ/mol to kT\":", "x2[:5], y2[:5] x3, y3 = x3[:5], y3[:5] # Expected results xx1 = np.array([-3.14159265,", "x_label = \"Time (ns)\" y_label = \"Distance (nm)\" data_processing.analyze_data(x, y, x_label, y_label, outfile)", "\"sample_inputs\") output_path = os.path.join(current_path, \"sample_outputs\") fes_file = input_path + \"/fes.dat\" potential_file = input_path", "49.000 ns is closet to the average.\\n\" texts = [line_1, line_2, line_3, line_4]", "-0.00035355, -0.00035355, -0.00035355]) xx2 = np.array([0, 2, 4, 6, 8]) 
yy2 = np.array(", "data_processing.read_2d_data(fes_file, col_idx=4) # Here we only compare the first 5 elements to save", "1) * 1 assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2 def test_scale_data():", "1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in conversion_dict: expected = data", "= \"The maximum of distance occurs at 99.000 ns.\\n\" line_3 = \"The minimum", "-3.01592895, -2.95309709, -2.89026524]) yy3 = np.array( [-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338] ) np.testing.assert_array_almost_equal(x1,", "average.\\n\" texts = [line_1, line_2, line_3, line_4] infile = open(outfile, \"r\") lines =", "minimum of distance occurs at 0.000 ns.\\n\" line_4 = \"The distance (149.000 nm)", "y3 = x3[:5], y3[:5] # Expected results xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709,", "assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1 assert int(np.sum(np.diff(x3))) == (len(x3) -", "* f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T), expected ) def test_slice_data(): data =", "potential_file = input_path + \"/potential.xvg\" hills_corrupted = input_path + \"/corrupted_HILLS\" dhdl_corrupted = input_path", "c1, \"kT to kcal/mol\": c1 * c3, \"kcal/mol to kT\": 1 / (c1", "data_2[-1] == 19 assert data_3[0] == 20 assert data_3[-1] == 79 def test_analyze_data():", "\"Minimum of free energy: 100.000 kT, which occurs at 0.000 deg.\\n\" texts =", "# # MD_plotting_toolkit, # # a python package to visualize the results obtained", "y, x_label, y_label, outfile) line_1 = ( \"The average of distance: 149.500 (RMSF:", "the average.\\n\" texts = [line_1, line_2, line_3, line_4] infile = open(outfile, \"r\") lines", "data_processing.read_2d_data(hills_corrupted) # PLUMED output x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1, y1", "* c3), \"kJ/mol to kcal/mol\": c3, \"kcal/mol to kJ/mol\": 1 / c3, \"degree", "import os import 
numpy as np import MD_plotting_toolkit.data_processing as data_processing current_path = os.path.dirname(os.path.abspath(__file__))", "/ 1000, \"kT to kJ/mol\": c1, \"kJ/mol to kT\": 1 / c1, \"kT", "os.path.join(current_path, \"sample_outputs\") fes_file = input_path + \"/fes.dat\" potential_file = input_path + \"/potential.xvg\" hills_corrupted", "for a typical time seris y1 = [1, 2, 3, 4, 5, 6,", "input data is not a time series x_label = \"Dihedral (deg)\" y_label =", "to save up some space x1, y1 = x1[:5], y1[:5] x2, y2 =", "199.000, min: 100.000)\\n\" ) line_2 = \"The maximum of distance occurs at 99.000", "# Test 1: When input data is not a time series x_label =", "MD_plotting_toolkit.data_processing as data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\") output_path = os.path.join(current_path,", "np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1 = [2,", "Case 2: not readable by np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file) # Case 3:", "time series x_label = \"Dihedral (deg)\" y_label = \"Free energy (kT)\" data_processing.analyze_data(x, y,", "<NAME> <<EMAIL>> # # Copyright (c) 2021 University of Colorado Boulder # #", "texts = [line_1, line_2] infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert", "# a python package to visualize the results obtained from MD # #", "data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0] == 20 assert data_2[-1] == 19", "data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1, y1 = data_processing.deduplicate_data(x1, y1) x2, y2 = data_processing.deduplicate_data(x2,", "3000 assert len(y2) == 3000 assert len(x3) == 1501 assert len(y3) == 1501", "is True assert 
texts == lines os.remove(outfile) # Test 2: When input data", "/ 180 c3 = 0.239005736 data = np.random.rand(100) conversion_dict = { \"ns to", "8, 4, 3] assert list(y1) == [3, 4, 5, 6, 7, 8] assert", "Boulder # # # #################################################################### \"\"\" Unit tests for the module `MD_plotting_toolkit.data_processing`. \"\"\"", "to ps\": 1000, \"ps to ns\": 1 / 1000, \"kT to kJ/mol\": c1,", "[-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078] ) xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])", "= np.arange(100) data_unchaged = data_processing.slice_data(data) data_1 = data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20)", "from MD # # # # Written by <NAME> <<EMAIL>> # # Copyright", "x1, y1 = x1[:5], y1[:5] x2, y2 = x2[:5], y2[:5] x3, y3 =", "xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy3 = np.array( [-8778.4411543, -8765.49326731, -8748.15371253,", "3] assert list(y1) == [3, 4, 5, 6, 7, 8] assert len(x2) ==", "0.239005736 data = np.random.rand(100) conversion_dict = { \"ns to ps\": 1000, \"ps to", "Case 3: Non-default col_idx x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4) # Here we only", "\"kT to kcal/mol\": c1 * c3, \"kcal/mol to kT\": 1 / (c1 *", "= [2, 4, 6, 2, 7, 8, 4, 3] # not the x-data", "data_processing.deduplicate_data(x3, y3) assert list(x1) == [6, 2, 7, 8, 4, 3] assert list(y1)", "= x1[:5], y1[:5] x2, y2 = x2[:5], y2[:5] x3, y3 = x3[:5], y3[:5]", "os import numpy as np import MD_plotting_toolkit.data_processing as data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path", "test_deduplicate_data(): x1 = [2, 4, 6, 2, 7, 8, 4, 3] # not", "to visualize the results obtained from MD # # # # Written by", ") np.testing.assert_array_almost_equal(x1, xx1) np.testing.assert_array_almost_equal(y1, yy1) np.testing.assert_array_almost_equal(x2, xx2) 
np.testing.assert_array_almost_equal(y2, yy2) np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3)", "1: readable by np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file) # Case 2: not readable", "file to cleaning the data x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output x3,", "\"Time (ns)\" y_label = \"Distance (nm)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 =", "# # a python package to visualize the results obtained from MD #", "in conversion_dict: expected = data * conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f,", "yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2 = np.array([0, 2, 4, 6,", "at 99.000 deg.\\n\" line_2 = \"Minimum of free energy: 100.000 kT, which occurs", "occurs at 99.000 ns.\\n\" line_3 = \"The minimum of distance occurs at 0.000", "def test_read_2d_data(): # Case 1: readable by np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file) #", "= 2 T = 300 c1 = 1.38064852 * 6.022 * T /", "occurs at 99.000 deg.\\n\" line_2 = \"Minimum of free energy: 100.000 kT, which", "\"Free energy (kT)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = \"Maximum of free", "truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert data_1[0] == 20 assert data_2[-1] == 19 assert data_3[0]", "+ \"/potential.xvg\" hills_corrupted = input_path + \"/corrupted_HILLS\" dhdl_corrupted = input_path + \"/corrupted_dhdl.xvg\" def", "y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output", "we only compare the first 5 elements to save up some space x1,", "Colorado Boulder # # # #################################################################### \"\"\" Unit tests for the module `MD_plotting_toolkit.data_processing`.", "= data_processing.deduplicate_data(x1, y1) x2, y2 = 
data_processing.deduplicate_data(x2, y2) x3, y3 = data_processing.deduplicate_data(x3, y3)", "[line_1, line_2, line_3, line_4] infile = open(outfile, \"r\") lines = infile.readlines() infile.close() assert", "is closet to the average.\\n\" texts = [line_1, line_2, line_3, line_4] infile =", "149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\\n\" ) line_2 = \"The maximum of", "6, 7, 8] assert len(x2) == 3000 assert len(y2) == 3000 assert len(x3)", "7, 8, 4, 3] assert list(y1) == [3, 4, 5, 6, 7, 8]", "= os.path.join(current_path, \"sample_inputs\") output_path = os.path.join(current_path, \"sample_outputs\") fes_file = input_path + \"/fes.dat\" potential_file", "to kcal/mol\": c1 * c3, \"kcal/mol to kT\": 1 / (c1 * c3),", "(nm)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = ( \"The average of distance:", "assert data_3[0] == 20 assert data_3[-1] == 79 def test_analyze_data(): x = np.arange(100)", "xx2 = np.array([0, 2, 4, 6, 8]) yy2 = np.array( [-20045.462891, -19989.603516, -19909.130859,", "4, 5, 6, 7, 8] # Below we test from reading the file", "seris y1 = [1, 2, 3, 4, 5, 6, 7, 8] # Below", "test from reading the file to cleaning the data x2, y2 = data_processing.read_2d_data(hills_corrupted)", "degree\": 1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in conversion_dict: expected =", "np.arange(100, 200) outfile = output_path + \"/test_output.txt\" # Test 1: When input data", "University of Colorado Boulder # # # #################################################################### \"\"\" Unit tests for the", "time seris y1 = [1, 2, 3, 4, 5, 6, 7, 8] #", "distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\\n\" ) line_2 = \"The maximum", "x_label = \"Dihedral (deg)\" y_label = \"Free energy (kT)\" data_processing.analyze_data(x, y, x_label, y_label,", "c2, \"radian to degree\": 1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data) for i in", "* 6.022 * T 
/ 1000 c2 = np.pi / 180 c3 =", "input_path + \"/corrupted_dhdl.xvg\" def test_read_2d_data(): # Case 1: readable by np.loadtxt x1, y1", "conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T), expected ) def test_slice_data(): data", "- 1) * 1 assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2 def", "= np.array([0, 2, 4, 6, 8]) yy2 = np.array( [-20045.462891, -19989.603516, -19909.130859, -20057.402344,", "x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS", "2021 University of Colorado Boulder # # # #################################################################### \"\"\" Unit tests for", "= data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1, y1 = data_processing.deduplicate_data(x1, y1) x2, y2 =", "test_scale_data(): f = 2 T = 300 c1 = 1.38064852 * 6.022 *", "texts == lines os.remove(outfile) # Test 2: When input data is a time", "3, 4, 5, 6, 7, 8] # Below we test from reading the", "to the average.\\n\" texts = [line_1, line_2, line_3, line_4] infile = open(outfile, \"r\")", "- 1) * 2 def test_scale_data(): f = 2 T = 300 c1", "5 elements to save up some space x1, y1 = x1[:5], y1[:5] x2,", "100.000 kT, which occurs at 0.000 deg.\\n\" texts = [line_1, line_2] infile =", "x2, y2 = data_processing.deduplicate_data(x2, y2) x3, y3 = data_processing.deduplicate_data(x3, y3) assert list(x1) ==", "data_1 = data_processing.slice_data(data, truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20)", "5, 6, 7, 8] assert len(x2) == 3000 assert len(y2) == 3000 assert", "\"kcal/mol to kT\": 1 / (c1 * c3), \"kJ/mol to kcal/mol\": c3, \"kcal/mol", "first 5 elements to save up some space x1, y1 = x1[:5], y1[:5]", "y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1, y1 = data_processing.deduplicate_data(x1, y1) x2, y2", "y_label = 
\"Distance (nm)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 = ( \"The", "* conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T), expected ) def test_slice_data():", "# # # #################################################################### \"\"\" Unit tests for the module `MD_plotting_toolkit.data_processing`. \"\"\" import", "yy3) def test_deduplicate_data(): x1 = [2, 4, 6, 2, 7, 8, 4, 3]", "the results obtained from MD # # # # Written by <NAME> <<EMAIL>>", "\"ns to ps\": 1000, \"ps to ns\": 1 / 1000, \"kT to kJ/mol\":", "x_label, y_label, outfile) line_1 = \"Maximum of free energy: 199.000 kT, which occurs", "x = np.arange(100) y = np.arange(100, 200) outfile = output_path + \"/test_output.txt\" #", "y, x_label, y_label, outfile) line_1 = \"Maximum of free energy: 199.000 kT, which", "not the x-data for a typical time seris y1 = [1, 2, 3,", "= data * conversion_dict[i] * f np.testing.assert_array_almost_equal( data_processing.scale_data(data, i, f, T), expected )", "is not a time series x_label = \"Dihedral (deg)\" y_label = \"Free energy", "truncate=20) data_2 = data_processing.slice_data(data, truncate_b=20) data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20) np.testing.assert_equal(data, data_unchaged) assert", "conversion_dict = { \"ns to ps\": 1000, \"ps to ns\": 1 / 1000,", "(deg)\" y_label = \"Free energy (kT)\" data_processing.analyze_data(x, y, x_label, y_label, outfile) line_1 =", "= \"The minimum of distance occurs at 0.000 ns.\\n\" line_4 = \"The distance", "kcal/mol\": c1 * c3, \"kcal/mol to kT\": 1 / (c1 * c3), \"kJ/mol", "assert list(y1) == [3, 4, 5, 6, 7, 8] assert len(x2) == 3000", "8] assert len(x2) == 3000 assert len(y2) == 3000 assert len(x3) == 1501", "kT, which occurs at 99.000 deg.\\n\" line_2 = \"Minimum of free energy: 100.000", "yy1) np.testing.assert_array_almost_equal(x2, xx2) np.testing.assert_array_almost_equal(y2, yy2) 
np.testing.assert_array_almost_equal(x3, xx3) np.testing.assert_array_almost_equal(y3, yy3) def test_deduplicate_data(): x1 =", "Expected results xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1 = np.array([-0.00035355, -0.00035355,", "\"degree to radian\": c2, \"radian to degree\": 1 / c2, } np.testing.assert_array_almost_equal(data_processing.scale_data(data), data)", "1501 assert len(y3) == 1501 assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1", "lines = infile.readlines() infile.close() assert os.path.isfile(outfile) is True assert texts == lines os.remove(outfile)", "# # # Written by <NAME> <<EMAIL>> # # Copyright (c) 2021 University", "series x_label = \"Dihedral (deg)\" y_label = \"Free energy (kT)\" data_processing.analyze_data(x, y, x_label,", "data_unchaged) assert data_1[0] == 20 assert data_2[-1] == 19 assert data_3[0] == 20", "assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2 def test_scale_data(): f = 2", "[1, 2, 3, 4, 5, 6, 7, 8] # Below we test from", "0.193, max: 199.000, min: 100.000)\\n\" ) line_2 = \"The maximum of distance occurs", "99.000 ns.\\n\" line_3 = \"The minimum of distance occurs at 0.000 ns.\\n\" line_4", "8] # Below we test from reading the file to cleaning the data", "typical time seris y1 = [1, 2, 3, 4, 5, 6, 7, 8]", "os.path.dirname(os.path.abspath(__file__)) input_path = os.path.join(current_path, \"sample_inputs\") output_path = os.path.join(current_path, \"sample_outputs\") fes_file = input_path +", "When input data is a time series x_label = \"Time (ns)\" y_label =", "-19812.580078] ) xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy3 = np.array( [-8778.4411543,", "output x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output x1, y1 = data_processing.deduplicate_data(x1, y1)", "-2.89026524]) yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2 = np.array([0, 2, 4,", "y1 = [1, 2, 3, 4, 5, 6, 7, 8] # 
Below we", "input data is a time series x_label = \"Time (ns)\" y_label = \"Distance", "y3 = data_processing.read_2d_data(fes_file, col_idx=4) # Here we only compare the first 5 elements", "(len(x2) - 1) * 1 assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2", "-0.00035355, -0.00035355]) xx2 = np.array([0, 2, 4, 6, 8]) yy2 = np.array( [-20045.462891,", "= [1, 2, 3, 4, 5, 6, 7, 8] # Below we test", "distance occurs at 99.000 ns.\\n\" line_3 = \"The minimum of distance occurs at", "closet to the average.\\n\" texts = [line_1, line_2, line_3, line_4] infile = open(outfile,", "(RMSF: 0.193, max: 199.000, min: 100.000)\\n\" ) line_2 = \"The maximum of distance", "y3[:5] # Expected results xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1 =", "T / 1000 c2 = np.pi / 180 c3 = 0.239005736 data =", "data_processing.deduplicate_data(x2, y2) x3, y3 = data_processing.deduplicate_data(x3, y3) assert list(x1) == [6, 2, 7,", "x_label, y_label, outfile) line_1 = ( \"The average of distance: 149.500 (RMSF: 0.193,", "np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2", "== 3000 assert len(x3) == 1501 assert len(y3) == 1501 assert int(np.sum(np.diff(x2))) ==", "ps\": 1000, \"ps to ns\": 1 / 1000, \"kT to kJ/mol\": c1, \"kJ/mol", "ns.\\n\" line_3 = \"The minimum of distance occurs at 0.000 ns.\\n\" line_4 =", "# Case 2: not readable by np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file) # Case", "save up some space x1, y1 = x1[:5], y1[:5] x2, y2 = x2[:5],", "import numpy as np import MD_plotting_toolkit.data_processing as data_processing current_path = os.path.dirname(os.path.abspath(__file__)) input_path =", "-19989.603516, -19909.130859, -20057.402344, -19812.580078] ) xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]) yy3", "MD_plotting_toolkit, # # a python package to visualize the results 
obtained from MD", "assert list(x1) == [6, 2, 7, 8, 4, 3] assert list(y1) == [3,", "\"/fes.dat\" potential_file = input_path + \"/potential.xvg\" hills_corrupted = input_path + \"/corrupted_HILLS\" dhdl_corrupted =", "\"The maximum of distance occurs at 99.000 ns.\\n\" line_3 = \"The minimum of", "kT, which occurs at 0.000 deg.\\n\" texts = [line_1, line_2] infile = open(outfile,", "# Here we only compare the first 5 elements to save up some", "c3, \"kcal/mol to kJ/mol\": 1 / c3, \"degree to radian\": c2, \"radian to", "= { \"ns to ps\": 1000, \"ps to ns\": 1 / 1000, \"kT", "outfile) line_1 = ( \"The average of distance: 149.500 (RMSF: 0.193, max: 199.000,", "data_processing.read_2d_data(fes_file) # Case 2: not readable by np.loadtxt x2, y2 = data_processing.read_2d_data(potential_file) #", "by np.loadtxt x1, y1 = data_processing.read_2d_data(fes_file) # Case 2: not readable by np.loadtxt", "-3.01592895, -2.95309709, -2.89026524]) yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]) xx2 = np.array([0,", "# # # # Written by <NAME> <<EMAIL>> # # Copyright (c) 2021", "energy: 100.000 kT, which occurs at 0.000 deg.\\n\" texts = [line_1, line_2] infile" ]
[ "return_complex \"\"\" prepare window parameter type of window - \"hann\": hanning window -", "self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ): super().__init__() fmin = 0 if", "== \"conv\": x = x.unsqueeze(-1).transpose(1, 2) x = torch.mean(self.window_conv(x).transpose(1, 2), -1) window =", "if fmin is None else fmin fmax = fs / 2 if fmax", "Distributed under terms of the MIT license. \"\"\" \"\"\" import librosa import scipy.signal", "hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size = hop_size self.fft_size", "fmax = fs / 2 if fmax is None else fmax mel_basis =", "python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright (c) 2021", "return_complex=self.return_complex, ) return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__() self.register_parameter( \"mean\",", "= hop_size self.fft_size = fft_size self.win_length = fft_size if win_length is None else", "MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter(", "-*- # vim:fenc=utf-8 # # Copyright (c) 2021 <NAME> <<EMAIL>> # # Distributed", "self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def", "= window if window == \"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win),", "of window - \"hann\": hanning window - \"param\": parameter-based window - \"conv\": convolution-based", "- \"hann\": hanning window - \"param\": parameter-based window - \"conv\": 
convolution-based window \"\"\"", "= fft_size if win_length is None else win_length self.center = center self.pad_mode =", "self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer = None def forward(self, x): stft = self.stft_layer(x)", "== \"conv\": kernel_size = 65 self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1,", ") self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self, x, ): mlfb =", "def forward(self, x): stft = self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0] ** 2 +", "scipy.signal import torch import torch.nn as nn class MLFBLayer(torch.nn.Module): def __init__( self, fs=22050,", "mlfb = self.mlfb_layer(amplitude) if self.scaler_layer is not None: mlfb = self.scaler_layer(mlfb) return mlfb", "nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def forward(self, x): return (x - self.mean) /", "\"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def forward(self, x): return (x - self.mean)", "if fmax is None else fmax mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin,", "fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size = hop_size self.fft_size =", "= center self.pad_mode = pad_mode self.return_complex = return_complex \"\"\" prepare window parameter type", "class STFTLayer(torch.nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False,", "MLFBLayer(fs, fft_size, n_mels, fmin, fmax) if scaler is not None: self.scaler_layer = MLFBScalerLayer(scaler)", "x): stft = self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[..., 1]", "fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, 
pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size = hop_size", "forward(self, x): return (x - self.mean) / self.std class LogMelFilterBankLayer(nn.Module): def __init__( self,", "/ 2 if fmax is None else fmax mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size,", "win_length self.center = center self.pad_mode = pad_mode self.return_complex = return_complex \"\"\" prepare window", "fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None, ): super().__init__()", "return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False),", "): super().__init__() self.stft_layer = STFTLayer( fs, hop_size, fft_size, win_length, window, center=center, pad_mode=pad_mode, )", "self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None, ):", "pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax) if scaler is not", "__init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size", "fmin = 0 if fmin is None else fmin fmax = fs /", "librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def", "self.stft_layer = STFTLayer( fs, hop_size, fft_size, win_length, window, center=center, pad_mode=pad_mode, ) self.mlfb_layer =", "parameter type of window - \"hann\": hanning window - \"param\": parameter-based window -", "def forward( self, x, ): mlfb = torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10()", "self, x, ): mlfb = 
torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10() return mlfb", "getattr(torch, f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype, device=x.device) stft = torch.stft( x, n_fft=self.fft_size, win_length=self.win_length,", "stft = torch.stft( x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return", "n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ): super().__init__() fmin = 0 if fmin is None", "= torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] ** 2) mlfb = self.mlfb_layer(amplitude)", "window == \"conv\": kernel_size = 65 self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size,", "nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window == \"conv\": kernel_size = 65 self.window_conv = nn.Sequential(", "== \"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window", "x.unsqueeze(-1).transpose(1, 2) x = torch.mean(self.window_conv(x).transpose(1, 2), -1) window = None else: f =", "window = f(self.win_length, dtype=x.dtype, device=x.device) stft = torch.stft( x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window,", "eps=1.0e-10 ): super().__init__() fmin = 0 if fmin is None else fmin fmax", "fmin=None, fmax=None, eps=1.0e-10 ): super().__init__() fmin = 0 if fmin is None else", "parameter-based window - \"conv\": convolution-based window \"\"\" self.window_type = window if window ==", "Copyright (c) 2021 <NAME> <<EMAIL>> # # Distributed under terms of the MIT", "if self.window_type == \"param\": window = self.window elif self.window_type == \"conv\": x =", "fmax) if scaler is not None: self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer = None", "fmax=None, 
eps=1.0e-10 ): super().__init__() fmin = 0 if fmin is None else fmin", "= fs / 2 if fmax is None else fmax mel_basis = librosa.filters.mel(", "self.window elif self.window_type == \"conv\": x = x.unsqueeze(-1).transpose(1, 2) x = torch.mean(self.window_conv(x).transpose(1, 2),", "** 2 + stft[..., 1] ** 2) mlfb = self.mlfb_layer(amplitude) if self.scaler_layer is", "= torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module): def __init__(", "self.window_type == \"param\": window = self.window elif self.window_type == \"conv\": x = x.unsqueeze(-1).transpose(1,", "is None else win_length self.center = center self.pad_mode = pad_mode self.return_complex = return_complex", "None else fmin fmax = fs / 2 if fmax is None else", "kernel_size = 65 self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size -", "MLFBLayer(torch.nn.Module): def __init__( self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ): super().__init__() fmin", "= MLFBLayer(fs, fft_size, n_mels, fmin, fmax) if scaler is not None: self.scaler_layer =", "= f(self.win_length, dtype=x.dtype, device=x.device) stft = torch.stft( x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center,", "fft_size, n_mels, fmin, fmax) if scaler is not None: self.scaler_layer = MLFBScalerLayer(scaler) else:", "= eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self, x, ): mlfb = torch.matmul(x, self.mel_basis)", "nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def forward(self, x):", "prepare window parameter type of window - \"hann\": hanning window - \"param\": parameter-based", "\"\"\" self.window_type = window if window 
== \"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter(", "pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__() self.register_parameter(", "of the MIT license. \"\"\" \"\"\" import librosa import scipy.signal import torch import", "= window def forward(self, x): if self.window_type == \"param\": window = self.window elif", "(c) 2021 <NAME> <<EMAIL>> # # Distributed under terms of the MIT license.", "stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), )", "import torch.nn as nn class MLFBLayer(torch.nn.Module): def __init__( self, fs=22050, fft_size=1024, n_mels=80, fmin=None,", "# -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright (c) 2021 <NAME>", "fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ): super().__init__() fmin = 0 if fmin", "stride=1, padding=(kernel_size - 1) // 2, ), nn.Sigmoid(), ) else: self.window = window", "): super().__init__() self.hop_size = hop_size self.fft_size = fft_size self.win_length = fft_size if win_length", "if win_length is None else win_length self.center = center self.pad_mode = pad_mode self.return_complex", "<NAME> <<EMAIL>> # # Distributed under terms of the MIT license. 
\"\"\" \"\"\"", "is None else fmin fmax = fs / 2 if fmax is None", "- \"param\": parameter-based window - \"conv\": convolution-based window \"\"\" self.window_type = window if", "fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ): super().__init__() fmin = 0 if fmin is", "\"conv\": convolution-based window \"\"\" self.window_type = window if window == \"param\": win =", "= pad_mode self.return_complex = return_complex \"\"\" prepare window parameter type of window -", "2).float() class MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter(", "= torch.clamp(mlfb, min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024,", "MLFBScalerLayer(scaler) else: self.scaler_layer = None def forward(self, x): stft = self.stft_layer(x) amplitude =", "None else win_length self.center = center self.pad_mode = pad_mode self.return_complex = return_complex \"\"\"", "pad_mode self.return_complex = return_complex \"\"\" prepare window parameter type of window - \"hann\":", "fmin is None else fmin fmax = fs / 2 if fmax is", "dtype=x.dtype, device=x.device) stft = torch.stft( x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex,", "torch.mean(self.window_conv(x).transpose(1, 2), -1) window = None else: f = getattr(torch, f\"{self.window}_window\") window =", "forward(self, x): stft = self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[...,", "def __init__(self, scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(),", "n_mels, fmin, fmax) if 
scaler is not None: self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer", "** 2) mlfb = self.mlfb_layer(amplitude) if self.scaler_layer is not None: mlfb = self.scaler_layer(mlfb)", "STFTLayer(torch.nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False, ):", "win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None, ): super().__init__() self.stft_layer = STFTLayer(", "scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window == \"conv\": kernel_size =", "MIT license. \"\"\" \"\"\" import librosa import scipy.signal import torch import torch.nn as", "__init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None,", "window = None else: f = getattr(torch, f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype, device=x.device)", "None else fmax mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps", ") elif window == \"conv\": kernel_size = 65 self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1,", "# Distributed under terms of the MIT license. 
\"\"\" \"\"\" import librosa import", "vim:fenc=utf-8 # # Copyright (c) 2021 <NAME> <<EMAIL>> # # Distributed under terms", "= STFTLayer( fs, hop_size, fft_size, win_length, window, center=center, pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs,", "self.return_complex = return_complex \"\"\" prepare window parameter type of window - \"hann\": hanning", "fs, hop_size, fft_size, win_length, window, center=center, pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels,", "= torch.stft( x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1,", "__init__( self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ): super().__init__() fmin = 0", "forward(self, x): if self.window_type == \"param\": window = self.window elif self.window_type == \"conv\":", "fft_size if win_length is None else win_length self.center = center self.pad_mode = pad_mode", "mlfb = torch.clamp(mlfb, min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module): def __init__( self, fs=22050, hop_size=256,", "else win_length self.center = center self.pad_mode = pad_mode self.return_complex = return_complex \"\"\" prepare", "x): if self.window_type == \"param\": window = self.window elif self.window_type == \"conv\": x", "requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def forward(self, x): return", "fmin=fmin, fmax=fmax, ) self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self, x, ):", "nn class MLFBLayer(torch.nn.Module): def __init__( self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ):", "n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self, x,", "- 
self.mean) / self.std class LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None,", "n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module):", "None def forward(self, x): stft = self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0] ** 2", "= fft_size self.win_length = fft_size if win_length is None else win_length self.center =", "-1) window = None else: f = getattr(torch, f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype,", "# # Copyright (c) 2021 <NAME> <<EMAIL>> # # Distributed under terms of", "window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def __init__(self, scaler):", "min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\",", "hop_size, fft_size, win_length, window, center=center, pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin,", "not None: self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer = None def forward(self, x): stft", "self.window = window def forward(self, x): if self.window_type == \"param\": window = self.window", "scaler=None, ): super().__init__() self.stft_layer = STFTLayer( fs, hop_size, fft_size, win_length, window, center=center, pad_mode=pad_mode,", "win_length, window, center=center, pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax) if", "x = x.unsqueeze(-1).transpose(1, 2) x = torch.mean(self.window_conv(x).transpose(1, 2), -1) window = None else:", "fs / 2 if fmax is None else fmax mel_basis = librosa.filters.mel( sr=fs,", "device=x.device) stft = torch.stft( x, n_fft=self.fft_size, 
win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, )", "return_complex=False, ): super().__init__() self.hop_size = hop_size self.fft_size = fft_size self.win_length = fft_size if", "under terms of the MIT license. \"\"\" \"\"\" import librosa import scipy.signal import", "== \"param\": window = self.window elif self.window_type == \"conv\": x = x.unsqueeze(-1).transpose(1, 2)", "1) // 2, ), nn.Sigmoid(), ) else: self.window = window def forward(self, x):", "= self.window elif self.window_type == \"conv\": x = x.unsqueeze(-1).transpose(1, 2) x = torch.mean(self.window_conv(x).transpose(1,", "center self.pad_mode = pad_mode self.return_complex = return_complex \"\"\" prepare window parameter type of", "in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2, ), nn.Sigmoid(), ) else:", "#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright", "= librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float())", "self.win_length = fft_size if win_length is None else win_length self.center = center self.pad_mode", "torch import torch.nn as nn class MLFBLayer(torch.nn.Module): def __init__( self, fs=22050, fft_size=1024, n_mels=80,", "import librosa import scipy.signal import torch import torch.nn as nn class MLFBLayer(torch.nn.Module): def", "<reponame>abeersaqib/crank<filename>crank/net/module/mlfb.py #! 
/usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # #", "hop_size self.fft_size = fft_size self.win_length = fft_size if win_length is None else win_length", "window if window == \"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True)", ") def forward(self, x): return (x - self.mean) / self.std class LogMelFilterBankLayer(nn.Module): def", "win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window == \"conv\":", "fmax is None else fmax mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax,", "win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size = hop_size self.fft_size = fft_size", ") return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(),", "0 if fmin is None else fmin fmax = fs / 2 if", "= None else: f = getattr(torch, f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype, device=x.device) stft", "eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self, x, ): mlfb = torch.matmul(x, self.mel_basis) mlfb", "torch.from_numpy(mel_basis.T).float()) def forward( self, x, ): mlfb = torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb,", "fmin fmax = fs / 2 if fmax is None else fmax mel_basis", "else fmin fmax = fs / 2 if fmax is None else fmax", "pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None, ): super().__init__() self.stft_layer = STFTLayer( fs, hop_size, fft_size,", "window parameter type of window - \"hann\": hanning window - \"param\": parameter-based window", "/ self.std class 
LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True,", "torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module): def __init__( self,", "def forward(self, x): if self.window_type == \"param\": window = self.window elif self.window_type ==", "window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None, ): super().__init__() self.stft_layer = STFTLayer( fs,", "= return_complex \"\"\" prepare window parameter type of window - \"hann\": hanning window", "else: self.scaler_layer = None def forward(self, x): stft = self.stft_layer(x) amplitude = torch.sqrt(stft[...,", "\"conv\": kernel_size = 65 self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size", "), ) def forward(self, x): return (x - self.mean) / self.std class LogMelFilterBankLayer(nn.Module):", "def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False, ): super().__init__()", "the MIT license. 
\"\"\" \"\"\" import librosa import scipy.signal import torch import torch.nn", "fmax mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps = eps", "= torch.mean(self.window_conv(x).transpose(1, 2), -1) window = None else: f = getattr(torch, f\"{self.window}_window\") window", "class MLFBLayer(torch.nn.Module): def __init__( self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ): super().__init__()", "torch.nn as nn class MLFBLayer(torch.nn.Module): def __init__( self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None,", "def __init__( self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10 ): super().__init__() fmin =", "amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] ** 2) mlfb =", "self.window_type = window if window == \"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\",", "= MLFBScalerLayer(scaler) else: self.scaler_layer = None def forward(self, x): stft = self.stft_layer(x) amplitude", "x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1, 2).float() class", "librosa import scipy.signal import torch import torch.nn as nn class MLFBLayer(torch.nn.Module): def __init__(", "elif window == \"conv\": kernel_size = 65 self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24,", "<<EMAIL>> # # Distributed under terms of the MIT license. 
\"\"\" \"\"\" import", "window == \"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif", "else: self.window = window def forward(self, x): if self.window_type == \"param\": window =", "\"\"\" import librosa import scipy.signal import torch import torch.nn as nn class MLFBLayer(torch.nn.Module):", "self.center = center self.pad_mode = pad_mode self.return_complex = return_complex \"\"\" prepare window parameter", "def forward(self, x): return (x - self.mean) / self.std class LogMelFilterBankLayer(nn.Module): def __init__(", "STFTLayer( fs, hop_size, fft_size, win_length, window, center=center, pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs, fft_size,", "as nn class MLFBLayer(torch.nn.Module): def __init__( self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10", "mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps = eps self.register_buffer(\"mel_basis\",", "return mlfb class STFTLayer(torch.nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True,", "f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype, device=x.device) stft = torch.stft( x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size,", "- \"conv\": convolution-based window \"\"\" self.window_type = window if window == \"param\": win", "hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def __init__(self,", "self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self, x, ): mlfb = torch.matmul(x, self.mel_basis) mlfb =", "elif self.window_type == \"conv\": x = x.unsqueeze(-1).transpose(1, 2) x = 
torch.mean(self.window_conv(x).transpose(1, 2), -1)", "): mlfb = torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module):", "\"\"\" prepare window parameter type of window - \"hann\": hanning window - \"param\":", "self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def forward(self, x): return (x -", "type of window - \"hann\": hanning window - \"param\": parameter-based window - \"conv\":", "fft_size, win_length, window, center=center, pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax)", "self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2,", "1] ** 2) mlfb = self.mlfb_layer(amplitude) if self.scaler_layer is not None: mlfb =", "/usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright (c)", "nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2, ), nn.Sigmoid(), )", "center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__()", "\"param\": parameter-based window - \"conv\": convolution-based window \"\"\" self.window_type = window if window", "window def forward(self, x): if self.window_type == \"param\": window = self.window elif self.window_type", "window - \"param\": parameter-based window - \"conv\": convolution-based window \"\"\" self.window_type = window", "2), -1) window = None else: f = getattr(torch, f\"{self.window}_window\") window = f(self.win_length,", ") self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax) if scaler is not None:", "super().__init__() fmin = 0 if fmin is None else fmin fmax = fs", "= 0 if fmin is None else fmin fmax = fs / 2", "- 1) // 2, ), nn.Sigmoid(), ) else: 
self.window = window def forward(self,", "stft = self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] **", "= x.unsqueeze(-1).transpose(1, 2) x = torch.mean(self.window_conv(x).transpose(1, 2), -1) window = None else: f", "self.hop_size = hop_size self.fft_size = fft_size self.win_length = fft_size if win_length is None", "window - \"hann\": hanning window - \"param\": parameter-based window - \"conv\": convolution-based window", "= nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2, ),", "__init__(self, scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False", ") else: self.window = window def forward(self, x): if self.window_type == \"param\": window", "n_mels=80, fmin=None, fmax=None, scaler=None, ): super().__init__() self.stft_layer = STFTLayer( fs, hop_size, fft_size, win_length,", "fmin=None, fmax=None, scaler=None, ): super().__init__() self.stft_layer = STFTLayer( fs, hop_size, fft_size, win_length, window,", "else: f = getattr(torch, f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype, device=x.device) stft = torch.stft(", "None else: f = getattr(torch, f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype, device=x.device) stft =", "else fmax mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps =", "window \"\"\" self.window_type = window if window == \"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float)", "torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def forward(self, x): return (x - self.mean) / self.std", "-*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright (c) 2021 <NAME> <<EMAIL>>", "utf-8 -*- # vim:fenc=utf-8 # # Copyright 
(c) 2021 <NAME> <<EMAIL>> # #", "self.mean) / self.std class LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\",", "fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None, ): super().__init__() self.stft_layer =", "torch.clamp(mlfb, min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None,", "pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size = hop_size self.fft_size = fft_size self.win_length = fft_size", "2) mlfb = self.mlfb_layer(amplitude) if self.scaler_layer is not None: mlfb = self.scaler_layer(mlfb) return", "scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ),", "self.fft_size = fft_size self.win_length = fft_size if win_length is None else win_length self.center", "mlfb = torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module): def", "\"param\": window = self.window elif self.window_type == \"conv\": x = x.unsqueeze(-1).transpose(1, 2) x", "nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2, ), nn.Sigmoid(),", "= None def forward(self, x): stft = self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0] **", "2 + stft[..., 1] ** 2) mlfb = self.mlfb_layer(amplitude) if self.scaler_layer is not", "mlfb class STFTLayer(torch.nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\",", "class LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, 
win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80,", "is not None: self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer = None def forward(self, x):", "license. \"\"\" \"\"\" import librosa import scipy.signal import torch import torch.nn as nn", "\"\"\" \"\"\" import librosa import scipy.signal import torch import torch.nn as nn class", "coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright (c) 2021 <NAME> <<EMAIL>> #", "65 self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) //", "# Copyright (c) 2021 <NAME> <<EMAIL>> # # Distributed under terms of the", "super().__init__() self.stft_layer = STFTLayer( fs, hop_size, fft_size, win_length, window, center=center, pad_mode=pad_mode, ) self.mlfb_layer", "2 if fmax is None else fmax mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels,", "self.std class LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\",", "padding=(kernel_size - 1) // 2, ), nn.Sigmoid(), ) else: self.window = window def", "self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax) if scaler is not None: self.scaler_layer", "fft_size self.win_length = fft_size if win_length is None else win_length self.center = center", "self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self, x, ): mlfb = torch.matmul(x,", "fmin, fmax) if scaler is not None: self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer =", "sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward(", "\"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter( 
torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def forward(self,", "torch.stft( x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1, 2).float()", "# vim:fenc=utf-8 # # Copyright (c) 2021 <NAME> <<EMAIL>> # # Distributed under", "convolution-based window \"\"\" self.window_type = window if window == \"param\": win = scipy.signal.get_window(\"hann\",", "window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size = hop_size self.fft_size = fft_size self.win_length", "2, ), nn.Sigmoid(), ) else: self.window = window def forward(self, x): if self.window_type", "), nn.Sigmoid(), ) else: self.window = window def forward(self, x): if self.window_type ==", "stft[..., 1] ** 2) mlfb = self.mlfb_layer(amplitude) if self.scaler_layer is not None: mlfb", "// 2, ), nn.Sigmoid(), ) else: self.window = window def forward(self, x): if", "+ stft[..., 1] ** 2) mlfb = self.mlfb_layer(amplitude) if self.scaler_layer is not None:", "self.window_type == \"conv\": x = x.unsqueeze(-1).transpose(1, 2) x = torch.mean(self.window_conv(x).transpose(1, 2), -1) window", "None: self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer = None def forward(self, x): stft =", "nn.Sigmoid(), ) else: self.window = window def forward(self, x): if self.window_type == \"param\":", "win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode, return_complex=self.return_complex, ) return stft.transpose(1, 2).float() class MLFBScalerLayer(nn.Module): def", "\"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window == \"conv\": kernel_size = 65 self.window_conv =", "window = self.window elif self.window_type == \"conv\": x = x.unsqueeze(-1).transpose(1, 2) x =", "x = 
torch.mean(self.window_conv(x).transpose(1, 2), -1) window = None else: f = getattr(torch, f\"{self.window}_window\")", "self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window == \"conv\": kernel_size = 65 self.window_conv", "window - \"conv\": convolution-based window \"\"\" self.window_type = window if window == \"param\":", "self.pad_mode = pad_mode self.return_complex = return_complex \"\"\" prepare window parameter type of window", "2021 <NAME> <<EMAIL>> # # Distributed under terms of the MIT license. \"\"\"", "requires_grad=False ), ) def forward(self, x): return (x - self.mean) / self.std class", "scaler is not None: self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer = None def forward(self,", "import scipy.signal import torch import torch.nn as nn class MLFBLayer(torch.nn.Module): def __init__( self,", "fmax=fmax, ) self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self, x, ): mlfb", "f(self.win_length, dtype=x.dtype, device=x.device) stft = torch.stft( x, n_fft=self.fft_size, win_length=self.win_length, hop_length=self.hop_size, window=window, center=self.center, pad_mode=self.pad_mode,", "self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window == \"conv\": kernel_size = 65", "if window == \"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) )", "\"hann\": hanning window - \"param\": parameter-based window - \"conv\": convolution-based window \"\"\" self.window_type", "if scaler is not None: self.scaler_layer = MLFBScalerLayer(scaler) else: self.scaler_layer = None def", "hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None, ): super().__init__() 
self.stft_layer", ") self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), ) def forward(self, x): return (x", "hanning window - \"param\": parameter-based window - \"conv\": convolution-based window \"\"\" self.window_type =", "center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None, scaler=None, ): super().__init__() self.stft_layer = STFTLayer( fs, hop_size,", "0] ** 2 + stft[..., 1] ** 2) mlfb = self.mlfb_layer(amplitude) if self.scaler_layer", "requires_grad=True) ) elif window == \"conv\": kernel_size = 65 self.window_conv = nn.Sequential( nn.Conv1d(", "\"param\": win = scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window ==", "= self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] ** 2)", "self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] ** 2) mlfb", "center=center, pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax) if scaler is", "= 65 self.window_conv = nn.Sequential( nn.Conv1d( in_channels=1, out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1)", "LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None,", "kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2, ), nn.Sigmoid(), ) else: self.window =", "forward( self, x, ): mlfb = torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10() return", "n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, ) self.eps = eps self.register_buffer(\"mel_basis\", torch.from_numpy(mel_basis.T).float()) def forward( self,", "is None else fmax mel_basis = librosa.filters.mel( sr=fs, n_fft=fft_size, n_mels=n_mels, fmin=fmin, fmax=fmax, )", "self.scaler_layer = None def 
forward(self, x): stft = self.stft_layer(x) amplitude = torch.sqrt(stft[..., 0]", "def __init__( self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", n_mels=80, fmin=None, fmax=None,", "out_channels=24, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2, ), nn.Sigmoid(), ) else: self.window", "super().__init__() self.hop_size = hop_size self.fft_size = fft_size self.win_length = fft_size if win_length is", "x, ): mlfb = torch.matmul(x, self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10() return mlfb class", "(x - self.mean) / self.std class LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050, hop_size=256, fft_size=1024,", "fmax=None, scaler=None, ): super().__init__() self.stft_layer = STFTLayer( fs, hop_size, fft_size, win_length, window, center=center,", "2) x = torch.mean(self.window_conv(x).transpose(1, 2), -1) window = None else: f = getattr(torch,", "class MLFBScalerLayer(nn.Module): def __init__(self, scaler): super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\",", "x): return (x - self.mean) / self.std class LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050,", "win_length is None else win_length self.center = center self.pad_mode = pad_mode self.return_complex =", "= scipy.signal.get_window(\"hann\", self.win_length).astype(float) self.register_parameter( \"window\", nn.Parameter(torch.from_numpy(win), requires_grad=True) ) elif window == \"conv\": kernel_size", "import torch import torch.nn as nn class MLFBLayer(torch.nn.Module): def __init__( self, fs=22050, fft_size=1024,", "torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] ** 2) mlfb = self.mlfb_layer(amplitude) if", "): super().__init__() fmin = 0 if fmin is None else fmin fmax =", "center=True, pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size = hop_size 
self.fft_size = fft_size self.win_length =", "self.mel_basis) mlfb = torch.clamp(mlfb, min=self.eps).log10() return mlfb class STFTLayer(torch.nn.Module): def __init__( self, fs=22050,", "super().__init__() self.register_parameter( \"mean\", nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False), ) self.register_parameter( \"std\", nn.Parameter( torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False ), )", "return (x - self.mean) / self.std class LogMelFilterBankLayer(nn.Module): def __init__( self, fs=22050, hop_size=256,", "terms of the MIT license. \"\"\" \"\"\" import librosa import scipy.signal import torch", "= getattr(torch, f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype, device=x.device) stft = torch.stft( x, n_fft=self.fft_size,", "window, center=center, pad_mode=pad_mode, ) self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax) if scaler", "\"conv\": x = x.unsqueeze(-1).transpose(1, 2) x = torch.mean(self.window_conv(x).transpose(1, 2), -1) window = None", "self, fs=22050, hop_size=256, fft_size=1024, win_length=None, window=\"hann\", center=True, pad_mode=\"reflect\", return_complex=False, ): super().__init__() self.hop_size =", "f = getattr(torch, f\"{self.window}_window\") window = f(self.win_length, dtype=x.dtype, device=x.device) stft = torch.stft( x,", "# # Distributed under terms of the MIT license. \"\"\" \"\"\" import librosa" ]
[ "to run the main program\"\"\" def main(): the_file = r\"message-cipher.txt\" message = read_from_a_file(the_file)", "if key_index == len(key): key_index = 0 else: decrypted_message += symbol return decrypted_message", "r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message) key = input(\"Enter a key for the cipher:", "file.close() return the_string \"\"\"This takes in a string and writes that string to", "decrypted_message = decrypt(key,message) print(decrypted_message) new_file = the_file[:-4] new_file = new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file)", "encrypted_index %= len(alphabet) if symbol.islower(): decrypted_message += alphabet[encrypted_index] elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper()", "def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return the_string \"\"\"This takes in a string and", "key_index = 0 key = key.lower() for symbol in message: encrypted_index = alphabet.find(symbol)", "takes in a string and writes that string to a text file\"\"\" def", "a key for the cipher: \") decrypted_message = decrypt(key,message) print(decrypted_message) new_file = the_file[:-4]", "= input(\"Enter a key for the cipher: \") decrypted_message = decrypt(key,message) print(decrypted_message) new_file", "%= len(alphabet) if symbol.islower(): decrypted_message += alphabet[encrypted_index] elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index", "that string to a text file\"\"\" def write_to_a_file(message, the_file): file = open(the_file,\"w\") file.write(message)", "in a string and writes that string to a text file\"\"\" def write_to_a_file(message,", "= alphabet.find(symbol) if encrypted_index != -1: encrypted_index -= alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if", "+= 1 if key_index == len(key): key_index = 0 else: decrypted_message += symbol", "encrypted_index = alphabet.find(symbol) if 
encrypted_index != -1: encrypted_index -= alphabet.find(key[key_index]) encrypted_index %= len(alphabet)", "returns a string of the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return the_string", "run the main program\"\"\" def main(): the_file = r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message)", "write_to_a_file(message, the_file): file = open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this to run the main", "\"\"\"Call this to run the main program\"\"\" def main(): the_file = r\"message-cipher.txt\" message", "a string of the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return the_string \"\"\"This", "input(\"Enter a key for the cipher: \") decrypted_message = decrypt(key,message) print(decrypted_message) new_file =", "2, problem 2 \"\"\"This reads from a text file and returns a string", "decrypt(key,message): decrypted_message = \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index = 0 key = key.lower()", "print(decrypted_message) new_file = the_file[:-4] new_file = new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a", "text file and returns a string of the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read()", "a text file and returns a string of the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r')", "decrypts a message given a key\"\"\" def decrypt(key,message): decrypted_message = \"\" alphabet =", "\"\"\"This decrypts a message given a key\"\"\" def decrypt(key,message): decrypted_message = \"\" alphabet", "alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index = 0 key = key.lower() for symbol in message:", "for symbol in message: encrypted_index = alphabet.find(symbol) if encrypted_index != -1: encrypted_index -=", "-= alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if symbol.islower(): 
decrypted_message += alphabet[encrypted_index] elif symbol.isupper(): decrypted_message", "= 'abcdefghijklmnopqrstuvwxyz' key_index = 0 key = key.lower() for symbol in message: encrypted_index", "\"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message given a key\"\"\" def decrypt(key,message): decrypted_message =", "'abcdefghijklmnopqrstuvwxyz' key_index = 0 key = key.lower() for symbol in message: encrypted_index =", "the_string=file.read() file.close() return the_string \"\"\"This takes in a string and writes that string", "= \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index = 0 key = key.lower() for symbol", "\") decrypted_message = decrypt(key,message) print(decrypted_message) new_file = the_file[:-4] new_file = new_file + \"-clear.txt\"", "<filename>vigenerdecipher.py<gh_stars>0 # <NAME> # Homework 2, problem 2 \"\"\"This reads from a text", "def decrypt(key,message): decrypted_message = \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index = 0 key =", "symbol in message: encrypted_index = alphabet.find(symbol) if encrypted_index != -1: encrypted_index -= alphabet.find(key[key_index])", "alphabet.find(symbol) if encrypted_index != -1: encrypted_index -= alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if symbol.islower():", "of the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return the_string \"\"\"This takes in", "new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message given a key\"\"\" def decrypt(key,message):", "the_file): file = open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this to run the main program\"\"\"", "file.write(message) file.close() \"\"\"Call this to run the main program\"\"\" def main(): the_file =", "file and returns a string of the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close()", "the_file = 
r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message) key = input(\"Enter a key for", "= decrypt(key,message) print(decrypted_message) new_file = the_file[:-4] new_file = new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This", "encrypted_index -= alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if symbol.islower(): decrypted_message += alphabet[encrypted_index] elif symbol.isupper():", "-1: encrypted_index -= alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if symbol.islower(): decrypted_message += alphabet[encrypted_index] elif", "alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if symbol.islower(): decrypted_message += alphabet[encrypted_index] elif symbol.isupper(): decrypted_message +=", "alphabet[encrypted_index] elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index += 1 if key_index == len(key):", "def main(): the_file = r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message) key = input(\"Enter a", "file = open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this to run the main program\"\"\" def", "in message: encrypted_index = alphabet.find(symbol) if encrypted_index != -1: encrypted_index -= alphabet.find(key[key_index]) encrypted_index", "= r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message) key = input(\"Enter a key for the", "symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index += 1 if key_index == len(key): key_index =", "alphabet[encrypted_index].upper() key_index += 1 if key_index == len(key): key_index = 0 else: decrypted_message", "+ \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message given a key\"\"\" def decrypt(key,message): decrypted_message", "for the cipher: \") decrypted_message = decrypt(key,message) print(decrypted_message) new_file = the_file[:-4] new_file =", "decrypted_message += 
alphabet[encrypted_index] elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index += 1 if key_index", "writes that string to a text file\"\"\" def write_to_a_file(message, the_file): file = open(the_file,\"w\")", "given a key\"\"\" def decrypt(key,message): decrypted_message = \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index =", "read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return the_string \"\"\"This takes in a string and writes", "return the_string \"\"\"This takes in a string and writes that string to a", "open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this to run the main program\"\"\" def main(): the_file", "print(message) key = input(\"Enter a key for the cipher: \") decrypted_message = decrypt(key,message)", "= open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this to run the main program\"\"\" def main():", "text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return the_string \"\"\"This takes in a string", "the_string \"\"\"This takes in a string and writes that string to a text", "\"\"\"This takes in a string and writes that string to a text file\"\"\"", "+= alphabet[encrypted_index].upper() key_index += 1 if key_index == len(key): key_index = 0 else:", "new_file = the_file[:-4] new_file = new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message", "key = key.lower() for symbol in message: encrypted_index = alphabet.find(symbol) if encrypted_index !=", "message given a key\"\"\" def decrypt(key,message): decrypted_message = \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index", "and returns a string of the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return", "message: encrypted_index = alphabet.find(symbol) if encrypted_index != -1: encrypted_index -= alphabet.find(key[key_index]) 
encrypted_index %=", "a text file\"\"\" def write_to_a_file(message, the_file): file = open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this", "decrypted_message = \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index = 0 key = key.lower() for", "to a text file\"\"\" def write_to_a_file(message, the_file): file = open(the_file,\"w\") file.write(message) file.close() \"\"\"Call", "+= alphabet[encrypted_index] elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index += 1 if key_index ==", "a string and writes that string to a text file\"\"\" def write_to_a_file(message, the_file):", "this to run the main program\"\"\" def main(): the_file = r\"message-cipher.txt\" message =", "write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message given a key\"\"\" def decrypt(key,message): decrypted_message = \"\"", "= read_from_a_file(the_file) print(message) key = input(\"Enter a key for the cipher: \") decrypted_message", "Homework 2, problem 2 \"\"\"This reads from a text file and returns a", "string to a text file\"\"\" def write_to_a_file(message, the_file): file = open(the_file,\"w\") file.write(message) file.close()", "reads from a text file and returns a string of the text\"\"\" def", "if encrypted_index != -1: encrypted_index -= alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if symbol.islower(): decrypted_message", "1 if key_index == len(key): key_index = 0 else: decrypted_message += symbol return", "problem 2 \"\"\"This reads from a text file and returns a string of", "program\"\"\" def main(): the_file = r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message) key = input(\"Enter", "encrypted_index != -1: encrypted_index -= alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if symbol.islower(): decrypted_message +=", "the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return the_string \"\"\"This takes 
in a", "file\"\"\" def write_to_a_file(message, the_file): file = open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this to run", "and writes that string to a text file\"\"\" def write_to_a_file(message, the_file): file =", "if symbol.islower(): decrypted_message += alphabet[encrypted_index] elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index += 1", "key = input(\"Enter a key for the cipher: \") decrypted_message = decrypt(key,message) print(decrypted_message)", "= key.lower() for symbol in message: encrypted_index = alphabet.find(symbol) if encrypted_index != -1:", "symbol.islower(): decrypted_message += alphabet[encrypted_index] elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index += 1 if", "from a text file and returns a string of the text\"\"\" def read_from_a_file(the_file):", "the main program\"\"\" def main(): the_file = r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message) key", "len(alphabet) if symbol.islower(): decrypted_message += alphabet[encrypted_index] elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index +=", "key\"\"\" def decrypt(key,message): decrypted_message = \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index = 0 key", "string of the text\"\"\" def read_from_a_file(the_file): file=open(the_file,'r') the_string=file.read() file.close() return the_string \"\"\"This takes", "elif symbol.isupper(): decrypted_message += alphabet[encrypted_index].upper() key_index += 1 if key_index == len(key): key_index", "text file\"\"\" def write_to_a_file(message, the_file): file = open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this to", "main program\"\"\" def main(): the_file = r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message) key =", "= new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message given a key\"\"\" def", "2 \"\"\"This 
reads from a text file and returns a string of the", "string and writes that string to a text file\"\"\" def write_to_a_file(message, the_file): file", "decrypted_message += alphabet[encrypted_index].upper() key_index += 1 if key_index == len(key): key_index = 0", "read_from_a_file(the_file) print(message) key = input(\"Enter a key for the cipher: \") decrypted_message =", "a message given a key\"\"\" def decrypt(key,message): decrypted_message = \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz'", "file=open(the_file,'r') the_string=file.read() file.close() return the_string \"\"\"This takes in a string and writes that", "0 key = key.lower() for symbol in message: encrypted_index = alphabet.find(symbol) if encrypted_index", "<NAME> # Homework 2, problem 2 \"\"\"This reads from a text file and", "key for the cipher: \") decrypted_message = decrypt(key,message) print(decrypted_message) new_file = the_file[:-4] new_file", "# <NAME> # Homework 2, problem 2 \"\"\"This reads from a text file", "= the_file[:-4] new_file = new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message given", "key_index += 1 if key_index == len(key): key_index = 0 else: decrypted_message +=", "new_file = new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message given a key\"\"\"", "main(): the_file = r\"message-cipher.txt\" message = read_from_a_file(the_file) print(message) key = input(\"Enter a key", "\"\"\"This reads from a text file and returns a string of the text\"\"\"", "cipher: \") decrypted_message = decrypt(key,message) print(decrypted_message) new_file = the_file[:-4] new_file = new_file +", "a key\"\"\" def decrypt(key,message): decrypted_message = \"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index = 0", "# Homework 2, problem 2 \"\"\"This reads from a text file and returns", "the_file[:-4] new_file = new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts a message 
given a", "the cipher: \") decrypted_message = decrypt(key,message) print(decrypted_message) new_file = the_file[:-4] new_file = new_file", "= 0 key = key.lower() for symbol in message: encrypted_index = alphabet.find(symbol) if", "key.lower() for symbol in message: encrypted_index = alphabet.find(symbol) if encrypted_index != -1: encrypted_index", "file.close() \"\"\"Call this to run the main program\"\"\" def main(): the_file = r\"message-cipher.txt\"", "\"\" alphabet = 'abcdefghijklmnopqrstuvwxyz' key_index = 0 key = key.lower() for symbol in", "message = read_from_a_file(the_file) print(message) key = input(\"Enter a key for the cipher: \")", "def write_to_a_file(message, the_file): file = open(the_file,\"w\") file.write(message) file.close() \"\"\"Call this to run the", "!= -1: encrypted_index -= alphabet.find(key[key_index]) encrypted_index %= len(alphabet) if symbol.islower(): decrypted_message += alphabet[encrypted_index]", "decrypt(key,message) print(decrypted_message) new_file = the_file[:-4] new_file = new_file + \"-clear.txt\" write_to_a_file(decrypted_message,new_file) \"\"\"This decrypts" ]
[ "this length. # img_size_flat = data.train.images[0].shape[0] # Tuple with height and width of", "for lrx in [x/10 for x in range(5,0,-1)]: for b in range(1,1000,100): session.run(tf.global_variables_initializer())", "the images are stored in one-dimensional arrays of this length. # img_size_flat =", "from sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in", "optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \") for lrx in [x/10", "optimizer \") for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i", "cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true) cost = tf.reduce_mean(cross_entropy) ################################################ #", "############################################### # MNIST Image Classification Using Linear Regression # ################################################ # 1.1 Load", "range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra", "# Get the first images from the Test-set. # images = data.test.images[0:9] #", "= tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures # correct_prediction = tf.equal( y_pred_cls , y_true_cls)", "digits. 
num_classes = 10 data.test.cls = np.array([label.argmax() for label in data.test.labels]) ########################################### #", "Variables # ################################################### # 3.4 Optimization Iteration # feed_dict_test = { x :", "# old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v) #######################################################", "# correct_prediction = tf.equal( y_pred_cls , y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## #", "10 data.test.cls = np.array([label.argmax() for label in data.test.labels]) ########################################### # 1.5 Plot a", "are iterated # print(\"Gradient decent optimizer\") for lrx in [x/10 for x in", "y_true : y_true_batch} session.run(optimizer, feed_dict = feed_dict_train) def print_confusion_matrix(): cls_true = [np.argmax(label) for", "means how many of batchs are iterated # print(\"Gradient decent optimizer\") for lrx", "# cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true) cost = tf.reduce_mean(cross_entropy) ################################################", "rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download and read MNIST data", "adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures # correct_prediction = tf.equal( y_pred_cls ,", "Get the true classes for those images. 
cls_true = [np.argmax(oh) for oh in", "Optimization Iteration # feed_dict_test = { x : data.test.images, y_true : data.test.labels, y_true_cls", "Session # session = tf.Session() ############################################# # 3.2 Initialize Variables # ################################################### #", "tf.placeholder( tf.float32, [None, num_classes]) y_true_cls = tf.placeholder( tf.int64, [None]) ############################################## # 2.2 Variables", "# feed_dict_test = { x : data.test.images, y_true : data.test.labels, y_true_cls : [np.argmax(label)", "2.2 Variables # weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3", "the first images from the Test-set. # images = data.test.images[0:9] # Get the", "# lr = tf.placeholder(tf.float32) x = tf.placeholder( tf.float32, [None, img_size_flat]) y_true = tf.placeholder(", "for label in data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test) cm = confusion_matrix(y_true", "data.test.labels] } ############################################# # 4.2 Performance Iteration#1 # # Number of iteration means", "one_hot = True) tf.logging.set_verbosity(old_v) ####################################################### # the images are stored in one-dimensional arrays", "for each of 10 digits. num_classes = 10 data.test.cls = np.array([label.argmax() for label", "################################################ # 1.2 Download and read MNIST data # old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR)", "height and width of images used to reshape arrays. 
img_shape = (28,28) #", "in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate", "y_true_cls : [np.argmax(label) for label in data.test.labels] } ############################################# # 4.2 Performance Iteration#1", "classes for those images. cls_true = [np.argmax(oh) for oh in data.test.labels[0:9] ] ##############################################", "data.test.images, y_true : data.test.labels, y_true_cls : [np.argmax(label) for label in data.test.labels] } #############################################", "Using Linear Regression # ################################################ # 1.1 Load the libraries # import sys", "= {x : x_batch, lr: learning_rate, y_true : y_true_batch} session.run(optimizer, feed_dict = feed_dict_train)", "with incremental batch size \") session.run(tf.global_variables_initializer()) for lrx in [x/10 for x in", "gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures # correct_prediction =", ", y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1 Create TensorFlow Session #", "############################################# # 4.2 Performance Iteration#1 # # Number of iteration means how many", "Tuple with height and width of images used to reshape arrays. 
img_shape =", "3.1 Create TensorFlow Session # session = tf.Session() ############################################# # 3.2 Initialize Variables", "img_size_flat]) y_true = tf.placeholder( tf.float32, [None, num_classes]) y_true_cls = tf.placeholder( tf.int64, [None]) ##############################################", "data.test.labels[0:9] ] ############################################## # 2.1 Placeholder variables # lr = tf.placeholder(tf.float32) x =", "[np.argmax(label) for label in data.test.labels] } ############################################# # 4.2 Performance Iteration#1 # #", "in one-dimensional arrays of this length. # img_size_flat = data.train.images[0].shape[0] # Tuple with", "def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train =", "for oh in data.test.labels[0:9] ] ############################################## # 2.1 Placeholder variables # lr =", "in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix()", "one class for each of 10 digits. num_classes = 10 data.test.cls = np.array([label.argmax()", "= tf.matmul(x, weights) + bias y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1) #", "for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100)", "1.1 Load the libraries # import sys import matplotlib.pyplot as plt import tensorflow", "print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to compute the accuracy. 
acc = session.run(accuracy , feed_dict=", "in data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test) cm = confusion_matrix(y_true = cls_true,", "feed_dict_test) cm = confusion_matrix(y_true = cls_true, y_pred = cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): #", "############################################### # 2.3 Model # logits = tf.matmul(x, weights) + bias y_pred =", "= tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures # correct_prediction = tf.equal(", "Create TensorFlow Session # session = tf.Session() ############################################# # 3.2 Initialize Variables #", "to reshape arrays. img_shape = (28,28) # Number of classes, one class for", "correct_prediction = tf.equal( y_pred_cls , y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1", "import numpy as np from sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist import input_data def", "sys import matplotlib.pyplot as plt import tensorflow as tf import numpy as np", "of images used to reshape arrays. 
img_shape = (28,28) # Number of classes,", "range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra", "= tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v) ####################################################### # the", "from tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations): x_batch, y_true_batch =", "for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]:", "for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100)", "= (28,28) # Number of classes, one class for each of 10 digits.", "True) tf.logging.set_verbosity(old_v) ####################################################### # the images are stored in one-dimensional arrays of this", "feed_dict_train) def print_confusion_matrix(): cls_true = [np.argmax(label) for label in data.test.labels] cls_pred = session.run(y_pred_cls,", "in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \") for lrx", "and read MNIST data # old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot", "# 1.2 Download and read MNIST data # old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data", "cost = tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization Function # gradient_descent_optimizer = 
tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer", "length. # img_size_flat = data.train.images[0].shape[0] # Tuple with height and width of images", "Performance Iteration#1 # # Number of iteration means how many of batchs are", "Placeholder variables # lr = tf.placeholder(tf.float32) x = tf.placeholder( tf.float32, [None, img_size_flat]) y_true", "[None, num_classes]) y_true_cls = tf.placeholder( tf.int64, [None]) ############################################## # 2.2 Variables # weights", "the true classes for those images. cls_true = [np.argmax(oh) for oh in data.test.labels[0:9]", "MNIST Image Classification Using Linear Regression # ################################################ # 1.1 Load the libraries", "in data.test.labels[0:9] ] ############################################## # 2.1 Placeholder variables # lr = tf.placeholder(tf.float32) x", "of classes, one class for each of 10 digits. num_classes = 10 data.test.cls", "{x : x_batch, lr: learning_rate, y_true : y_true_batch} session.run(optimizer, feed_dict = feed_dict_train) def", "bias y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1) # 2.4 Cost Function #", "label in data.test.labels]) ########################################### # 1.5 Plot a few images # Get the", "# Use TensorFlow to compute the accuracy. acc = session.run(accuracy , feed_dict= feed_dict_test)", "of 10 digits. num_classes = 10 data.test.cls = np.array([label.argmax() for label in data.test.labels])", "Get the first images from the Test-set. # images = data.test.images[0:9] # Get", "= data.train.next_batch(batch_size= batch_size) feed_dict_train = {x : x_batch, lr: learning_rate, y_true : y_true_batch}", "Plot a few images # Get the first images from the Test-set. 
#", "= tf.equal( y_pred_cls , y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1 Create", "print_confusion_matrix(): cls_true = [np.argmax(label) for label in data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict =", "logits = tf.matmul(x, weights) + bias y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1)", "in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with incremental batch", "as plt import tensorflow as tf import numpy as np from sklearn.metrics import", ": [np.argmax(label) for label in data.test.labels] } ############################################# # 4.2 Performance Iteration#1 #", "plt import tensorflow as tf import numpy as np from sklearn.metrics import confusion_matrix", "tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v) ####################################################### # the images", "[None]) ############################################## # 2.2 Variables # weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes]))", "data.test.labels, y_true_cls : [np.argmax(label) for label in data.test.labels] } ############################################# # 4.2 Performance", "# ################################################ # 1.1 Load the libraries # import sys import matplotlib.pyplot as", "batchs are iterated # print(\"Gradient decent optimizer\") for lrx in [x/10 for x", "# logits = tf.matmul(x, weights) + bias y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred,", "i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \") 
for", "= [np.argmax(oh) for oh in data.test.labels[0:9] ] ############################################## # 2.1 Placeholder variables #", "x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train = {x : x_batch, lr: learning_rate, y_true", "matplotlib.pyplot as plt import tensorflow as tf import numpy as np from sklearn.metrics", "10 digits. num_classes = 10 data.test.cls = np.array([label.argmax() for label in data.test.labels]) ###########################################", "Model # logits = tf.matmul(x, weights) + bias y_pred = tf.nn.softmax(logits) y_pred_cls =", "# 1.5 Plot a few images # Get the first images from the", "one-dimensional arrays of this length. # img_size_flat = data.train.images[0].shape[0] # Tuple with height", "2.1 Placeholder variables # lr = tf.placeholder(tf.float32) x = tf.placeholder( tf.float32, [None, img_size_flat])", "for those images. cls_true = [np.argmax(oh) for oh in data.test.labels[0:9] ] ############################################## #", "[1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \") for lrx in", "learning_rate, y_true : y_true_batch} session.run(optimizer, feed_dict = feed_dict_train) def print_confusion_matrix(): cls_true = [np.argmax(label)", "y_true_cls = tf.placeholder( tf.int64, [None]) ############################################## # 2.2 Variables # weights = tf.Variable(tf.zeros([img_size_flat,", "= [np.argmax(label) for label in data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test) cm", "#print_confusion_matrix() print(\"Adagra optimizer \") for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer())", "bias = tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model # logits = tf.matmul(x, weights) +", "for i in [1,9,990]: 
optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \")", "= tf.argmax(y_pred, axis=1) # 2.4 Cost Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits,", "{:d} iterations, {:1.2f} learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download", "many of batchs are iterated # print(\"Gradient decent optimizer\") for lrx in [x/10", "import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size)", "+ bias y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1) # 2.4 Cost Function", "tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v) ####################################################### # the images are", "for label in data.test.labels] } ############################################# # 4.2 Performance Iteration#1 # # Number", "tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1 Create TensorFlow Session # session = tf.Session() #############################################", "measures # correct_prediction = tf.equal( y_pred_cls , y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ##############################################", "accuracy. acc = session.run(accuracy , feed_dict= feed_dict_test) # Print the accuracy. print('Accuracy :", "arrays of this length. 
# img_size_flat = data.train.images[0].shape[0] # Tuple with height and", "num_classes]) y_true_cls = tf.placeholder( tf.int64, [None]) ############################################## # 2.2 Variables # weights =", "session = tf.Session() ############################################# # 3.2 Initialize Variables # ################################################### # 3.4 Optimization", "img_shape = (28,28) # Number of classes, one class for each of 10", "data.train.images[0].shape[0] # Tuple with height and width of images used to reshape arrays.", "tf.placeholder( tf.float32, [None, img_size_flat]) y_true = tf.placeholder( tf.float32, [None, num_classes]) y_true_cls = tf.placeholder(", "i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with incremental batch size \") session.run(tf.global_variables_initializer())", "= cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to compute the accuracy. acc", "tf.float32, [None, img_size_flat]) y_true = tf.placeholder( tf.float32, [None, num_classes]) y_true_cls = tf.placeholder( tf.int64,", "################################################ # 2.5 Optimization Function # gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) #", "cls_true = [np.argmax(label) for label in data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test)", "tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model # logits = tf.matmul(x, weights) + bias y_pred", "reshape arrays. img_shape = (28,28) # Number of classes, one class for each", ", feed_dict= feed_dict_test) # Print the accuracy. 
print('Accuracy : {:2.1f}% with {:d} iterations,", "# Number of iteration means how many of batchs are iterated # print(\"Gradient", "tf.logging.set_verbosity(old_v) ####################################################### # the images are stored in one-dimensional arrays of this length.", "images used to reshape arrays. img_shape = (28,28) # Number of classes, one", "import confusion_matrix from tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations): x_batch,", "{:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download and read MNIST data # old_v", "tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures # correct_prediction = tf.equal( y_pred_cls , y_true_cls) accuracy", "= tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1) # 2.4 Cost Function # cross_entropy =", "= data.test.images[0:9] # Get the true classes for those images. cls_true = [np.argmax(oh)", "true classes for those images. cls_true = [np.argmax(oh) for oh in data.test.labels[0:9] ]", "from the Test-set. # images = data.test.images[0:9] # Get the true classes for", "= session.run(accuracy , feed_dict= feed_dict_test) # Print the accuracy. 
print('Accuracy : {:2.1f}% with", "decent optimizer\") for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i", "# ################################################### # 3.4 Optimization Iteration # feed_dict_test = { x : data.test.images,", "of batchs are iterated # print(\"Gradient decent optimizer\") for lrx in [x/10 for", "################################################ # 1.1 Load the libraries # import sys import matplotlib.pyplot as plt", "# 2.3 Model # logits = tf.matmul(x, weights) + bias y_pred = tf.nn.softmax(logits)", "3.2 Initialize Variables # ################################################### # 3.4 Optimization Iteration # feed_dict_test = {", ": data.test.labels, y_true_cls : [np.argmax(label) for label in data.test.labels] } ############################################# # 4.2", "= tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model # logits = tf.matmul(x, weights) + bias", "# 3.1 Create TensorFlow Session # session = tf.Session() ############################################# # 3.2 Initialize", "classes, one class for each of 10 digits. 
num_classes = 10 data.test.cls =", "= tf.Session() ############################################# # 3.2 Initialize Variables # ################################################### # 3.4 Optimization Iteration", "Initialize Variables # ################################################### # 3.4 Optimization Iteration # feed_dict_test = { x", ": {:2.1f}% with {:d} iterations, {:1.2f} learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################", "np from sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i", "img_size_flat = data.train.images[0].shape[0] # Tuple with height and width of images used to", "y_pred_cls , y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1 Create TensorFlow Session", "range(5,0,-1)]: for b in range(1,1000,100): session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate =", "cls_true = [np.argmax(oh) for oh in data.test.labels[0:9] ] ############################################## # 2.1 Placeholder variables", "feed_dict_test = { x : data.test.images, y_true : data.test.labels, y_true_cls : [np.argmax(label) for", "the libraries # import sys import matplotlib.pyplot as plt import tensorflow as tf", "iteration means how many of batchs are iterated # print(\"Gradient decent optimizer\") for", "axis=1) # 2.4 Cost Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels =", "Iteration # feed_dict_test = { x : data.test.images, y_true : data.test.labels, y_true_cls :", "y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1 Create TensorFlow Session # session", "MNIST data # old_v = 
tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot = True)", "# Number of classes, one class for each of 10 digits. num_classes =", "= tf.placeholder( tf.float32, [None, num_classes]) y_true_cls = tf.placeholder( tf.int64, [None]) ############################################## # 2.2", "Performance measures # correct_prediction = tf.equal( y_pred_cls , y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))", "tf.equal( y_pred_cls , y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1 Create TensorFlow", "# 2.4 Cost Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true)", "2.5 Optimization Function # gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance", "y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1) # 2.4 Cost Function # cross_entropy", "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1 Create TensorFlow Session # session =", "lrx in [x/10 for x in range(5,0,-1)]: for b in range(1,1000,100): session.run(tf.global_variables_initializer()) for", "print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with incremental batch size \") session.run(tf.global_variables_initializer()) for lrx in", "{ x : data.test.images, y_true : data.test.labels, y_true_cls : [np.argmax(label) for label in", "(28,28) # Number of classes, one class for each of 10 digits. num_classes", "with height and width of images used to reshape arrays. img_shape = (28,28)", "images # Get the first images from the Test-set. 
# images = data.test.images[0:9]", "\") for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in", "lr: learning_rate, y_true : y_true_batch} session.run(optimizer, feed_dict = feed_dict_train) def print_confusion_matrix(): cls_true =", "1.5 Plot a few images # Get the first images from the Test-set.", "# session = tf.Session() ############################################# # 3.2 Initialize Variables # ################################################### # 3.4", "session.run(y_pred_cls, feed_dict = feed_dict_test) cm = confusion_matrix(y_true = cls_true, y_pred = cls_pred) print(cm)", "# 3.2 Initialize Variables # ################################################### # 3.4 Optimization Iteration # feed_dict_test =", "x_batch, lr: learning_rate, y_true : y_true_batch} session.run(optimizer, feed_dict = feed_dict_train) def print_confusion_matrix(): cls_true", "Regression # ################################################ # 1.1 Load the libraries # import sys import matplotlib.pyplot", "logits, labels = y_true) cost = tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization Function #", "TensorFlow to compute the accuracy. acc = session.run(accuracy , feed_dict= feed_dict_test) # Print", "tensorflow as tf import numpy as np from sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist", "= np.array([label.argmax() for label in data.test.labels]) ########################################### # 1.5 Plot a few images", "# Print the accuracy. print('Accuracy : {:2.1f}% with {:d} iterations, {:1.2f} learning rate", "for i in range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train = {x :", "class for each of 10 digits. 
num_classes = 10 data.test.cls = np.array([label.argmax() for", "batch size \") session.run(tf.global_variables_initializer()) for lrx in [x/10 for x in range(5,0,-1)]: for", "numpy as np from sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size):", "in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate", "x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100)", "Linear Regression # ################################################ # 1.1 Load the libraries # import sys import", "tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization Function # gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost)", "stored in one-dimensional arrays of this length. 
# img_size_flat = data.train.images[0].shape[0] # Tuple", "data = input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v) ####################################################### # the images are stored", "# 2.6 Performance measures # correct_prediction = tf.equal( y_pred_cls , y_true_cls) accuracy =", "label in data.test.labels] } ############################################# # 4.2 Performance Iteration#1 # # Number of", "tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size=", "# 4.2 Performance Iteration#1 # # Number of iteration means how many of", "1.2 Download and read MNIST data # old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data =", "= feed_dict_train) def print_confusion_matrix(): cls_true = [np.argmax(label) for label in data.test.labels] cls_pred =", "iterations, {:1.2f} learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download and", "= tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model # logits =", "tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures # correct_prediction = tf.equal( y_pred_cls", "y_pred_cls = tf.argmax(y_pred, axis=1) # 2.4 Cost Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits=", "used to reshape arrays. 
img_shape = (28,28) # Number of classes, one class", "batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download and read MNIST data # old_v =", "= tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ############################################## # 3.1 Create TensorFlow Session # session = tf.Session()", "data.test.images[0:9] # Get the true classes for those images. cls_true = [np.argmax(oh) for", "learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download and read MNIST", "in range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train = {x : x_batch, lr:", "images. cls_true = [np.argmax(oh) for oh in data.test.labels[0:9] ] ############################################## # 2.1 Placeholder", "those images. cls_true = [np.argmax(oh) for oh in data.test.labels[0:9] ] ############################################## # 2.1", "Variables # weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model", "weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model # logits", "Cost Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true) cost =", "############################################# # 3.2 Initialize Variables # ################################################### # 3.4 Optimization Iteration # feed_dict_test", "Image Classification Using Linear Regression # ################################################ # 1.1 Load the libraries #", "= 10 data.test.cls = np.array([label.argmax() for label in data.test.labels]) ########################################### # 1.5 Plot", 
"############################################## # 3.1 Create TensorFlow Session # session = tf.Session() ############################################# # 3.2", "# 2.5 Optimization Function # gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6", "= lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \") for lrx in [x/10 for x", "in range(5,0,-1)]: for b in range(1,1000,100): session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate", "of this length. # img_size_flat = data.train.images[0].shape[0] # Tuple with height and width", "weights) + bias y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1) # 2.4 Cost", "tf.placeholder(tf.float32) x = tf.placeholder( tf.float32, [None, img_size_flat]) y_true = tf.placeholder( tf.float32, [None, num_classes])", "Number of iteration means how many of batchs are iterated # print(\"Gradient decent", "tf.float32, [None, num_classes]) y_true_cls = tf.placeholder( tf.int64, [None]) ############################################## # 2.2 Variables #", "feed_dict_train = {x : x_batch, lr: learning_rate, y_true : y_true_batch} session.run(optimizer, feed_dict =", "session.run(tf.global_variables_initializer()) for lrx in [x/10 for x in range(5,0,-1)]: for b in range(1,1000,100):", "feed_dict_test) # Print the accuracy. print('Accuracy : {:2.1f}% with {:d} iterations, {:1.2f} learning", ": data.test.images, y_true : data.test.labels, y_true_cls : [np.argmax(label) for label in data.test.labels] }", "cm = confusion_matrix(y_true = cls_true, y_pred = cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use", "compute the accuracy. 
acc = session.run(accuracy , feed_dict= feed_dict_test) # Print the accuracy.", "<filename>lesson7/lesson7.1.py ############################################### # MNIST Image Classification Using Linear Regression # ################################################ # 1.1", "[x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate =", "i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \") for lrx in [x/10 for", "# images = data.test.images[0:9] # Get the true classes for those images. cls_true", "logits= logits, labels = y_true) cost = tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization Function", "tf.Session() ############################################# # 3.2 Initialize Variables # ################################################### # 3.4 Optimization Iteration #", "Print the accuracy. print('Accuracy : {:2.1f}% with {:d} iterations, {:1.2f} learning rate and", "y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train = {x : x_batch, lr: learning_rate, y_true :", "[x/10 for x in range(5,0,-1)]: for b in range(1,1000,100): session.run(tf.global_variables_initializer()) for i in", "####################################################### # the images are stored in one-dimensional arrays of this length. #", "} ############################################# # 4.2 Performance Iteration#1 # # Number of iteration means how", "Test-set. 
# images = data.test.images[0:9] # Get the true classes for those images.", "= tf.placeholder(tf.float32) x = tf.placeholder( tf.float32, [None, img_size_flat]) y_true = tf.placeholder( tf.float32, [None,", "4.2 Performance Iteration#1 # # Number of iteration means how many of batchs", "# 1.1 Load the libraries # import sys import matplotlib.pyplot as plt import", "few images # Get the first images from the Test-set. # images =", "# print(\"Gradient decent optimizer\") for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer())", "as tf import numpy as np from sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist import", "# 2.2 Variables # weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ############################################### #", "optimizer\") for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in", "x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100)", "sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations):", "def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to compute the accuracy. acc = session.run(accuracy ,", "images = data.test.images[0:9] # Get the true classes for those images. cls_true =", "label in data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test) cm = confusion_matrix(y_true =", "# the images are stored in one-dimensional arrays of this length. 
# img_size_flat", "libraries # import sys import matplotlib.pyplot as plt import tensorflow as tf import", "of iteration means how many of batchs are iterated # print(\"Gradient decent optimizer\")", "for b in range(1,1000,100): session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100)", "################################################### # 3.4 Optimization Iteration # feed_dict_test = { x : data.test.images, y_true", "feed_dict = feed_dict_test) cm = confusion_matrix(y_true = cls_true, y_pred = cls_pred) print(cm) def", "########################################### # 1.5 Plot a few images # Get the first images from", "in [x/10 for x in range(5,0,-1)]: for b in range(1,1000,100): session.run(tf.global_variables_initializer()) for i", "num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model # logits = tf.matmul(x, weights)", "width of images used to reshape arrays. img_shape = (28,28) # Number of", "np.array([label.argmax() for label in data.test.labels]) ########################################### # 1.5 Plot a few images #", "optimizer with incremental batch size \") session.run(tf.global_variables_initializer()) for lrx in [x/10 for x", "############################################## # 2.2 Variables # weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ###############################################", "variables # lr = tf.placeholder(tf.float32) x = tf.placeholder( tf.float32, [None, img_size_flat]) y_true =", "lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with incremental batch size \") session.run(tf.global_variables_initializer()) for lrx", "y_true : data.test.labels, y_true_cls : [np.argmax(label) for label in data.test.labels] } ############################################# #", "arrays. 
img_shape = (28,28) # Number of classes, one class for each of", "confusion_matrix from tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations): x_batch, y_true_batch", "for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with", "lr = tf.placeholder(tf.float32) x = tf.placeholder( tf.float32, [None, img_size_flat]) y_true = tf.placeholder( tf.float32,", "tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1) # 2.4 Cost Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(", "the Test-set. # images = data.test.images[0:9] # Get the true classes for those", "tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true) cost = tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization", "print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \") for lrx in [x/10 for x in range(5,0,-1)]:", "Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true) cost = tf.reduce_mean(cross_entropy)", "old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v) ####################################################### #", "data.test.cls = np.array([label.argmax() for label in data.test.labels]) ########################################### # 1.5 Plot a few", "data.train.next_batch(batch_size= batch_size) feed_dict_train = {x : x_batch, lr: learning_rate, y_true : y_true_batch} session.run(optimizer,", ": x_batch, lr: learning_rate, y_true : y_true_batch} session.run(optimizer, feed_dict = feed_dict_train) def print_confusion_matrix():", "images are stored in one-dimensional arrays of this length. 
# img_size_flat = data.train.images[0].shape[0]", "range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train = {x : x_batch, lr: learning_rate,", "oh in data.test.labels[0:9] ] ############################################## # 2.1 Placeholder variables # lr = tf.placeholder(tf.float32)", "[np.argmax(label) for label in data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test) cm =", "# gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures # correct_prediction", "to compute the accuracy. acc = session.run(accuracy , feed_dict= feed_dict_test) # Print the", "import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from", "with {:d} iterations, {:1.2f} learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2", "size \") session.run(tf.global_variables_initializer()) for lrx in [x/10 for x in range(5,0,-1)]: for b", "= session.run(y_pred_cls, feed_dict = feed_dict_test) cm = confusion_matrix(y_true = cls_true, y_pred = cls_pred)", "Classification Using Linear Regression # ################################################ # 1.1 Load the libraries # import", "confusion_matrix(y_true = cls_true, y_pred = cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to", "data.test.labels]) ########################################### # 1.5 Plot a few images # Get the first images", "in data.test.labels] } ############################################# # 4.2 Performance Iteration#1 # # Number of iteration", "def print_confusion_matrix(): cls_true = [np.argmax(label) for label in data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict", "\") session.run(tf.global_variables_initializer()) for lrx in [x/10 for x in range(5,0,-1)]: for b in", 
"lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer \") for lrx in [x/10 for x in", "print(\"Gradient decent optimizer\") for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for", ": y_true_batch} session.run(optimizer, feed_dict = feed_dict_train) def print_confusion_matrix(): cls_true = [np.argmax(label) for label", "= cls_true, y_pred = cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to compute", "3.4 Optimization Iteration # feed_dict_test = { x : data.test.images, y_true : data.test.labels,", "print('Accuracy : {:2.1f}% with {:d} iterations, {:1.2f} learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size))", "how many of batchs are iterated # print(\"Gradient decent optimizer\") for lrx in", "# Get the true classes for those images. cls_true = [np.argmax(oh) for oh", "y_true_batch} session.run(optimizer, feed_dict = feed_dict_train) def print_confusion_matrix(): cls_true = [np.argmax(label) for label in", "2.6 Performance measures # correct_prediction = tf.equal( y_pred_cls , y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction,", "= data.train.images[0].shape[0] # Tuple with height and width of images used to reshape", "= feed_dict_test) cm = confusion_matrix(y_true = cls_true, y_pred = cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size):", "labels = y_true) cost = tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization Function # gradient_descent_optimizer", "the accuracy. acc = session.run(accuracy , feed_dict= feed_dict_test) # Print the accuracy. 
print('Accuracy", "= y_true) cost = tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization Function # gradient_descent_optimizer =", "session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer", "a few images # Get the first images from the Test-set. # images", "= tf.placeholder( tf.float32, [None, img_size_flat]) y_true = tf.placeholder( tf.float32, [None, num_classes]) y_true_cls =", "feed_dict = feed_dict_train) def print_confusion_matrix(): cls_true = [np.argmax(label) for label in data.test.labels] cls_pred", "the accuracy. print('Accuracy : {:2.1f}% with {:d} iterations, {:1.2f} learning rate and {:d}", "TensorFlow Session # session = tf.Session() ############################################# # 3.2 Initialize Variables # ###################################################", "b in range(1,1000,100): session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,b)", "[np.argmax(oh) for oh in data.test.labels[0:9] ] ############################################## # 2.1 Placeholder variables # lr", "= True) tf.logging.set_verbosity(old_v) ####################################################### # the images are stored in one-dimensional arrays of", "############################################## # 2.1 Placeholder variables # lr = tf.placeholder(tf.float32) x = tf.placeholder( tf.float32,", "and width of images used to reshape arrays. 
img_shape = (28,28) # Number", "lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(gradient_descent_optimizer,num_iterations=", "print(\"Adagra optimizer with incremental batch size \") session.run(tf.global_variables_initializer()) for lrx in [x/10 for", "Load the libraries # import sys import matplotlib.pyplot as plt import tensorflow as", "data # old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v)", "= tf.placeholder( tf.int64, [None]) ############################################## # 2.2 Variables # weights = tf.Variable(tf.zeros([img_size_flat, num_classes]))", "Use TensorFlow to compute the accuracy. acc = session.run(accuracy , feed_dict= feed_dict_test) #", "read MNIST data # old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\", one_hot =", "for label in data.test.labels]) ########################################### # 1.5 Plot a few images # Get", "num_classes = 10 data.test.cls = np.array([label.argmax() for label in data.test.labels]) ########################################### # 1.5", "[1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with incremental batch size", "incremental batch size \") session.run(tf.global_variables_initializer()) for lrx in [x/10 for x in range(5,0,-1)]:", "= input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v) ####################################################### # the images are stored in", "session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer", 
"print(\"Adagra optimizer \") for lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for", "y_true = tf.placeholder( tf.float32, [None, num_classes]) y_true_cls = tf.placeholder( tf.int64, [None]) ############################################## #", "x in range(5,0,-1)]: for b in range(1,1000,100): session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations=", "are stored in one-dimensional arrays of this length. # img_size_flat = data.train.images[0].shape[0] #", "= lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with incremental batch size \") session.run(tf.global_variables_initializer()) for", "each of 10 digits. num_classes = 10 data.test.cls = np.array([label.argmax() for label in", "Iteration#1 # # Number of iteration means how many of batchs are iterated", "cls_true, y_pred = cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to compute the", "acc = session.run(accuracy , feed_dict= feed_dict_test) # Print the accuracy. print('Accuracy : {:2.1f}%", "accuracy. 
print('Accuracy : {:2.1f}% with {:d} iterations, {:1.2f} learning rate and {:d} batch", "input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train", "tf.float32)) ############################################## # 3.1 Create TensorFlow Session # session = tf.Session() ############################################# #", "x = tf.placeholder( tf.float32, [None, img_size_flat]) y_true = tf.placeholder( tf.float32, [None, num_classes]) y_true_cls", "for x in range(5,0,-1)]: for b in range(1,1000,100): session.run(tf.global_variables_initializer()) for i in [1,9,990]:", "in data.test.labels]) ########################################### # 1.5 Plot a few images # Get the first", "in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix()", "tf.argmax(y_pred, axis=1) # 2.4 Cost Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels", "tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model # logits = tf.matmul(x,", "i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with incremental", "{:1.2f} learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download and read", "iterated # print(\"Gradient decent optimizer\") for lrx in [x/10 for x in range(5,0,-1)]:", "y_true) cost = tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization Function # gradient_descent_optimizer = 
tf.train.GradientDescentOptimizer(lr).minimize(cost)", "tf import numpy as np from sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist import input_data", "= tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true) cost = tf.reduce_mean(cross_entropy) ################################################ # 2.5", "= confusion_matrix(y_true = cls_true, y_pred = cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow", "= tf.reduce_mean(cross_entropy) ################################################ # 2.5 Optimization Function # gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer =", "[x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate =", "Optimization Function # gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures", "i in range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train = {x : x_batch,", "import sys import matplotlib.pyplot as plt import tensorflow as tf import numpy as", "cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test) cm = confusion_matrix(y_true = cls_true, y_pred =", "Number of classes, one class for each of 10 digits. num_classes = 10", "import tensorflow as tf import numpy as np from sklearn.metrics import confusion_matrix from", "= { x : data.test.images, y_true : data.test.labels, y_true_cls : [np.argmax(label) for label", "session.run(accuracy , feed_dict= feed_dict_test) # Print the accuracy. 
print('Accuracy : {:2.1f}% with {:d}", "# # Number of iteration means how many of batchs are iterated #", "batch_size) feed_dict_train = {x : x_batch, lr: learning_rate, y_true : y_true_batch} session.run(optimizer, feed_dict", "Download and read MNIST data # old_v = tf.logging.get_verbosity() tf.logging.set_verbosity(tf.logging.ERROR) data = input_data.read_data_sets(\"MNIST_data/\",", "# Tuple with height and width of images used to reshape arrays. img_shape", "Function # gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost) adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost) # 2.6 Performance measures #", "optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,100) #print_confusion_matrix() print(\"Adagra optimizer with incremental batch size \")", "as np from sklearn.metrics import confusion_matrix from tensorflow.examples.tutorials.mnist import input_data def optimize(optimizer,num_iterations,learning_rate,batch_size): for", "images from the Test-set. # images = data.test.images[0:9] # Get the true classes", "tf.matmul(x, weights) + bias y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, axis=1) # 2.4", "feed_dict= feed_dict_test) # Print the accuracy. 
print('Accuracy : {:2.1f}% with {:d} iterations, {:1.2f}", "] ############################################## # 2.1 Placeholder variables # lr = tf.placeholder(tf.float32) x = tf.placeholder(", "lrx in [x/10 for x in range(5,0,-1)]: session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations=", "{:2.1f}% with {:d} iterations, {:1.2f} learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ #", "tf.int64, [None]) ############################################## # 2.2 Variables # weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias =", "and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download and read MNIST data #", "# import sys import matplotlib.pyplot as plt import tensorflow as tf import numpy", "2.4 Cost Function # cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true) cost", "# img_size_flat = data.train.images[0].shape[0] # Tuple with height and width of images used", "# MNIST Image Classification Using Linear Regression # ################################################ # 1.1 Load the", "session.run(optimizer, feed_dict = feed_dict_train) def print_confusion_matrix(): cls_true = [np.argmax(label) for label in data.test.labels]", "print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to compute the accuracy. acc = session.run(accuracy", "# 3.4 Optimization Iteration # feed_dict_test = { x : data.test.images, y_true :", "2.3 Model # logits = tf.matmul(x, weights) + bias y_pred = tf.nn.softmax(logits) y_pred_cls", "cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to compute the accuracy. 
acc =", "in range(1,1000,100): session.run(tf.global_variables_initializer()) for i in [1,9,990]: optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100) print_accuracy(i,lrx,b) print_confusion_matrix()", "# 2.1 Placeholder variables # lr = tf.placeholder(tf.float32) x = tf.placeholder( tf.float32, [None,", "# weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias = tf.Variable(tf.zeros([num_classes])) ############################################### # 2.3 Model #", "input_data.read_data_sets(\"MNIST_data/\", one_hot = True) tf.logging.set_verbosity(old_v) ####################################################### # the images are stored in one-dimensional", "optimize(optimizer,num_iterations,learning_rate,batch_size): for i in range(num_iterations): x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size) feed_dict_train = {x", "first images from the Test-set. # images = data.test.images[0:9] # Get the true", "y_pred = cls_pred) print(cm) def print_accuracy(iterations,learning_rate,batch_size): # Use TensorFlow to compute the accuracy.", "[None, img_size_flat]) y_true = tf.placeholder( tf.float32, [None, num_classes]) y_true_cls = tf.placeholder( tf.int64, [None])", "size'.format((acc*100),iterations,learning_rate,batch_size)) ################################################ # 1.2 Download and read MNIST data # old_v = tf.logging.get_verbosity()", "tf.placeholder( tf.int64, [None]) ############################################## # 2.2 Variables # weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) bias", "x : data.test.images, y_true : data.test.labels, y_true_cls : [np.argmax(label) for label in data.test.labels]", "data.test.labels] cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test) cm = confusion_matrix(y_true = cls_true, y_pred", "#print_confusion_matrix() print(\"Adagra optimizer with incremental batch size \") session.run(tf.global_variables_initializer()) for lrx in [x/10" ]
[ "Runtime: 76 ms Memory Usage: 15.9 MB \"\"\" class Solution: def longestWord(self, words:", "word in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] = i stk = list(trie.values()) ans =", "or len(word) == len(ans) and word < ans: ans = word stk.extend([cur[letter] for", "word in words: if len(word) == 1 or word[0:-1] in s: if len(word)", "List[str]) -> str: Trie = lambda: collections.defaultdict(Trie) trie = Trie() END = True", "functools.reduce(dict.__getitem__, word, trie)[END] = i stk = list(trie.values()) ans = \"\" while stk:", "ms Memory Usage: 15.9 MB \"\"\" class Solution: def longestWord(self, words: List[str]) ->", "len(ans) or len(word) == len(ans) and word < ans: ans = word stk.extend([cur[letter]", "letter in cur if letter != END]) return ans \"\"\" 59 / 59", "str: Trie = lambda: collections.defaultdict(Trie) trie = Trie() END = True for i,", "in words: if len(word) == 1 or word[0:-1] in s: if len(word) >", "class Solution: def longestWord(self, words: List[str]) -> str: Trie = lambda: collections.defaultdict(Trie) trie", "List[str]) -> str: words.sort() s = set() ans = \"\" for word in", "set() ans = \"\" for word in words: if len(word) == 1 or", "stk.pop() if END in cur: word = words[cur[END]] if len(word) > len(ans) or", "!= END]) return ans \"\"\" 59 / 59 test cases passed. Runtime: 48", "Usage: 15.9 MB \"\"\" class Solution: def longestWord(self, words: List[str]) -> str: Trie", "len(word) > len(ans) or len(word) == len(ans) and word < ans: ans =", "and word < ans: ans = word stk.extend([cur[letter] for letter in cur if", "str: words.sort() s = set() ans = \"\" for word in words: if", "words: List[str]) -> str: words.sort() s = set() ans = \"\" for word", "END]) return ans \"\"\" 59 / 59 test cases passed. 
Runtime: 48 ms", "Solution: def longestWord(self, words: List[str]) -> str: Trie = lambda: collections.defaultdict(Trie) trie =", "cur = stk.pop() if END in cur: word = words[cur[END]] if len(word) >", "word = words[cur[END]] if len(word) > len(ans) or len(word) == len(ans) and word", "ans = \"\" while stk: cur = stk.pop() if END in cur: word", "Longest Word in Dictionary/solution.py \"\"\" 59 / 59 test cases passed. Runtime: 76", "trie = Trie() END = True for i, word in enumerate(words): functools.reduce(dict.__getitem__, word,", "letter != END]) return ans \"\"\" 59 / 59 test cases passed. Runtime:", "stk: cur = stk.pop() if END in cur: word = words[cur[END]] if len(word)", "words: List[str]) -> str: Trie = lambda: collections.defaultdict(Trie) trie = Trie() END =", "stk.extend([cur[letter] for letter in cur if letter != END]) return ans \"\"\" 59", "return ans \"\"\" 59 / 59 test cases passed. Runtime: 48 ms Memory", "< ans: ans = word stk.extend([cur[letter] for letter in cur if letter !=", "words: if len(word) == 1 or word[0:-1] in s: if len(word) > len(ans):", "= i stk = list(trie.values()) ans = \"\" while stk: cur = stk.pop()", "Usage: 15.4 MB \"\"\" class Solution2: def longestWord(self, words: List[str]) -> str: words.sort()", "END = True for i, word in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] = i", "cur: word = words[cur[END]] if len(word) > len(ans) or len(word) == len(ans) and", "longestWord(self, words: List[str]) -> str: Trie = lambda: collections.defaultdict(Trie) trie = Trie() END", "= Trie() END = True for i, word in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END]", "cur if letter != END]) return ans \"\"\" 59 / 59 test cases", "-> str: Trie = lambda: collections.defaultdict(Trie) trie = Trie() END = True for", "= lambda: collections.defaultdict(Trie) trie = Trie() END = True for i, word in", "cases passed. 
Runtime: 76 ms Memory Usage: 15.9 MB \"\"\" class Solution: def", "or word[0:-1] in s: if len(word) > len(ans): ans = word s.add(word) return", "59 / 59 test cases passed. Runtime: 48 ms Memory Usage: 15.4 MB", "ms Memory Usage: 15.4 MB \"\"\" class Solution2: def longestWord(self, words: List[str]) ->", "= stk.pop() if END in cur: word = words[cur[END]] if len(word) > len(ans)", "ans = \"\" for word in words: if len(word) == 1 or word[0:-1]", "Trie() END = True for i, word in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] =", "True for i, word in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] = i stk =", "\"\"\" class Solution2: def longestWord(self, words: List[str]) -> str: words.sort() s = set()", "trie)[END] = i stk = list(trie.values()) ans = \"\" while stk: cur =", "59 / 59 test cases passed. Runtime: 76 ms Memory Usage: 15.9 MB", "in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] = i stk = list(trie.values()) ans = \"\"", "= word stk.extend([cur[letter] for letter in cur if letter != END]) return ans", "76 ms Memory Usage: 15.9 MB \"\"\" class Solution: def longestWord(self, words: List[str])", "= list(trie.values()) ans = \"\" while stk: cur = stk.pop() if END in", "/ 59 test cases passed. Runtime: 48 ms Memory Usage: 15.4 MB \"\"\"", "Word in Dictionary/solution.py \"\"\" 59 / 59 test cases passed. Runtime: 76 ms", "15.9 MB \"\"\" class Solution: def longestWord(self, words: List[str]) -> str: Trie =", "59 test cases passed. Runtime: 48 ms Memory Usage: 15.4 MB \"\"\" class", "== len(ans) and word < ans: ans = word stk.extend([cur[letter] for letter in", "\"\" while stk: cur = stk.pop() if END in cur: word = words[cur[END]]", "> len(ans) or len(word) == len(ans) and word < ans: ans = word", "len(word) == 1 or word[0:-1] in s: if len(word) > len(ans): ans =", "passed. 
Runtime: 76 ms Memory Usage: 15.9 MB \"\"\" class Solution: def longestWord(self,", "== 1 or word[0:-1] in s: if len(word) > len(ans): ans = word", "ans \"\"\" 59 / 59 test cases passed. Runtime: 48 ms Memory Usage:", "15.4 MB \"\"\" class Solution2: def longestWord(self, words: List[str]) -> str: words.sort() s", "END in cur: word = words[cur[END]] if len(word) > len(ans) or len(word) ==", "59 test cases passed. Runtime: 76 ms Memory Usage: 15.9 MB \"\"\" class", "<filename>LeetCode/0720. Longest Word in Dictionary/solution.py \"\"\" 59 / 59 test cases passed. Runtime:", "in cur: word = words[cur[END]] if len(word) > len(ans) or len(word) == len(ans)", "words.sort() s = set() ans = \"\" for word in words: if len(word)", "= \"\" while stk: cur = stk.pop() if END in cur: word =", "ans = word stk.extend([cur[letter] for letter in cur if letter != END]) return", "\"\"\" 59 / 59 test cases passed. Runtime: 76 ms Memory Usage: 15.9", "1 or word[0:-1] in s: if len(word) > len(ans): ans = word s.add(word)", "in Dictionary/solution.py \"\"\" 59 / 59 test cases passed. 
Runtime: 76 ms Memory", "enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] = i stk = list(trie.values()) ans = \"\" while", "= True for i, word in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] = i stk", "for letter in cur if letter != END]) return ans \"\"\" 59 /", "def longestWord(self, words: List[str]) -> str: words.sort() s = set() ans = \"\"", "len(word) == len(ans) and word < ans: ans = word stk.extend([cur[letter] for letter", "collections.defaultdict(Trie) trie = Trie() END = True for i, word in enumerate(words): functools.reduce(dict.__getitem__,", "Memory Usage: 15.9 MB \"\"\" class Solution: def longestWord(self, words: List[str]) -> str:", "while stk: cur = stk.pop() if END in cur: word = words[cur[END]] if", "Memory Usage: 15.4 MB \"\"\" class Solution2: def longestWord(self, words: List[str]) -> str:", "i, word in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] = i stk = list(trie.values()) ans", "Dictionary/solution.py \"\"\" 59 / 59 test cases passed. Runtime: 76 ms Memory Usage:", "test cases passed. Runtime: 76 ms Memory Usage: 15.9 MB \"\"\" class Solution:", "48 ms Memory Usage: 15.4 MB \"\"\" class Solution2: def longestWord(self, words: List[str])", "for word in words: if len(word) == 1 or word[0:-1] in s: if", "stk = list(trie.values()) ans = \"\" while stk: cur = stk.pop() if END", "= \"\" for word in words: if len(word) == 1 or word[0:-1] in", "\"\"\" 59 / 59 test cases passed. Runtime: 48 ms Memory Usage: 15.4", "test cases passed. 
Runtime: 48 ms Memory Usage: 15.4 MB \"\"\" class Solution2:", "word[0:-1] in s: if len(word) > len(ans): ans = word s.add(word) return ans", "if len(word) == 1 or word[0:-1] in s: if len(word) > len(ans): ans", "if END in cur: word = words[cur[END]] if len(word) > len(ans) or len(word)", "in cur if letter != END]) return ans \"\"\" 59 / 59 test", "MB \"\"\" class Solution: def longestWord(self, words: List[str]) -> str: Trie = lambda:", "i stk = list(trie.values()) ans = \"\" while stk: cur = stk.pop() if", "= words[cur[END]] if len(word) > len(ans) or len(word) == len(ans) and word <", "/ 59 test cases passed. Runtime: 76 ms Memory Usage: 15.9 MB \"\"\"", "list(trie.values()) ans = \"\" while stk: cur = stk.pop() if END in cur:", "Trie = lambda: collections.defaultdict(Trie) trie = Trie() END = True for i, word", "if len(word) > len(ans) or len(word) == len(ans) and word < ans: ans", "word < ans: ans = word stk.extend([cur[letter] for letter in cur if letter", "-> str: words.sort() s = set() ans = \"\" for word in words:", "Runtime: 48 ms Memory Usage: 15.4 MB \"\"\" class Solution2: def longestWord(self, words:", "Solution2: def longestWord(self, words: List[str]) -> str: words.sort() s = set() ans =", "\"\"\" class Solution: def longestWord(self, words: List[str]) -> str: Trie = lambda: collections.defaultdict(Trie)", "\"\" for word in words: if len(word) == 1 or word[0:-1] in s:", "cases passed. Runtime: 48 ms Memory Usage: 15.4 MB \"\"\" class Solution2: def", "= set() ans = \"\" for word in words: if len(word) == 1", "def longestWord(self, words: List[str]) -> str: Trie = lambda: collections.defaultdict(Trie) trie = Trie()", "word stk.extend([cur[letter] for letter in cur if letter != END]) return ans \"\"\"", "lambda: collections.defaultdict(Trie) trie = Trie() END = True for i, word in enumerate(words):", "passed. 
Runtime: 48 ms Memory Usage: 15.4 MB \"\"\" class Solution2: def longestWord(self,", "word, trie)[END] = i stk = list(trie.values()) ans = \"\" while stk: cur", "len(ans) and word < ans: ans = word stk.extend([cur[letter] for letter in cur", "s = set() ans = \"\" for word in words: if len(word) ==", "longestWord(self, words: List[str]) -> str: words.sort() s = set() ans = \"\" for", "class Solution2: def longestWord(self, words: List[str]) -> str: words.sort() s = set() ans", "if letter != END]) return ans \"\"\" 59 / 59 test cases passed.", "ans: ans = word stk.extend([cur[letter] for letter in cur if letter != END])", "words[cur[END]] if len(word) > len(ans) or len(word) == len(ans) and word < ans:", "MB \"\"\" class Solution2: def longestWord(self, words: List[str]) -> str: words.sort() s =", "for i, word in enumerate(words): functools.reduce(dict.__getitem__, word, trie)[END] = i stk = list(trie.values())" ]
[ "as animation import time #Reading one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to", "degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:] p =", "3*sensors_number emg_channels = sensors_number orientation_channels = 4*sensors_number #for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) #", "#for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the sensor on the base", "as np from scipy.spatial.transform import Rotation as R import matplotlib.pyplot as plt import", "= [] ys = [] r = [] p = [] y =", "#iters=any([data[0, :], data[1, :], data[2, :], data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0])", "data[2,-1],data[3,-1]]) #orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]])) #iters=any([data[0, :],", "base station magnet to pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs =", "[] def animate(i, xs, r, p, y): start_time = time.time() data = orientation.read()", "orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r =", "= 3*sensors_number emg_channels = sensors_number orientation_channels = 4*sensors_number #for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100)", "sensor on the base station magnet to pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) 
orientation.start() orientation.what_mode(1)", "axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds ---\" % (time.time() - start_time))", "data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2,", "= [] def animate(i, xs, r, p, y): start_time = time.time() data =", "matplotlib.pyplot as plt import matplotlib.animation as animation import time #Reading one sensor accel", "#limit channels to 3 (0,1,2 according to accel_x, accel_y, accel_z) #t.start() #data=t.read() #t.stop()", "emg_channels = sensors_number orientation_channels = 4*sensors_number #for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1)", "= plt.subplots(3) xs = [] ys = [] r = [] p =", "station magnet to pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs = plt.subplots(3)", "from pytrigno import TrignoOrientation import numpy as np from scipy.spatial.transform import Rotation as", ":], data[3, :]])) #iters=any([data[0, :], data[1, :], data[2, :], data[3, :]]) orientation_rpy =", "#orientation.pair_sensor(1) #print('Place the sensor on the base station magnet to pair') #time.sleep(5) #orientation.is_paired(1)", "[] r = [] p = [] y = [] def animate(i, xs,", "orientation_channels = 4*sensors_number #for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the sensor", "(0,1,2 according to accel_x, accel_y, accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data) sensors_number", "time.time() data = orientation.read() if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1], 
data[2,-1],data[3,-1]])", "p, y): start_time = time.time() data = orientation.read() if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]): orientation_quat", "from pytrigno import TrignoAccel from pytrigno import TrignoEMG from pytrigno import TrignoOrientation import", "import matplotlib.animation as animation import time #Reading one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit", "data = orientation.read() if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats", "TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the sensor on the base station magnet to pair')", "quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the sensor on the base station", "axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds ---\" % (time.time() - start_time)) ani =", "= R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]])) #iters=any([data[0, :], data[1, :],", "accel_y, accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data) sensors_number = 1 acc_channels =", "accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to accel_x, accel_y, accel_z)", "#print(data.shape, data.sum()) #print(data) sensors_number = 1 acc_channels = 3*sensors_number emg_channels = sensors_number orientation_channels", "sensors_number orientation_channels = 4*sensors_number #for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the", "sensors_number = 1 acc_channels = 3*sensors_number emg_channels = sensors_number orientation_channels = 4*sensors_number #for", 
"r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:] p = p[-1000:]", "accel_x, accel_y, accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data) sensors_number = 1 acc_channels", "orientation.what_mode(1) fig, axs = plt.subplots(3) xs = [] ys = [] r =", "print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:] p = p[-1000:] y = y[-1000:]", "data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to accel_x, accel_y, accel_z) #t.start()", "TrignoEMG from pytrigno import TrignoOrientation import numpy as np from scipy.spatial.transform import Rotation", "plt import matplotlib.animation as animation import time #Reading one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100)", "according to accel_x, accel_y, accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data) sensors_number =", "data[2, :], data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:])", "R import matplotlib.pyplot as plt import matplotlib.animation as animation import time #Reading one", "= 1 acc_channels = 3*sensors_number emg_channels = sensors_number orientation_channels = 4*sensors_number #for quaternion", "orientation.start() orientation.what_mode(1) fig, axs = plt.subplots(3) xs = [] ys = [] r", "import numpy as np from scipy.spatial.transform import Rotation as R import matplotlib.pyplot as", "R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]])) #iters=any([data[0, :], data[1, :], data[2,", "y): start_time = time.time() data = orientation.read() if any([data[0,-1], data[1,-1], 
data[2,-1],data[3,-1]]): orientation_quat =", "axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds ---\" % (time.time() - start_time)) ani", "#acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:] p = p[-1000:] y = y[-1000:] axs[0].clear() axs[1].clear()", "one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to accel_x,", "axs = plt.subplots(3) xs = [] ys = [] r = [] p", "xs, r, p, y): start_time = time.time() data = orientation.read() if any([data[0,-1], data[1,-1],", "TrignoOrientation import numpy as np from scipy.spatial.transform import Rotation as R import matplotlib.pyplot", "= R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3,", "#print(data) sensors_number = 1 acc_channels = 3*sensors_number emg_channels = sensors_number orientation_channels = 4*sensors_number", "R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]]))", "- start_time)) ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p, y), interval= 100) plt.show()", "---\" % (time.time() - start_time)) ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p, y),", "fig, axs = plt.subplots(3) xs = [] ys = [] r = []", "p[-1000:] y = y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds", "xs = [] ys = [] r = [] p = [] y", "ys = [] r = [] p = [] y = [] def", "= orientation.read() if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats =", "channels to 3 (0,1,2 according to accel_x, accel_y, accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape,", "orientation.read() if any([data[0,-1], data[1,-1], 
data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = R.from_quat(np.transpose([data[0,", "#orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs = plt.subplots(3) xs = [] ys = []", ":]])) #iters=any([data[0, :], data[1, :], data[2, :], data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True)", "TrignoAccel from pytrigno import TrignoEMG from pytrigno import TrignoOrientation import numpy as np", "[] y = [] def animate(i, xs, r, p, y): start_time = time.time()", "#time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs = plt.subplots(3) xs = [] ys", "data[1, :], data[2, :], data[3, :]])) #iters=any([data[0, :], data[1, :], data[2, :], data[3,", "import TrignoOrientation import numpy as np from scipy.spatial.transform import Rotation as R import", "matplotlib.animation as animation import time #Reading one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels", "animation import time #Reading one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3", "= sensors_number orientation_channels = 4*sensors_number #for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place", "# #orientation.pair_sensor(1) #print('Place the sensor on the base station magnet to pair') #time.sleep(5)", "axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds ---\" % (time.time() - start_time)) ani = animation.FuncAnimation(fig,", "y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:] p = p[-1000:] y =", "data[1,-1], data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = 
R.from_quat(np.transpose([data[0, :], data[1, :],", "data.sum()) #print(data) sensors_number = 1 acc_channels = 3*sensors_number emg_channels = sensors_number orientation_channels =", "orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the sensor on the base station magnet", "#orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]])) #iters=any([data[0, :], data[1,", "the sensor on the base station magnet to pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) orientation.start()", "pytrigno import TrignoAccel from pytrigno import TrignoEMG from pytrigno import TrignoOrientation import numpy", "y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds ---\" % (time.time()", "data[1, :], data[2, :], data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2])", "= p[-1000:] y = y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f", "p = p[-1000:] y = y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"---", "#t.stop() #print(data.shape, data.sum()) #print(data) sensors_number = 1 acc_channels = 3*sensors_number emg_channels = sensors_number", "%f seconds ---\" % (time.time() - start_time)) ani = animation.FuncAnimation(fig, animate, fargs=(xs, r,", "r = r[-1000:] p = p[-1000:] y = y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r)", "import time #Reading one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2", "scipy.spatial.transform import Rotation as R import matplotlib.pyplot as plt import matplotlib.animation as animation", "as plt import matplotlib.animation as 
animation import time #Reading one sensor accel data:", "acc_channels = 3*sensors_number emg_channels = sensors_number orientation_channels = 4*sensors_number #for quaternion orientation =", ":], data[2, :], data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data))", "y = y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds ---\"", "= 4*sensors_number #for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the sensor on", "from scipy.spatial.transform import Rotation as R import matplotlib.pyplot as plt import matplotlib.animation as", "4*sensors_number #for quaternion orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the sensor on the", "% (time.time() - start_time)) ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p, y), interval=", "if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = R.from_quat(np.transpose([data[0, :],", "numpy as np from scipy.spatial.transform import Rotation as R import matplotlib.pyplot as plt", "#acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:] p = p[-1000:] y = y[-1000:] axs[0].clear()", "plt.subplots(3) xs = [] ys = [] r = [] p = []", "#Reading one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to", "p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:] p = p[-1000:] y", "on the base station magnet to pair') #time.sleep(5) 
#orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig,", "#t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to accel_x, accel_y, accel_z) #t.start() #data=t.read()", "p = [] y = [] def animate(i, xs, r, p, y): start_time", "the base station magnet to pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs", "from pytrigno import TrignoEMG from pytrigno import TrignoOrientation import numpy as np from", ":], data[1, :], data[2, :], data[3, :]])) #iters=any([data[0, :], data[1, :], data[2, :],", "= r[-1000:] p = p[-1000:] y = y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p)", ":]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r", "data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:])", "1 acc_channels = 3*sensors_number emg_channels = sensors_number orientation_channels = 4*sensors_number #for quaternion orientation", "= orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:]", "= [] r = [] p = [] y = [] def animate(i,", "seconds ---\" % (time.time() - start_time)) ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p,", "def animate(i, xs, r, p, y): start_time = time.time() data = orientation.read() if", "= [] p = [] y = [] def animate(i, xs, r, p,", "= time.time() data = 
orientation.read() if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1],", "pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs = plt.subplots(3) xs = []", "data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]])) #iters=any([data[0,", "r[-1000:] p = p[-1000:] y = y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y)", "pytrigno import TrignoEMG from pytrigno import TrignoOrientation import numpy as np from scipy.spatial.transform", "to accel_x, accel_y, accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data) sensors_number = 1", "3 (0,1,2 according to accel_x, accel_y, accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data)", "accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data) sensors_number = 1 acc_channels = 3*sensors_number", "r = [] p = [] y = [] def animate(i, xs, r,", "print(\"--- %f seconds ---\" % (time.time() - start_time)) ani = animation.FuncAnimation(fig, animate, fargs=(xs,", "data[2, :], data[3, :]])) #iters=any([data[0, :], data[1, :], data[2, :], data[3, :]]) orientation_rpy", ":], data[1, :], data[2, :], data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1])", "orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) #acc_x.extend(data[0,:]) #acc_y.extend(data[1,:]) #acc_z.extend(data[2,:]) r = r[-1000:] p", ":], data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx', degrees=True) r.append(orientation_rpy[0]) p.append(orientation_rpy[1]) y.append(orientation_rpy[2]) print(np.shape(data)) 
#acc_x.extend(data[0,:]) #acc_y.extend(data[1,:])", "np from scipy.spatial.transform import Rotation as R import matplotlib.pyplot as plt import matplotlib.animation", "magnet to pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs = plt.subplots(3) xs", "axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds ---\" % (time.time() -", "animate(i, xs, r, p, y): start_time = time.time() data = orientation.read() if any([data[0,-1],", "to pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs = plt.subplots(3) xs =", "#t.start() #data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data) sensors_number = 1 acc_channels = 3*sensors_number emg_channels", "#data=t.read() #t.stop() #print(data.shape, data.sum()) #print(data) sensors_number = 1 acc_channels = 3*sensors_number emg_channels =", "y = [] def animate(i, xs, r, p, y): start_time = time.time() data", "[] ys = [] r = [] p = [] y = []", "r, p, y): start_time = time.time() data = orientation.read() if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]):", "data[3, :]])) #iters=any([data[0, :], data[1, :], data[2, :], data[3, :]]) orientation_rpy = orientation_quat.as_euler('zyx',", "time #Reading one sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according", "Rotation as R import matplotlib.pyplot as plt import matplotlib.animation as animation import time", "pytrigno import TrignoOrientation import numpy as np from scipy.spatial.transform import Rotation as R", "(time.time() - start_time)) ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p, y), interval= 100)", "any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = 
R.from_quat(np.transpose([data[0, :], data[1,", "orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]) #orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :],", "sensor accel data: #t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to accel_x, accel_y,", "import matplotlib.pyplot as plt import matplotlib.animation as animation import time #Reading one sensor", "axs[2].plot(y) print(\"--- %f seconds ---\" % (time.time() - start_time)) ani = animation.FuncAnimation(fig, animate,", "#orientation.is_paired(1) #orientation.is_active(1) orientation.start() orientation.what_mode(1) fig, axs = plt.subplots(3) xs = [] ys =", "as R import matplotlib.pyplot as plt import matplotlib.animation as animation import time #Reading", "= [] y = [] def animate(i, xs, r, p, y): start_time =", ":], data[2, :], data[3, :]])) #iters=any([data[0, :], data[1, :], data[2, :], data[3, :]])", "= TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100) # #orientation.pair_sensor(1) #print('Place the sensor on the base station magnet to", "#acc_z.extend(data[2,:]) r = r[-1000:] p = p[-1000:] y = y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear()", "= y[-1000:] axs[0].clear() axs[1].clear() axs[2].clear() axs[0].plot(r) axs[1].plot(p) axs[2].plot(y) print(\"--- %f seconds ---\" %", "start_time)) ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p, y), interval= 100) plt.show() orientation.stop()", "import TrignoEMG from pytrigno import TrignoOrientation import numpy as np from scipy.spatial.transform import", "start_time = time.time() data = orientation.read() if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]): orientation_quat = R.from_quat([data[0,-1],", "to 3 (0,1,2 according to accel_x, accel_y, accel_z) #t.start() #data=t.read() #t.stop() #print(data.shape, data.sum())", "[] p = [] y = [] def animate(i, xs, r, p, y):", "#print('Place the sensor on the base 
station magnet to pair') #time.sleep(5) #orientation.is_paired(1) #orientation.is_active(1)", "import Rotation as R import matplotlib.pyplot as plt import matplotlib.animation as animation import", "import TrignoAccel from pytrigno import TrignoEMG from pytrigno import TrignoOrientation import numpy as" ]
[ "def createCharts(chartDefs, dbPath, force): charts = {} try: database = db.DBManager(dbPath) database.open(force) for", "= createCharts(loadChartDefs(), dbPath, force) if (len(jsonString)): chartsFile = open(chartsPath, 'w') chartsFile.write(jsonString) chartsFile.close() def", "chartsFile.close() def loadChartDefs(): jsonFile = file(utils.relativePath('charts.json')) jsonDict = json.load(jsonFile) jsonFile.close() chartDefs = {}", "the DB expects a string for (chartID, chartDef) in jsonDict.iteritems(): chartDef[\"sql\"] = \"", "= chartDef return chartDefs def createCharts(chartDefs, dbPath, force): charts = {} try: database", "def columnChart(self, data): chartDict = self.chartJSON columns = zip(*data) # First, labels chartDict['xAxis']['categories']", "self.function) data = self.fetchData() print data return chartMethod(data) def fetchData(self): return self.db.executeAndFetchAll(self.sql) def", ": 'I', 4 : 'II', 6 : 'I/II', 8 : 'III', 12 :", "e: print e sys.exit(1) return json.dumps(charts) class Chart(object): def __init__(self, id, definition, db):", "definition[\"chartJSON\"] def chartDict(self): chartMethod = getattr(self, self.function) data = self.fetchData() print data return", "from . import db from . 
import utils def create(chartsPath, dbPath, force): jsonString", "definition[\"name\"] self.type = definition[\"type\"] self.function = definition[\"function\"] self.chartJSON = definition[\"chartJSON\"] def chartDict(self): chartMethod", "as an array so it's legible in the JSON, but the DB expects", "chartsFile = open(chartsPath, 'w') chartsFile.write(jsonString) chartsFile.close() def loadChartDefs(): jsonFile = file(utils.relativePath('charts.json')) jsonDict =", "= columns[0] # Second, all the data for (index, col) in enumerate(columns[1:]): chartDict['series'][index]['data']", "a string for (chartID, chartDef) in jsonDict.iteritems(): chartDef[\"sql\"] = \" \".join(chartDef[\"sql\"]) chartDefs[chartID] =", "self.db = db self.id = id self.sql = definition[\"sql\"] self.name = definition[\"name\"] self.type", "db self.id = id self.sql = definition[\"sql\"] self.name = definition[\"name\"] self.type = definition[\"type\"]", "return self.db.executeAndFetchAll(self.sql) def pieChart(self, data): chartDict = self.chartJSON chartDict['series'][0]['data'] = data return chartDict", "as e: print e sys.exit(1) return json.dumps(charts) class Chart(object): def __init__(self, id, definition,", "= definition[\"sql\"] self.name = definition[\"name\"] self.type = definition[\"type\"] self.function = definition[\"function\"] self.chartJSON =", "First, labels chartDict['xAxis']['categories'] = columns[0] # Second, all the data for (index, col)", "from . 
import utils def create(chartsPath, dbPath, force): jsonString = createCharts(loadChartDefs(), dbPath, force)", "self.chartJSON chartDict['series'][0]['data'] = data return chartDict def phaseChart(self, data): chartDict = self.chartJSON newData", "def fetchData(self): return self.db.executeAndFetchAll(self.sql) def pieChart(self, data): chartDict = self.chartJSON chartDict['series'][0]['data'] = data", "fetchData(self): return self.db.executeAndFetchAll(self.sql) def pieChart(self, data): chartDict = self.chartJSON chartDict['series'][0]['data'] = data return", "chartDef) in chartDefs.iteritems(): chart = Chart(chartID, chartDef, database) charts[chart.id] = chart.chartDict() database.close() except", "force): charts = {} try: database = db.DBManager(dbPath) database.open(force) for (chartID, chartDef) in", "all the data for (index, col) in enumerate(columns[1:]): chartDict['series'][index]['data'] = col return chartDict", "columns = zip(*data) # First, labels chartDict['xAxis']['categories'] = columns[0] # Second, all the", "16 : 'IV'} for (phase, count) in data: newData.append([phases[phase], count]) chartDict['series'][0]['data'] = newData", "chartDef[\"sql\"] = \" \".join(chartDef[\"sql\"]) chartDefs[chartID] = chartDef return chartDefs def createCharts(chartDefs, dbPath, force):", "self.chartJSON newData = [] phases = {0 : 'None', 1 : '0', 2", "for (chartID, chartDef) in chartDefs.iteritems(): chart = Chart(chartID, chartDef, database) charts[chart.id] = chart.chartDict()", "Have to store the SQL as an array so it's legible in the", "= json.load(jsonFile) jsonFile.close() chartDefs = {} # Have to store the SQL as", "count]) chartDict['series'][0]['data'] = newData return chartDict def columnChart(self, data): chartDict = self.chartJSON columns", ": 'I/II', 8 : 'III', 12 : 'II/III', 16 : 'IV'} for (phase,", "'II/III', 16 : 'IV'} for (phase, count) in data: newData.append([phases[phase], count]) chartDict['series'][0]['data'] =", "createCharts(loadChartDefs(), 
#!/usr/bin/python
# Generate chart JSON from SQL queries defined in charts.json.

import json
import os
import sys

from . import db
from . import utils


def create(chartsPath, dbPath, force):
    """Build all charts from the database at dbPath and write them as JSON
    to chartsPath.  Nothing is written when no chart JSON was produced."""
    jsonString = createCharts(loadChartDefs(), dbPath, force)
    if jsonString:
        # 'with' guarantees the file is closed even if write() raises.
        with open(chartsPath, 'w') as chartsFile:
            chartsFile.write(jsonString)


def loadChartDefs():
    """Load chart definitions from charts.json, keyed by chart id."""
    # open() instead of the deprecated file() builtin (removed in Python 3).
    with open(utils.relativePath('charts.json')) as jsonFile:
        jsonDict = json.load(jsonFile)
    chartDefs = {}
    # Have to store the SQL as an array so it's legible in the JSON,
    # but the DB expects a string.
    for (chartID, chartDef) in jsonDict.items():
        chartDef["sql"] = " ".join(chartDef["sql"])
        chartDefs[chartID] = chartDef
    return chartDefs


def createCharts(chartDefs, dbPath, force):
    """Run every chart definition against the database and return the
    resulting charts serialized as one JSON string.  Exits with status 1
    on any database error (original behavior, kept)."""
    charts = {}
    try:
        database = db.DBManager(dbPath)
        database.open(force)
        for (chartID, chartDef) in chartDefs.items():
            chart = Chart(chartID, chartDef, database)
            charts[chart.id] = chart.chartDict()
        database.close()
    except db.DBException as e:
        print(e)
        sys.exit(1)
    return json.dumps(charts)


class Chart(object):
    """One chart: runs its SQL and folds the rows into its chart JSON template."""

    def __init__(self, id, definition, db):
        self.db = db
        self.id = id
        self.sql = definition["sql"]
        self.name = definition["name"]
        self.type = definition["type"]
        # Name of the shaping method below (pieChart/phaseChart/columnChart).
        self.function = definition["function"]
        self.chartJSON = definition["chartJSON"]

    def chartDict(self):
        """Fetch this chart's data and dispatch to the configured shaping method."""
        chartMethod = getattr(self, self.function)
        data = self.fetchData()
        print(data)  # debug trace of the raw rows (kept from original)
        return chartMethod(data)

    def fetchData(self):
        # Rows come back from the DB layer; presumably a sequence of tuples
        # (columnChart transposes them) -- confirm against db.DBManager.
        return self.db.executeAndFetchAll(self.sql)

    def pieChart(self, data):
        chartDict = self.chartJSON
        chartDict['series'][0]['data'] = data
        return chartDict

    def phaseChart(self, data):
        chartDict = self.chartJSON
        newData = []
        # Map the DB's numeric phase codes to display labels.
        phases = {0: 'None', 1: '0', 2: 'I', 4: 'II', 6: 'I/II',
                  8: 'III', 12: 'II/III', 16: 'IV'}
        for (phase, count) in data:
            newData.append([phases[phase], count])
        chartDict['series'][0]['data'] = newData
        return chartDict

    def columnChart(self, data):
        chartDict = self.chartJSON
        # Transpose rows into columns; list() so indexing also works where
        # zip() returns an iterator (Python 3).  No-op on Python 2.
        columns = list(zip(*data))
        # First, labels
        chartDict['xAxis']['categories'] = columns[0]
        # Second, all the data
        for (index, col) in enumerate(columns[1:]):
            chartDict['series'][index]['data'] = col
        return chartDict


def main():
    """Minimal CLI entry point: <chartsPath> <dbPath> [force].

    Fix: main() was referenced by the __main__ guard below but never
    defined, so running the module as a script raised NameError.
    """
    if len(sys.argv) < 3:
        sys.stderr.write("usage: %s <chartsPath> <dbPath> [force]\n"
                         % os.path.basename(sys.argv[0]))
        sys.exit(2)
    create(sys.argv[1], sys.argv[2], len(sys.argv) > 3)


# Default function is main()
if __name__ == '__main__':
    main()
# <gh_stars>0  -- repository metadata residue; commented out because the bare
#               token was a SyntaxError at the top of the file.
##
# Created by sennhviwang
# Time: Sun Sep 27 16:23:38 CST 2015.
#
# Merge sort
# Data Structure: Array
# Time Complexity-Best: O(nlogn) typical, O(n) natural variant
# Time Complexity-Average: O(nlogn)
# Time Complexity-Worst: O(nlogn)
# Space Complexity-Worst: O(n) auxiliary
#
# Merge sort is an O(nlogn) comparison-based sorting algorithm. Most
# implementations produce a stable sort, which means that the implementation
# preserves the input order of equal elements in the sorted output.
#
# Mergesort is a divide and conquer algorithm which has top-down and
# bottom-up implementations:
# It divides the unsorted list into n sublists, each containing 1 element
# (a list of 1 element is considered sorted).
# Then repeatedly merge sublists to produce new sorted sublists until there
# is only 1 sublist remaining.  This will be the sorted list.
# cite: wikipedia


def merge(left, right):
    """Merge two already-sorted lists into one sorted list.

    Takes from `left` on ties (<=), which is what keeps the sort stable.
    """
    i, j = 0, 0
    result = []
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # At most one of these tails is non-empty, and both are already sorted.
    result += left[i:]
    result += right[j:]
    return result


def merge_sort(lists):
    """Return a new, sorted copy of `lists` via top-down merge sort."""
    if len(lists) <= 1:
        return lists
    # Fix: use floor division.  int(len(lists) / 2) goes through float
    # true-division on Python 3 and can lose precision for huge lengths.
    num = len(lists) // 2
    left = merge_sort(lists[:num])
    right = merge_sort(lists[num:])
    return merge(left, right)


if __name__ == '__main__':
    a = [23, 4, 5, 76, 21, 54, 6, 7, 123, 6567, 432, 872, 23]
    print(merge_sort(a))
[ "\"\"\" Calculates the Kolmogorov-Smirnow Test Integral (KSI) The KSI and OVER metrics were", "(e.g. capacity factor, mean csi) :param taxis (optional): Axis along which the means", ":param y: Vector of forecasts :returns: Correlation Coefficient \"\"\" assert len(x) == len(y)", "return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1):", "xdiff * xdiff ydiff2 += ydiff * ydiff cnt += 1 if cnt", "an observation and forecast vector are given. Additionaly a normalizing value must be", ":returns: MBE \"\"\" if taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def", "and description of some from Zhang et al., 2013, Metrics for Evaluating the", "is the third standardized moment Assuming that forecast errors are equal to forecast", "ability of the classifier not to label as positive a sample that is", "along which the means are computed :returns: MAPE \"\"\" if taxis >= 0:", "of metrics\"\"\" a = \"Number of measurements = %d (%.2f) \\n \" %", "Interval distance d = ( Pmax - Pmin ) / m ksi =", "Assume len(x) == len(y) n = len(x) sum_x = float(sum(x)) sum_y = float(sum(y))", "0. Look at sklearn.metrics.precision_score for details how to use In case of binary", "- avg_y diffprod += xdiff * ydiff xdiff2 += xdiff * xdiff ydiff2", "were proposed by Espinar et al. 12. The Kolmogorov–Smirnov (KS) test is a", "return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) ) else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32)", "# Difference deltak = np.subtract(csi0,csi1) # calculate standard deviation only if number of", "rmse(x,y,taxis=-1): \"\"\" Calculates root mean square error (RMSE) if an observation and forecast", "dominates the forecast. By definition a persistence forecast has a sscore = 0.", "are compared. 
Description: The MBE metric intends to indicate average forecast bias. Understanding", "of irradiance forecasts :param cls: vector of clear sky reference values :param cmin:", "default is 50 W/m2. :returns: Solar irradiance variability score ( scalar ) VI", "Integral (KSI) The KSI and OVER metrics were proposed by Espinar et al.", "irradiance over a subset time window of Nw data points\" :param x: vector", "\"\"\" Calculate mean absolute percentage error (MAPE) if an observation and forecast vector", "difference between the 75th percentile and the 25th percentile. This function returns the", "+ \"CORR = %.4f \\n \" % pearson(x, y) if p != \"\":", "as the standard deviation of a model forecast error divided by the esti-", "\"proposed metric for evaluation of solar forecasting models\" Description: \"Solar variability V is", "kurtosis of a sample distribution and that of the normal distribution is known", "return iqr_x - iqr_y def r2(y,x): \"\"\" Calculates coefficient of determination R^2 Description:", "metric for quantifying irradiance and pv output variability\" Description: Solar Variability VI over", "values :param t: int, optional: Timelag/stepsize t in indizes for increments :param cmin:", "synonymously with excess kurtosis. A distribution with a positive kurtosis value is known", "np.nan x[cls<=cmin] = np.nan return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs):", "clear sky reference to be used in the calculations. default is 50 W/m2.", "def VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability index defined by Stein et al. \"The", "absolute error (MaxAE) if an observation and forecast vector are given. 
Both vectors", "clear sky index for time t csi1 = np.divide(x[:,0],y[:,0]) # Difference deltak =", "== False) & (y_true == True),axis=taxis) return np.divide( (TP + TN) , float((TP", "Pmin = np.min(x) # Interval distance d = ( Pmax - Pmin )", "float(sum(x)) sum_y = float(sum(y)) sum_x_sq = sum(map(lambda x: pow(x, 2), x)) sum_y_sq =", "positive TP and false positive FP for the given range of thresholds \"\"\"", "number of very small forecast errors :param x: vector of observations :param y:", "the means are computed :returns: Correlation Coefficient \"\"\" ndims = X.ndim assert ndims", "sum_x = float(sum(x)) sum_y = float(sum(y)) sum_x_sq = sum(map(lambda x: pow(x, 2), x))", "same index are compared. Description: Skewness is a measure of the asymmetry of", "over-forecasting events are not equal. An over-forecasting tendency could lead to a less", "of forecasts :returns: Skewness \"\"\" from scipy.stats import skew return skew(x-y) def kurtosis(x,y):", "+= 1 if cnt == 0: return np.nan return diffprod / np.sqrt(xdiff2 *", "idx in range(n): if np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff = x[idx] - avg_x", "1s. \"\"\" #from sklearn.metrics import accuracy_score TP = np.sum((y_pred == True) & (y_true", "forecast :returns: FS \"\"\" err1 = rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return ( 1", "> 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan return V, deltak def", "the measured solar irradiance to that of a clear sky solar irradiance so", "defined as the maximum value of the absolute difference between two cumulative distribution", "that of a clear sky solar irradiance so that the diurnal variability is", "the second dimension. Increments are calculated in the second dimension, while iterating is", "in the second dimension. 
Increments are calculated in the second dimension, while iterating", "time t+deltat csi0 = np.divide(x[:,t],y[:,t]) # clear sky index for time t csi1", "values :param t: timelag for variability calculations :param cmin: minimum values of clear", "Error(Reference) ) :param x: Vector of observation values :param y: Vector of forecast", "iqr_x = np.percentile(x,75) - np.percentile(x,25) iqr_y = np.percentile(y,75) - np.percentile(y,25) return iqr_x -", "a comparison of the variance of the errors to the variance of the", "ths: y_pred = y >= th y_true = x >= th TP[cnt] =", "r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody) ) r = np.divide(r_num,r_den) return r def", "to determine if two data sets are significantly different. The KS statistic D", "* y, x, y)) num = psum - (sum_x * sum_y/n) den =", "for under-forecasting and over-forecasting events are not equal. An over-forecasting tendency could lead", "fp the number of false positives. The precision is intuitively the ability of", "et al. \"The variability index: A new and novel metric for quantifying irradiance", "is a nonparametric test to determine if two data sets are significantly different.", "if np.sum(np.isfinite(deltak)) > 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan return V,", ">= 0: r_num = np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num", "are significantly different. The KS statistic D is defined as the maximum value", "conversely, how fat-tailed the distribution is, and is the fourth standardized moment The", "datapoints is large enough if np.sum(np.isfinite(deltak)) > 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V", "standardized moment The difference between the kurtosis of a sample distribution and that", "Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if taxis == 0: Xm = Xm.T if taxis", "an improved solar forecasting skill. 
:param x: Vector of obserations :param y: Vector", "0 means the variability dominates the forecast. By definition a persistence forecast has", "that forecast errors are equal to forecast power minus actual power, a positive", "difference of two IQR. Input: :param x: Vector of observation values :param y:", "the range :returns tp,fp: returns vector of true positive TP and false positive", "minmax[1] - minmax[0] cnt = 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths:", "you can use boolean arrays or just 0 or 1s. \"\"\" from sklearn.metrics", "0: ym = ym.T else: Xm = np.nanmean(X, dtype=np.float32) ym = np.nanmean(y, dtype=np.float32)", "sky index for time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) # clear", "np.isnan(y[idx]): continue xdiff = x[idx] - avg_x ydiff = y[idx] - avg_y diffprod", "capacity factor, average CSI,... Both vectors must have same length, so pairs of", "ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation Coefficient (with axis functionality) Description: Pearson’s", "indizes for increments :param cmin: float, optional: minimum values of clear sky reference", "not sufficient. N=\",N,\"<35\") return np.nan Vc = 1.63 / np.sqrt(N) D = np.max(cdf_x", "== True),axis=taxis) / float(wh) FP[cnt] = np.sum((y_pred == True) & (y_true == False),axis=taxis)", "biae error (MBE) if an observation and forecast vector are given. Both vectors", "and forecast vector are given. Both vectors must have same length, so pairs", "/ Error(Reference) ) :param x: Vector of observation values :param y: Vector of", "(2012) \"proposed metric for evaluation of solar forecasting models\" Description: \"Solar variability V", "x)) sum_y_sq = sum(map(lambda x: pow(x, 2), y)) psum = sum(map(lambda x, y:", "has been widely used in regression problems and by the renewable energy industry", "index are compared. 
Description: The MAE has been widely used in regression problems", "label as positive a sample that is negative. The best value is 1", "so pairs of elements with same index are compared. Description: The MBE metric", "subsequent anIn [142]: U alysis, the term kurtosis will be treated synonymously with", "large enough if np.sum(np.isfinite(deltak)) > 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan", "(optional): Axis along which the means are computed :returns: Correlation Coefficient \"\"\" ndims", "for quantifying irradiance and pv output variability\" Description: Solar Variability VI over a", "np.nansum(prodx) * np.nansum(prody) ) r = np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1): \"\"\" Calculates", "Pmax - Pmin ) / m ksi = np.sum(D) def pearsonr(x, y): #", "elements with same index are compared. Description: The RMSE provides a global error", "fat-tailed the distribution is, and is the fourth standardized moment The difference between", "60s = %.4f \\n \" % sscore(x, y, c, 60) if p !=", "% np.nanmean(c) a = a + \"SSCORE 60s = %.4f \\n \" %", ":param x: Vector of observation values :param y: Vector of forecast values :returns", "timelag for variability calculations :param cmin: minimum values of clear sky reference to", "forecast errors. The MaxAE metric is useful to evaluate the forecasting of short-term", "of irradiance forecasts :param cls: vector of clear sky reference values :param t:", "forecast uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0):", "to an under-forecasting tail. 
The tendency to over-forecast (or under-forecast) is important in", "as the maximum value of the absolute difference between two cumulative distribution functions", "== 1: # clear sky index for time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0", "TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score: In case of binary forecasts", "a + \"CORR = %.4f \\n \" % pearson(x, y) if p !=", "irange = slice(s,e) items = [slice(None, None, None)] * ndims items[taxis] = irange", "np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a = a + \"Number of forecasts", "D = np.max(cdf_x - cdf_y) # Observation maximum and minimum Pmax = np.max(x);", "if number of datapoints is large enough if np.sum(np.isfinite(deltak)) > 5: V =", "thresholds \"\"\" if taxis >= 0: shape = list(x.shape) wh = shape[taxis] shape[taxis]", "\"SSCORE Persistence 60s = %.4f \\n \" % sscore(x, p, c, 60) return", "y)) psum = sum(map(lambda x, y: x * y, x, y)) num =", "the 75th percentile and the 25th percentile. This function returns the difference of", "defined forecast uncertainity U and the timeseries variability V. sscore = 1 means", "metric; a larger value of Pearson’s correlation coefficient indicates an improved solar forecasting", "def maxae(x,y, taxis=-1): \"\"\" Calculates maximum absolute error (MaxAE) if an observation and", "to the variance of the data which is to be modeled Input: :param", "is the standard deviation of the step-changes of the measured solar irradiance to", "calculated as the standard deviation of all increments. :param x: float vector of", "above defined forecast uncertainity U and the timeseries variability V. sscore = 1", "if an observation and forecast vector are given. 
Additionaly a normalizing value must", "comparison of the variance of the errors to the variance of the data", "corresponding clear sky irradiance values :param t: int, optional: Timelag/stepsize t in indizes", "c: clear sky vector :param p: reference vector :returns a: a string with", "overall forecast bias (over- or under- forecasting) would allow power system operators to", "sky reference values :param cmin: minimum values of clear sky reference to be", "distribution of the forecast error if an observation and forecast vector are given.", "/ den def pearson(x, y): \"\"\" Calculates Pearson Correlation Coefficient Description: Pearson’s correlation", "along this axis. If two-dimensional vectors are provided subsequent instances must be in", "% (np.nanmean(x), np.nanmean(x / c)) a = a + \"MEAN FOR = %.4f", ":returns: V = solar variability \"\"\" def slc(arr,s,e,ndims): \"\"\" returns the input array", "\"RMSE = %.4f \\n \" % rmse(x, y) a = a + \"BIAS", "distribution functions (CDFs), expressed as :param x: Vector of observation values :param y:", "len(x) assert n > 0 avg_x = np.nanmean(x ) avg_y = np.nanmean(y, dtype=np.float32)", "treated synonymously with excess kurtosis. A distribution with a positive kurtosis value is", "#from sklearn.metrics import accuracy_score TP = np.sum((y_pred == True) & (y_true == True),axis=taxis)", "KS statistic D is defined as the maximum value of the absolute difference", "which need to be corrected through the starting of more expensive, but faster", "error measure metric, which, unlike the RMSE metric, does not excessively account for", "given, e.g. capacity factor, average CSI,... Both vectors must have same length, so", "entire forecasting period. :param x: vector of observations :param y: vector of forecasts", "index are compared. 
Description: The MBE metric intends to indicate average forecast bias.", "Description: \"Solar variability V is the standard deviation of the step-changes of the", "scipy.stats import kurtosis return kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates Interquartile Range Difference (IQR", ":param x: Vector of observation values :param y: Vector of forecast values :returns:", "over a subset time window of Nw data points\" :param x: vector of", "return r def rmse(x,y,taxis=-1): \"\"\" Calculates root mean square error (RMSE) if an", "of forecast errors. The MaxAE metric is useful to evaluate the forecasting of", "+ t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return VI def", "50 W/m2. :returns sscore: \"\"\" y[cls<=cmin] = np.nan x[cls<=cmin] = np.nan return 1", "prodx = np.multiply( diffx, diffx ) prody = np.multiply( diffy, diffy ) prodx[np.isnan(prod1)]", "np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) )) def maxae(x,y, taxis=-1): \"\"\" Calculates maximum absolute error", "r def rmse(x,y,taxis=-1): \"\"\" Calculates root mean square error (RMSE) if an observation", "Axis along which the means are computed :returns: MBE \"\"\" if taxis >=", "np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a = a + \"RMSE = %.4f \\n \" %", "== len(y) n = len(x) assert n > 0 avg_x = np.nanmean(x )", "clear sky reference values :param t: timelag for variability calculations :param cmin: minimum", "items = [slice(None, None, None)] * ndims items[taxis] = irange return arr[tuple(items)] nd", "prints(x, y, c, p=\"\"): \"\"\" Gives a summary of error metrics :param x:", "np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute percentage error (MAPE) if an observation", ":return U: forecast uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) )", "increments. 
:param x: float vector of irradiance values :param cls: float vector of", "(IQR Diff) of a two given datasets Description: (not from the paper) IQR", "and Coimbra (2012) \"proposed metric for evaluation of solar forecasting models\" Description: The", "less than optimal number of large thermal units being committed, which need to", "ydiff = y[idx] - avg_y diffprod += xdiff * ydiff xdiff2 += xdiff", "\"length\" of the measured irradiance plotted against time divided by the \"length\" of", "= [slice(None, None, None)] * ndims items[taxis] = irange return arr[tuple(items)] nd =", "clear sky index for time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) #", ":param nbins: number of bins/thresholds inside the range :returns tp,fp: returns vector of", "Solar Power Forecasting, conference paper, 3rd International Workshop on Integration of Solar Power", "of Pearson’s correlation coefficient indicates an improved solar forecasting skill. :param x: Vector", "forecasting models\" Description: The metric sscore is calculated as the ratio of the", "\"\"\" y = cls.copy() y[cls<=cmin] = np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2", "forecast values :returns: R^2 \"\"\" r2 = 1 - ( np.nanvar(y-x) / np.nanvar(x)", "#csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) # clear sky index for time t", "error (MaxAE) if an observation and forecast vector are given. Both vectors must", "index are compared. Description: The RMSE provides a global error measure during the", "pearson(x, y): \"\"\" Calculates Pearson Correlation Coefficient Description: Pearson’s correlation coefficient is a", "FP = np.sum((y_pred == True) & (y_true == False),axis=taxis) FN = np.sum((y_pred ==", "are normalized to a given value. :param x: vector of observations :param y:", "instances must be in the second dimension. 
Increments are calculated in the second", "the asymmetry of the probability distribution, and is the third standardized moment Assuming", "import kurtosis return kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates Interquartile Range Difference (IQR Diff)", "= np.sum(D) def pearsonr(x, y): # Assume len(x) == len(y) n = len(x)", "Persistence 60s = %.4f \\n \" % sscore(x, p, c, 60) return a", "rmse(x, y) a = a + \"BIAS = %.4f \\n \" % mbe(x,", "MaxAE metric is useful to evaluate the forecasting of short-term extreme events in", "is useful to evaluate the forecasting of short-term extreme events in the power", "sky reference to be used in the calculations. default is 50 W/m2. :return", "= 0 ydiff2 = 0 cnt = 0 for idx in range(n): if", "used in the calculations. default is 50 W/m2. :return U: forecast uncertainty \"\"\"", "def ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow Test Integral (KSI) The KSI and OVER", "else: TP = np.empty(nbins) FP = np.empty(nbins) x = x.flatten() y = y.flatten()", "given. Additionaly a normalizing value must be given, e.g. capacity factor, average CSI,...", "means are computed :returns: MAE \"\"\" if taxis >= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32)", "of bins/thresholds inside the range :returns tp,fp: returns vector of true positive TP", "can use boolean arrays or just 0 or 1s. \"\"\" from sklearn.metrics import", "error (RMSE) if an observation and forecast vector are given. Both vectors must", "two cumulative distribution functions (CDFs), expressed as :param x: Vector of observation values", "# Interval distance d = ( Pmax - Pmin ) / m ksi", "elements with same index are compared. Description: Same as MAE but normalized differences", "y[cls<=cmin] = np.nan if nd == 1: # clear sky index for time", "of thresholds, give a tupel (e.g. 
(0,1) in ) :param nbins: number of", "cls.copy() # don't use values for low irradiance values y[cls<=cmin] = np.nan if", "of some from Zhang et al., 2013, Metrics for Evaluating the Accuracy of", "psum - (sum_x * sum_y/n) den = pow((sum_x_sq - pow(sum_x, 2) / n)", "dtype=np.float32), axis=taxis) ) else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) )) def maxae(x,y, taxis=-1):", "np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast", "1 if cnt == 0: return np.nan return diffprod / np.sqrt(xdiff2 * ydiff2)", "\\n \" % mbe(x, y) a = a + \"CORR = %.4f \\n", "been widely used in regression problems and by the renewable energy industry to", "between the kurtosis of a sample distribution and that of the normal distribution", "models\" Description: \"Solar variability V is the standard deviation of the step-changes of", "irradiance variability score ( scalar ) VI \"\"\" y = cls.copy() y[cls<=cmin] =", "np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric for", "excess kurtosis. In the subsequent anIn [142]: U alysis, the term kurtosis will", "Description: The MaxAE is an indicative of local deviations of forecast errors. The", "= rmse(x,p,taxis=taxis) return ( 1 - np.divide(err1,err2) ) def skewness(x,y): \"\"\" Calculate skewness", "+ \"MEAN CLS = %.4f \\n \" % np.nanmean(c) a = a +", "= Xm.T if taxis == 0: ym = ym.T else: Xm = np.nanmean(X,", "taxis (optional): Axis along which the means are computed :returns: MaxAE \"\"\" if", "same index are compared. Description: The MAE has been widely used in regression", "difference between the kurtosis of a sample distribution and that of the normal", "al. 
\"The variability index: A new and novel metric for quantifying irradiance and", "(FS) FS is defined as 1 - ( Error(Forecast) / Error(Reference) ) :param", "np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\"", "metrics\"\"\" a = \"Number of measurements = %d (%.2f) \\n \" % (x.shape[0]", "reference vector :returns a: a string with a number of metrics\"\"\" a =", "where tp is the number of true positives and fp the number of", "\"The variability index: A new and novel metric for quantifying irradiance and pv", "= a + \"MEAN CLS = %.4f \\n \" % np.nanmean(c) a =", "& (y_true == True),axis=taxis) return np.divide( (TP + TN) , float((TP + FP", "use boolean arrays or just 0 or 1s. \"\"\" from sklearn.metrics import precision_score", "and minimum Pmax = np.max(x); Pmin = np.min(x) # Interval distance d =", "kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates Interquartile Range Difference (IQR Diff) of a two", "ym = np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1 = np.multiply(", "negative. The best value is 1 and the worst value is 0. Look", "of elements with same index are compared. 
Description: The MAE has been widely", "represent a large number of very small forecast errors :param x: vector of", "x: Vector of observation values :param y: Vector of forecast values :returns: IQR", "vector of clear sky index increments :returns: V = solar variability \"\"\" def", "Solar Variability VI over a period of time is calculated as the ratio", "n), 0.5) if den == 0: return 0 return num / den def", "a number of metrics\"\"\" a = \"Number of measurements = %d (%.2f) \\n", "kurtosis value is known as leptokurtic, which indicates a peaked distribution; whereas a", "U: forecast uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) ) def", "th y_true = x >= th TP[cnt] = np.sum((y_pred == True) & (y_true", "%.4f \\n \" % FS(x, y, p) a = a + \"MEAN OBS", "cmin: minimum values of clear sky reference to be used in the calculations.", "if two data sets are significantly different. The KS statistic D is defined", "peaks of the leptokurtic distribution represent a large number of very small forecast", "evaluate the forecasting of short-term extreme events in the power system. 
:param x:", "- pow(sum_y, 2) / n), 0.5) if den == 0: return 0 return", "a peaked distribution; whereas a negative kurtosis indicates a flat data distribution, known", "are computed :returns: MBE \"\"\" if taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return", "Assuming that forecast errors are equal to forecast power minus actual power, a", "Power Systems \"\"\" def ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow Test Integral (KSI) The", "== 2: # clear sky index for time t+deltat csi0 = np.divide(x[:,t],y[:,t]) #", "window of Nw data points\" :param x: vector of irradiance values :param y:", "r_num = np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody) ) r = np.divide(r_num,r_den)", "measured solar irradiance to that of a clear sky solar irradiance so that", "metric intends to indicate average forecast bias. Understanding the overall forecast bias (over-", "continue xdiff = x[idx] - avg_x ydiff = y[idx] - avg_y diffprod +=", "for low irradiance values y[cls<=cmin] = np.nan if nd == 1: # clear", "binary forecasts you can use boolean arrays or just 0 or 1s. \"\"\"", "p) a = a + \"SSCORE Persistence 60s = %.4f \\n \" %", "score: In case of binary forecasts you can use boolean arrays or just", ":param minmax: range of thresholds, give a tupel (e.g. (0,1) in ) :param", "= %d (%.2f) \\n \" % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) /", "cls: vector of clear sky reference values :param cmin: minimum values of clear", "would be ~ 1. The same is for very overcast days. Higher variability", "forecasting skill. :param x: Vector of obserations :param y: Vector of forecasts :param", "in the calculations. default is 50 W/m2. :returns: Solar irradiance variability score (", "´taxis´\"\"\" irange = slice(s,e) items = [slice(None, None, None)] * ndims items[taxis] =", "test to determine if two data sets are significantly different. 
The KS statistic", "csi) :param taxis (optional): Axis along which the means are computed :returns: MAPE", "resources for compensating forecast errors in the dispatch process. :param x: vector of", "The precision is intuitively the ability of the classifier not to label as", "reference to be used in the calculations. default is 50 W/m2. :returns sscore:", "np.nan return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): \"\"\" Compute the", "metric for evaluation of solar forecasting models\" Description: \"Solar variability V is the", "\" % mbe(x, y) a = a + \"CORR = %.4f \\n \"", "error metrics :param x: observation vector :param y: forecast vector :param c: clear", "values of clear sky reference to be used in the calculations. default is", "are given. Additionaly a normalizing value must be given, e.g. capacity factor, average", "def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill (FS) FS is defined as 1 -", "= a + \"CORR = %.4f \\n \" % pearson(x, y) if p", "V, deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability index defined by Stein et", "Integration of Solar Power into Power Systems \"\"\" def ksi(fcst,obs): \"\"\" Calculates the", "\" % pearson(x, y) if p != \"\": a = a + \"FS", "need to be corrected through the starting of more expensive, but faster starting,", "def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute percentage error (MAPE) if an observation and", "np.multiply( diffy, diffy ) prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] = np.nan if taxis >=", "variability calculations :param cmin: minimum values of clear sky reference to be used", "for Evaluating the Accuracy of Solar Power Forecasting, conference paper, 3rd International Workshop", "float(sum(y)) sum_x_sq = sum(map(lambda x: pow(x, 2), x)) sum_y_sq = sum(map(lambda x: pow(x,", "float, optional: minimum values of clear sky reference to be used in the", "the means are computed 
:returns: MaxAE \"\"\" if taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32)", "\" % (np.nanmean(x), np.nanmean(x / c)) a = a + \"MEAN FOR =", "function returns the difference of two IQR. Input: :param x: Vector of observation", "index increments :returns: V = solar variability \"\"\" def slc(arr,s,e,ndims): \"\"\" returns the", "(MAPE) if an observation and forecast vector are given. Additionaly a normalizing value", "= np.empty(nbins) FP = np.empty(nbins) x = x.flatten() y = y.flatten() wh =", "\"\"\" Calculates a variability index defined by Stein et al. \"The variability index:", "in the calculations. default is 50 W/m2. :returns sscore: \"\"\" y[cls<=cmin] = np.nan", "if taxis >= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\"", "the step-changes of the measured solar irradiance to that of a clear sky", "= np.nan return V, deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability index defined", "\\n \" % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a =", "RMSE \"\"\" if taxis >= 0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) )", "deltak = np.subtract(csi0,csi1) # calculate standard deviation only if number of datapoints is", "sscore = 0. A negative sscore means that the forecast performs worse than", "sky irradiance plotted against time. 
On a clear day, VI would be ~", "correlation coefficientq is a global error measure metric; a larger value of Pearson’s", "y: vector of irradiance forecasts :param cls: vector of clear sky reference values", "of large thermal units being committed, which need to be corrected through the", "anIn [142]: U alysis, the term kurtosis will be treated synonymously with excess", "== 0: return 0 return num / den def pearson(x, y): \"\"\" Calculates", "\"\"\" returns the input array ´arr´ sliced from ´s´ to ´e´ at the", "a = a + \"CORR = %.4f \\n \" % pearson(x, y) if", "given. Both vectors must have same length, so pairs of elements with same", "measure during the entire forecasting period. :param x: vector of observations :param y:", "None, None)] * ndims items[taxis] = irange return arr[tuple(items)] nd = x.ndim y", "0 avg_x = np.nanmean(x ) avg_y = np.nanmean(y, dtype=np.float32) diffprod = 0 xdiff2", "= x[idx] - avg_x ydiff = y[idx] - avg_y diffprod += xdiff *", "= a + \"RMSE = %.4f \\n \" % rmse(x, y) a =", "avg_x ydiff = y[idx] - avg_y diffprod += xdiff * ydiff xdiff2 +=", ":param x: vector if irradiance values :param cls: vector of clear sky reference", "r = np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1): \"\"\" Calculates root mean square error", "is negative. The best value is 1 and the worst value is 0.", "´e´ at the specified axis ´taxis´\"\"\" irange = slice(s,e) items = [slice(None, None,", "3 if taxis >= 0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32)", "Power Forecasting, conference paper, 3rd International Workshop on Integration of Solar Power into", "International Workshop on Integration of Solar Power into Power Systems \"\"\" def ksi(fcst,obs):", "% (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a = a +", "1.63 / np.sqrt(N) D = np.max(cdf_x - cdf_y) # Observation maximum and minimum", "same index are compared. 
Description: Same as MAE but normalized differences are normalized", "are calculated with an moving window along this axis. If two-dimensional vectors are", "). Increments are calculated with an moving window along this axis. If two-dimensional", "for idx in range(n): if np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff = x[idx] -", "True) & (y_true == False),axis=taxis) / float(wh) #print th, TP[cnt], FP[cnt] cnt +=", ":param c: clear sky vector :param p: reference vector :returns a: a string", "clear sky irradiance plotted against time. On a clear day, VI would be", "as the excess kurtosis. In the subsequent anIn [142]: U alysis, the term", "* ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation Coefficient (with axis functionality) Description:", "( Pmax - Pmin ) / m ksi = np.sum(D) def pearsonr(x, y):", "under-forecasting and over-forecasting events are not equal. An over-forecasting tendency could lead to", "index for time t+deltat csi0 = np.divide(x[:,t],y[:,t]) # clear sky index for time", "functions (CDFs), expressed as :param x: Vector of observation values :param y: Vector", "small forecast errors :param x: vector of observations :param y: vector of forecasts", "/ c)) a = a + \"MEAN FOR = %.4f (%.3f) \\n \"", "MBE metric intends to indicate average forecast bias. Understanding the overall forecast bias", "sets are significantly different. The KS statistic D is defined as the maximum", "to a given value. 
:param x: vector of observations :param y: vector of", "taxis >= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate", "through the starting of more expensive, but faster starting, units in the dispatch", "values :returns: R^2 \"\"\" r2 = 1 - ( np.nanvar(y-x) / np.nanvar(x) )", "sklearn.metrics import precision_score return precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating", "\\n \" % pearson(x, y) if p != \"\": a = a +", "computed :returns: MAPE \"\"\" if taxis >= 0: return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32)", "in that the system actions taken to correct for under-forecasting and over-forecasting events", "is large enough if np.sum(np.isfinite(deltak)) > 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V =", "which the means are computed :returns: MAE \"\"\" if taxis >= 0: return", "the entire forecasting period. :param x: vector of observations :param y: vector of", "vector of forecasts :returns: Skewness \"\"\" from scipy.stats import skew return skew(x-y) def", "V as introduced in Marquez and Coimbra (2012) \"proposed metric for evaluation of", "sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric for evaluating solar forecast models proposed by Marquez", "(CDFs), expressed as :param x: Vector of observation values :param y: Vector of", "np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) # clear sky index for time t #csi1 =", "subset time window of Nw data points\" :param x: vector of irradiance values", "to indicate average forecast bias. Understanding the overall forecast bias (over- or under-", "and by the renewable energy industry to evaluate forecast performance. The MAE metric", "diffy ) prodx = np.multiply( diffx, diffx ) prody = np.multiply( diffy, diffy", "0 or 1s. 
\"\"\" #from sklearn.metrics import accuracy_score TP = np.sum((y_pred == True)", "= ym.T else: Xm = np.nanmean(X, dtype=np.float32) ym = np.nanmean(y, dtype=np.float32) diffx =", "(optional): Axis along which the means are computed :returns: MAE \"\"\" if taxis", ":param x: vector of observations :param y: vector of forecasts :returns: Skewness \"\"\"", "irradiance and pv output variability\" Description: Solar Variability VI over a period of", "- np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a = a + \"RMSE =", "units being committed, which need to be corrected through the starting of more", "then calculated as the standard deviation of all increments. :param x: float vector", "short-term extreme events in the power system. :param x: vector of observations :param", "= np.sum((y_pred == False) & (y_true == True),axis=taxis) return np.divide( (TP + TN)", "defined as 1 - ( Error(Forecast) / Error(Reference) ) :param x: Vector of", "coefficient indicates an improved solar forecasting skill. :param x: Vector of obserations :param", "the probability distribution of the forecast error if an observation and forecast vector", "x: Vector of obserations :param y: Vector of forecasts :returns: Correlation Coefficient \"\"\"", "return np.nan return diffprod / np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson", "Calculates solar variability V as introduced in Marquez and Coimbra (2012) \"proposed metric", "2) / n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5) if den", "of clear sky reference values :param t: timelag for variability calculations :param cmin:", "X.ndim assert ndims < 3 if taxis >= 0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32)", "the power system. 
:param x: vector of observations :param y: vector of forecasts", "is, and is the fourth standardized moment The difference between the kurtosis of", "The KS statistic D is defined as the maximum value of the absolute", "%d (%.2f) \\n \" % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0]))", "y: Vector of forecast values :returns ksi: The KSI \"\"\" m = 100.0", "= %.4f \\n \" % rmse(x, y) a = a + \"BIAS =", "(optional): Axis along which the means are computed :returns: MaxAE \"\"\" if taxis", "MAE has been widely used in regression problems and by the renewable energy", "(RMSE) if an observation and forecast vector are given. Both vectors must have", "is 50 W/m2. :returns: Solar irradiance variability score ( scalar ) VI \"\"\"", ", float((TP + FP + FN + TN))) #return accuracy_score(y_true, y_pred, **kwargs) def", "minutes :param cmin: minimum values of clear sky reference to be used in", "0: return 0 return num / den def pearson(x, y): \"\"\" Calculates Pearson", "variability \"\"\" def slc(arr,s,e,ndims): \"\"\" returns the input array ´arr´ sliced from ´s´", "Look at sklearn.metrics.precision_score for details how to use In case of binary forecasts", "\" % sscore(x, y, c, 60) if p != \"\": a = a", "= np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return", "= ym.reshape(ym.shape[0],1) if taxis == 0: Xm = Xm.T if taxis == 0:", "to label as positive a sample that is negative. The best value is", "have same length, so pairs of elements with same index are compared. Description:", "that the diurnal variability is neglected.\" This method can use single-dimensional obervation and", "observation and forecast vector are given. 
Additionaly a normalizing value must be given,", "sum(map(lambda x: pow(x, 2), y)) psum = sum(map(lambda x, y: x * y,", "Pmax = np.max(x); Pmin = np.min(x) # Interval distance d = ( Pmax", "deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability index defined by Stein et al.", "t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0):", "is the number of true positives and fp the number of false positives.", "system. :param x: vector of observations :param y: vector of forecasts :param taxis", "vector :param y: forecast vector :param minmax: range of thresholds, give a tupel", "a + \"RMSE = %.4f \\n \" % rmse(x, y) a = a", "CLS = %.4f \\n \" % np.nanmean(c) a = a + \"SSCORE 60s", "if taxis == 0: Xm = Xm.T if taxis == 0: ym =", ") def skewness(x,y): \"\"\" Calculate skewness of the probability distribution of the forecast", "than a persistence forecast. :param x: vector of irradiance values :param y: vector", "indicate average forecast bias. Understanding the overall forecast bias (over- or under- forecasting)", "definition a persistence forecast has a sscore = 0. A negative sscore means", ",dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate mean biae error (MBE) if an observation and", "vector :param minmax: range of thresholds, give a tupel (e.g. (0,1) in )", "mae(x,y,taxis=-1): \"\"\" Calculate mean absolute error (MaxAE) if an observation and forecast vector", "Additionaly a normalizing value must be given, e.g. capacity factor, average CSI,... Both", "precision is intuitively the ability of the classifier not to label as positive", "quantifying irradiance and pv output variability\" Description: Solar Variability VI over a period", "else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute percentage error (MAPE) if", "bias. 
Understanding the overall forecast bias (over- or under- forecasting) would allow power", "ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if taxis ==", "between two cumulative distribution functions (CDFs), expressed as :param x: Vector of observation", "cmin: float, optional: minimum values of clear sky reference to be used in", "irradiance plotted against time. On a clear day, VI would be ~ 1.", "The precision is the ratio tp / (tp + fp) where tp is", "an over-forecasting tail, and a negative skewness leads to an under-forecasting tail. The", "False) & (y_true == False),axis=taxis) FP = np.sum((y_pred == True) & (y_true ==", "that the system actions taken to correct for under-forecasting and over-forecasting events are", "of very small forecast errors :param x: vector of observations :param y: vector", "Description: The metric sscore is calculated as the ratio of the above defined", "with same index are compared. Description: Same as MAE but normalized differences are", "is for very overcast days. Higher variability (changes in time) of irradiance will", "values :param y: Vector of forecast values :returns ksi: The KSI \"\"\" m", "error if an observation and forecast vector are given. Both vectors must have", "Defintion and description of some from Zhang et al., 2013, Metrics for Evaluating", "tail, and a negative skewness leads to an under-forecasting tail. The tendency to", "precision_score return precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating Curve (ROC)", "vector :param c: clear sky vector :param p: reference vector :returns a: a", "to be used in the calculations. default is 50 W/m2. 
:return U: forecast", "which the means are computed :returns: MAPE \"\"\" if taxis >= 0: return", "sklearn.metrics import accuracy_score TP = np.sum((y_pred == True) & (y_true == True),axis=taxis) TN", "unlike the RMSE metric, does not excessively account for extreme forecast events. :param", "of elements with same index are compared. Description: Kurtosis is a measure of", "al. 12. The Kolmogorov–Smirnov (KS) test is a nonparametric test to determine if", "observations :param y: vector of forecasts :returns: Skewness \"\"\" from scipy.stats import skew", "Description: Pearson’s correlation coefficientq is a global error measure metric; a larger value", "faster starting, units in the dispatch process. :param x: vector of observations :param", "the measured irradiance plotted against time divided by the \"length\" of the clear", "proposed by Espinar et al. 12. The Kolmogorov–Smirnov (KS) test is a nonparametric", "elements with same index are compared. Description: The MAE has been widely used", "vector of irradiance forecasts :param cls: vector of clear sky reference values :param", "are compared. Description: Skewness is a measure of the asymmetry of the probability", "= x >= th TP[cnt] = np.sum((y_pred == True) & (y_true == True),axis=taxis)", "By definition a persistence forecast has a sscore = 0. 
A negative sscore", "fourth standardized moment The difference between the kurtosis of a sample distribution and", "use values for low irradiance values y[cls<=cmin] = np.nan if nd == 1:", "uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\"", "= %.4f \\n \" % FS(x, y, p) a = a + \"MEAN", "datasets Description: (not from the paper) IQR is the difference between the 75th", "as :param x: Vector of observation values :param y: Vector of forecast values", "Vector of forecast values :returns: IQR \"\"\" iqr_x = np.percentile(x,75) - np.percentile(x,25) iqr_y", "as the ratio of the above defined forecast uncertainity U and the timeseries", "data points\" :param x: vector of irradiance values :param y: vector of irradiance", "vector of true positive TP and false positive FP for the given range", "the ratio of the above defined forecast uncertainity U and the timeseries variability", "the data which is to be modeled Input: :param x: Vector of observation", "axis. Variability is then calculated as the standard deviation of all increments. :param", "if nd == 1: # clear sky index for time t+deltat #csi0 =", "return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision", "MAE but normalized differences are normalized to a given value. :param x: vector", "cnt = 0 for idx in range(n): if np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff", "Description: Kurtosis is a measure of the magnitude of the peak of the", "and false positive FP for the given range of thresholds \"\"\" if taxis", "= cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) # Critical value Vc N = len(y) if", "equidistant instances ( timeseries ). Increments are calculated with an moving window along", "with an moving window along this axis. 
If two-dimensional vectors are provided subsequent", "np \"\"\" Different error metrics. Defintion and description of some from Zhang et", "0 or 1s. \"\"\" from sklearn.metrics import precision_score return precision_score(y_true, y_pred, **kwargs) def", "(optional): Axis along which the means are computed :returns: MAPE \"\"\" if taxis", "of data points for KSI not sufficient. N=\",N,\"<35\") return np.nan Vc = 1.63", ":param y: Vector of forecasts :param taxis (optional): Axis along which the means", "which is to be modeled Input: :param x: Vector of observation values :param", "arrays or just 0 or 1s. \"\"\" from sklearn.metrics import precision_score return precision_score(y_true,", "== True),axis=taxis) return np.divide( (TP + TN) , float((TP + FP + FN", ") else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) )) def maxae(x,y, taxis=-1): \"\"\" Calculates", "x.flatten() y = y.flatten() wh = x.shape[0] ra = minmax[1] - minmax[0] cnt", "capacity factor, mean csi) :param taxis (optional): Axis along which the means are", "that of the normal distribution is known as the excess kurtosis. In the", "details how to use In case of binary forecasts you can use boolean", "Calculate Receiver Operating Curve (ROC) :param x: observation vector :param y: forecast vector", "of determination R^2 Description: R^2 is a comparison of the variance of the", "variability dominates the forecast. By definition a persistence forecast has a sscore =", "just 0 or 1s. \"\"\" from sklearn.metrics import precision_score return precision_score(y_true, y_pred, **kwargs)", "method can use single-dimensional obervation and clear sky vectors with subsequent and temporal", "from scipy.stats import skew return skew(x-y) def kurtosis(x,y): \"\"\" Calculate kurtosis of the", "y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating Curve (ROC) :param x: observation", "for very overcast days. 
Higher variability (changes in time) of irradiance will lead", "the number of false positives. The precision is intuitively the ability of the", "to a less than optimal number of large thermal units being committed, which", "the dispatch process. :param x: vector of observations :param y: vector of forecasts", "a sscore = 0. A negative sscore means that the forecast performs worse", "= psum - (sum_x * sum_y/n) den = pow((sum_x_sq - pow(sum_x, 2) /", "tp / (tp + fp) where tp is the number of true positives", "taxis >= 0: r_num = np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else:", "= np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates", "index for time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) # clear sky", "np.sum((y_pred == False) & (y_true == True),axis=taxis) return np.divide( (TP + TN) ,", "RMSE provides a global error measure during the entire forecasting period. :param x:", "= 1 - ( np.nanvar(y-x) / np.nanvar(x) ) return r2 def V(x,cls,t=1,cmin=50.): \"\"\"", "irradiance forecasts :param cls: vector of clear sky reference values :param cmin: minimum", "\"Solar variability V is the standard deviation of the step-changes of the measured", "y: forecast vector :param c: clear sky vector :param p: reference vector :returns", "as np \"\"\" Different error metrics. Defintion and description of some from Zhang", "U alysis, the term kurtosis will be treated synonymously with excess kurtosis. A", "This method can use single-dimensional obervation and clear sky vectors with subsequent and", "Vector of forecast values :returns ksi: The KSI \"\"\" m = 100.0 nbins", "The tendency to over-forecast (or under-forecast) is important in that the system actions", "x: pow(x, 2), y)) psum = sum(map(lambda x, y: x * y, x,", "returns the difference of two IQR. 
Input: :param x: Vector of observation values", "so pairs of elements with same index are compared. Description: The MaxAE is", "the diurnal variability is neglected.\" This method can use single-dimensional obervation and clear", "= a + \"MEAN OBS = %.4f (%.3f) \\n \" % (np.nanmean(x), np.nanmean(x", "and over-forecasting events are not equal. An over-forecasting tendency could lead to a", "the excess kurtosis. In the subsequent anIn [142]: U alysis, the term kurtosis", "minus actual power, a positive skewness of the forecast errors leads to an", "that is negative. The best value is 1 and the worst value is", "standard deviation of a model forecast error divided by the esti- mated clear", "#return accuracy_score(y_true, y_pred, **kwargs) def prints(x, y, c, p=\"\"): \"\"\" Gives a summary", "+ \"BIAS = %.4f \\n \" % mbe(x, y) a = a +", "(\"Proposed Metrics for Evaulation of Solar Forecasting Models\") \"Here we define the uncertainty", "np.square( np.subtract(x,y), dtype=np.float32) )) def maxae(x,y, taxis=-1): \"\"\" Calculates maximum absolute error (MaxAE)", "= Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if taxis == 0: Xm = Xm.T if", "taxis >= 0: shape = list(x.shape) wh = shape[taxis] shape[taxis] = nbins TP", "on the values in the first axis. Variability is then calculated as the", "= np.nanmean(x ) avg_y = np.nanmean(y, dtype=np.float32) diffprod = 0 xdiff2 = 0", "\"Forecast Uncertainty\" as defined my Marquez and Coimbra, 2013 (\"Proposed Metrics for Evaulation", "of true positive TP and false positive FP for the given range of", "\"\"\" Calculates coefficient of determination R^2 Description: R^2 is a comparison of the", "values in the first axis. 
Variability is then calculated as the standard deviation", "V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability V as introduced in Marquez and Coimbra (2012)", "- avg_x ydiff = y[idx] - avg_y diffprod += xdiff * ydiff xdiff2", "of the data which is to be modeled Input: :param x: Vector of", "normalization (e.g. capacity factor, mean csi) :param taxis (optional): Axis along which the", "The metric sscore is calculated as the ratio of the above defined forecast", "percentile and the 25th percentile. This function returns the difference of two IQR.", "dimension. Increments are calculated in the second dimension, while iterating is done on", "the difference between the 75th percentile and the 25th percentile. This function returns", "\"\"\" from sklearn.metrics import precision_score return precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate", "of forecasts = %d (%.2f) \\n \" % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] -", "positive a sample that is negative. The best value is 1 and the", "an under-forecasting tail. The tendency to over-forecast (or under-forecast) is important in that", "are computed :returns: RMSE \"\"\" if taxis >= 0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y),", "for variability calculations :param cmin: minimum values of clear sky reference to be", "of a model forecast error divided by the esti- mated clear sky value", "FN + TN))) #return accuracy_score(y_true, y_pred, **kwargs) def prints(x, y, c, p=\"\"): \"\"\"", "vector of forecasts :returns: Kurtosis \"\"\" from scipy.stats import kurtosis return kurtosis(x-y) def", "- iqr_y def r2(y,x): \"\"\" Calculates coefficient of determination R^2 Description: R^2 is", "The same is for very overcast days. Higher variability (changes in time) of", "reference to be used in the calculations. default is 50 W/m2. :return U:", "uncertainity U and the timeseries variability V. 
sscore = 1 means a perfect", "100.0 nbins = 100 cdf_x = cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) # Critical value", "whereas a negative kurtosis indicates a flat data distribution, known as platykurtic. The", "a: a string with a number of metrics\"\"\" a = \"Number of measurements", "between the 75th percentile and the 25th percentile. This function returns the difference", "indicates an improved solar forecasting skill. :param x: Vector of obserations :param y:", "modeled Input: :param x: Vector of observation values :param y: Vector of forecast", "statistic D is defined as the maximum value of the absolute difference between", "clear sky reference values :param t: average period in minutes :param cmin: minimum", "%.4f (%.3f) \\n \" % (np.nanmean(y), np.nanmean(y / c)) a = a +", "x.shape[0] ra = minmax[1] - minmax[0] cnt = 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for", "np.empty(shape) else: TP = np.empty(nbins) FP = np.empty(nbins) x = x.flatten() y =", "return np.divide( (TP + TN) , float((TP + FP + FN + TN)))", "n = len(x) sum_x = float(sum(x)) sum_y = float(sum(y)) sum_x_sq = sum(map(lambda x:", "as the standard deviation of all increments. :param x: float vector of irradiance", "known as platykurtic. The pronounced peaks of the leptokurtic distribution represent a large", "e.g. capacity factor, average CSI,... 
Both vectors must have same length, so pairs", ":returns: Solar irradiance variability score ( scalar ) VI \"\"\" y = cls.copy()", "# don't use values for low irradiance values y[cls<=cmin] = np.nan if nd", "\"\"\" Calculates \"Forecast Uncertainty\" as defined my Marquez and Coimbra, 2013 (\"Proposed Metrics", "values :param cls: vector of clear sky reference values :param t: average period", ">= th TP[cnt] = np.sum((y_pred == True) & (y_true == True),axis=taxis) / float(wh)", "= np.divide(x[:,0],y[:,0]) # Difference deltak = np.subtract(csi0,csi1) # calculate standard deviation only if", "Variability is then calculated as the standard deviation of all increments. :param x:", "(or under-forecast) is important in that the system actions taken to correct for", "\"FS = %.4f \\n \" % FS(x, y, p) a = a +", "vector are given. Both vectors must have same length, so pairs of elements", "== False),axis=taxis) FP = np.sum((y_pred == True) & (y_true == False),axis=taxis) FN =", ":param x: vector of observations :param y: vector of forecasts :returns: Kurtosis \"\"\"", "= %.4f \\n \" % FS(x, y, p) a = a + \"SSCORE", "Coimbra (2012) \"proposed metric for evaluation of solar forecasting models\" Description: The metric", "np.nan if nd == 1: # clear sky index for time t+deltat #csi0", "n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5) if den == 0:", "is a comparison of the variance of the errors to the variance of", "you can use boolean arrays or just 0 or 1s. 
\"\"\" #from sklearn.metrics", ":param x: observation vector :param y: forecast vector :param minmax: range of thresholds,", "mbe(x, y) a = a + \"CORR = %.4f \\n \" % pearson(x,", "of a sample distribution and that of the normal distribution is known as", "R^2 Description: R^2 is a comparison of the variance of the errors to", "the Accuracy of Solar Power Forecasting, conference paper, 3rd International Workshop on Integration", "\"\"\" if taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\"", "vectors must have same length, so pairs of elements with same index are", "a flat data distribution, known as platykurtic. The pronounced peaks of the leptokurtic", "of the variance of the errors to the variance of the data which", ":param cls: float vector of corresponding clear sky irradiance values :param t: int,", "solar forecasting models\" Description: \"Solar variability V is the standard deviation of the", "a two given datasets Description: (not from the paper) IQR is the difference", "np.nanmean(c) a = a + \"SSCORE 60s = %.4f \\n \" % sscore(x,", "\"\"\" if taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\"", "/ np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation Coefficient (with axis", "V. sscore = 1 means a perfect forecast. 
sscore = 0 means the", "= np.empty(nbins) x = x.flatten() y = y.flatten() wh = x.shape[0] ra =", "sky value of the solar irradiance over a subset time window of Nw", "np.nan if taxis >= 0: r_num = np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis)", ") def precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision using sklearn module sklearn.metrics.precision_score The precision", "in indizes for increments :param cmin: float, optional: minimum values of clear sky", "the specified axis ´taxis´\"\"\" irange = slice(s,e) items = [slice(None, None, None)] *", "more expensive, but faster starting, units in the dispatch process. :param x: vector", ":param cls: vector of clear sky reference values :param t: timelag for variability", "= np.percentile(x,75) - np.percentile(x,25) iqr_y = np.percentile(y,75) - np.percentile(y,25) return iqr_x - iqr_y", "on Integration of Solar Power into Power Systems \"\"\" def ksi(fcst,obs): \"\"\" Calculates", "for KSI not sufficient. N=\",N,\"<35\") return np.nan Vc = 1.63 / np.sqrt(N) D", "observations :param y: vector of forecasts :param fac: value for normalization (e.g. capacity", "return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill (FS) FS is defined as", "alysis, the term kurtosis will be treated synonymously with excess kurtosis. A distribution", "taxis=-1): \"\"\" Calculates maximum absolute error (MaxAE) if an observation and forecast vector", "1 - ( np.nanvar(y-x) / np.nanvar(x) ) return r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates", "* ndims items[taxis] = irange return arr[tuple(items)] nd = x.ndim y = cls.copy()", "ra = minmax[1] - minmax[0] cnt = 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th", "with subsequent and temporal equidistant instances ( timeseries ). 
Increments are calculated with", "float(wh) #print th, TP[cnt], FP[cnt] cnt += 1 return TP, FP def accuracy(y_true,y_pred,taxis=0):", "the above defined forecast uncertainity U and the timeseries variability V. sscore =", "= np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1 = np.multiply( diffx,", "Calculates \"Forecast Uncertainty\" as defined my Marquez and Coimbra, 2013 (\"Proposed Metrics for", "absolute difference between two cumulative distribution functions (CDFs), expressed as :param x: Vector", "as the ratio of the \"length\" of the measured irradiance plotted against time", "float((TP + FP + FN + TN))) #return accuracy_score(y_true, y_pred, **kwargs) def prints(x,", "classification score: In case of binary forecasts you can use boolean arrays or", "of short-term extreme events in the power system. :param x: vector of observations", "will be treated synonymously with excess kurtosis. A distribution with a positive kurtosis", "t+deltat csi0 = np.divide(x[:,t],y[:,t]) # clear sky index for time t csi1 =", "\"\"\" if taxis >= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1):", "be ~ 1. The same is for very overcast days. Higher variability (changes", "%d (%.2f) \\n \" % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0]))", "distribution, known as platykurtic. The pronounced peaks of the leptokurtic distribution represent a", "list(x.shape) wh = shape[taxis] shape[taxis] = nbins TP = np.empty(shape) FP = np.empty(shape)", "given value. 
:param x: vector of observations :param y: vector of forecasts :param", "np.subtract(y,ym) prod1 = np.multiply( diffx, diffy ) prodx = np.multiply( diffx, diffx )", "vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation Coefficient (with axis functionality) Description: Pearson’s correlation coefficientq", "evaluating solar forecast models proposed by Marquez and Coimbra (2012) \"proposed metric for", "+ \"MEAN OBS = %.4f (%.3f) \\n \" % (np.nanmean(x), np.nanmean(x / c))", "ratio tp / (tp + fp) where tp is the number of true", ">= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean", "rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return ( 1 - np.divide(err1,err2) ) def skewness(x,y): \"\"\"", "sky vector :param p: reference vector :returns a: a string with a number", "as introduced in Marquez and Coimbra (2012) \"proposed metric for evaluation of solar", "axis functionality) Description: Pearson’s correlation coefficientq is a global error measure metric; a", "intuitively the ability of the classifier not to label as positive a sample", "/ float(wh) FP[cnt] = np.sum((y_pred == True) & (y_true == False),axis=taxis) / float(wh)", "standard deviation of all increments. :param x: float vector of irradiance values :param", "np.percentile(y,25) return iqr_x - iqr_y def r2(y,x): \"\"\" Calculates coefficient of determination R^2", "- ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision using sklearn", "the probability distribution, and is the third standardized moment Assuming that forecast errors", "of elements with same index are compared. Description: Same as MAE but normalized", "the given range of thresholds \"\"\" if taxis >= 0: shape = list(x.shape)", "so pairs of elements with same index are compared. 
Description: The RMSE provides", "__future__ import division import numpy as np \"\"\" Different error metrics. Defintion and", "np.nansum(prody,axis=taxis) )) else: r_num = np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody) )", "length, so pairs of elements with same index are compared. Description: Kurtosis is", "higher values of VI. :param x: vector if irradiance values :param cls: vector", "\"\"\" Calculates Pearson Correlation Coefficient Description: Pearson’s correlation coefficient is a global error", "\"\"\" #from sklearn.metrics import accuracy_score TP = np.sum((y_pred == True) & (y_true ==", ":param taxis (optional): Axis along which the means are computed :returns: MaxAE \"\"\"", "y) a = a + \"CORR = %.4f \\n \" % pearson(x, y)", "models\" Description: The metric sscore is calculated as the ratio of the above", "W/m2. :returns: Solar irradiance variability score ( scalar ) VI \"\"\" y =", "int, optional: Timelag/stepsize t in indizes for increments :param cmin: float, optional: minimum", "reference forecast :returns: FS \"\"\" err1 = rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return (", ":param taxis (optional): Axis along which the means are computed :returns: RMSE \"\"\"", "Pearson Correlation Coefficient Description: Pearson’s correlation coefficient is a global error measure metric;", "ksi = np.sum(D) def pearsonr(x, y): # Assume len(x) == len(y) n =", ":param x: vector of observations :param y: vector of forecasts :param fac: value", "\\n \" % FS(x, y, p) a = a + \"MEAN OBS =", "standardized moment Assuming that forecast errors are equal to forecast power minus actual", "(with axis functionality) Description: Pearson’s correlation coefficientq is a global error measure metric;", "and Coimbra, 2013 (\"Proposed Metrics for Evaulation of Solar Forecasting Models\") \"Here we", "diffprod = 0 xdiff2 = 0 ydiff2 = 0 cnt = 0 for", "Kolmogorov-Smirnow Test Integral (KSI) The KSI and OVER metrics were proposed by Espinar", 
"irradiance values :param cls: float vector of corresponding clear sky irradiance values :param", "0 for idx in range(n): if np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff = x[idx]", "for normalization (e.g. capacity factor, mean csi) :param taxis (optional): Axis along which", "for details how to use In case of binary forecasts you can use", "or just 0 or 1s. \"\"\" from sklearn.metrics import precision_score return precision_score(y_true, y_pred,", "Description: Skewness is a measure of the asymmetry of the probability distribution, and", "calculations. default is 50 W/m2. :returns sscore: \"\"\" y[cls<=cmin] = np.nan x[cls<=cmin] =", "of clear sky index increments :returns: V = solar variability \"\"\" def slc(arr,s,e,ndims):", "et al. 12. The Kolmogorov–Smirnov (KS) test is a nonparametric test to determine", "= slice(s,e) items = [slice(None, None, None)] * ndims items[taxis] = irange return", "np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation Coefficient (with axis functionality)", "sscore = 0 means the variability dominates the forecast. By definition a persistence", "pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) / n),", "\"\"\" m = 100.0 nbins = 100 cdf_x = cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins)", "a persistence forecast. :param x: vector of irradiance values :param y: vector of", "x: vector of irradiance values :param y: vector of irradiance forecasts :param cls:", "value. :param x: vector of observations :param y: vector of forecasts :param fac:", "solar irradiance so that the diurnal variability is neglected.\" This method can use", "else: Xm = np.nanmean(X, dtype=np.float32) ym = np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm) diffy", "FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill (FS) FS is defined as 1 - (", "problems and by the renewable energy industry to evaluate forecast performance. 
The MAE", "np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num = np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody)", "variability index defined by Stein et al. \"The variability index: A new and", "a sample distribution and that of the normal distribution is known as the", "be modeled Input: :param x: Vector of observation values :param y: Vector of", "3rd International Workshop on Integration of Solar Power into Power Systems \"\"\" def", "sky index for time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if nd", "mated clear sky value of the solar irradiance over a subset time window", "account for extreme forecast events. :param x: vector of observations :param y: vector", "iqr_y def r2(y,x): \"\"\" Calculates coefficient of determination R^2 Description: R^2 is a", "calculated as the ratio of the above defined forecast uncertainity U and the", "pearson(x, y) if p != \"\": a = a + \"FS = %.4f", "axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute percentage error (MAPE)", ":param y: Vector of forecast values :returns: IQR \"\"\" iqr_x = np.percentile(x,75) -", "y = cls.copy() # don't use values for low irradiance values y[cls<=cmin] =", "the forecast. By definition a persistence forecast has a sscore = 0. A", "vector are given. Additionaly a normalizing value must be given, e.g. capacity factor,", "observation and forecast vector are given. Both vectors must have same length, so", "how fat-tailed the distribution is, and is the fourth standardized moment The difference", "dtype=np.float32) diffprod = 0 xdiff2 = 0 ydiff2 = 0 cnt = 0", "2013 (\"Proposed Metrics for Evaulation of Solar Forecasting Models\") \"Here we define the", "of Solar Power Forecasting, conference paper, 3rd International Workshop on Integration of Solar", "75th percentile and the 25th percentile. 
This function returns the difference of two", "Curve (ROC) :param x: observation vector :param y: forecast vector :param minmax: range", "of elements with same index are compared. Description: Skewness is a measure of", "sky reference values :param t: timelag for variability calculations :param cmin: minimum values", "else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill (FS) FS is defined", "return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill (FS) FS", "U and the timeseries variability V. sscore = 1 means a perfect forecast.", "platykurtic. The pronounced peaks of the leptokurtic distribution represent a large number of", "Skill (FS) FS is defined as 1 - ( Error(Forecast) / Error(Reference) )", "y: Vector of forecast values :param p: Vector of reference forecast :returns: FS", "np.sqrt(N) D = np.max(cdf_x - cdf_y) # Observation maximum and minimum Pmax =", "= np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if taxis == 0:", "Calculates a variability index defined by Stein et al. \"The variability index: A", "\"\": a = a + \"FS = %.4f \\n \" % FS(x, y,", "a + \"MEAN CLS = %.4f \\n \" % np.nanmean(c) a = a", "means are computed :returns: RMSE \"\"\" if taxis >= 0: return np.sqrt(np.nanmean( np.square(", "vectors with subsequent and temporal equidistant instances ( timeseries ). Increments are calculated", "0 return num / den def pearson(x, y): \"\"\" Calculates Pearson Correlation Coefficient", "and Coimbra (2012) \"proposed metric for evaluation of solar forecasting models\" Description: \"Solar", "Calculate skewness of the probability distribution of the forecast error if an observation", "sum_y/n) den = pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y,", "significantly different. 
The KS statistic D is defined as the maximum value of", "boolean arrays or just 0 or 1s. \"\"\" from sklearn.metrics import precision_score return", "= float(sum(y)) sum_x_sq = sum(map(lambda x: pow(x, 2), x)) sum_y_sq = sum(map(lambda x:", "== False) & (y_true == False),axis=taxis) FP = np.sum((y_pred == True) & (y_true", "forecast errors leads to an over-forecasting tail, and a negative skewness leads to", "errors leads to an over-forecasting tail, and a negative skewness leads to an", "of two IQR. Input: :param x: Vector of observation values :param y: Vector", "along which the means are computed :returns: MAE \"\"\" if taxis >= 0:", ") ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate mean biae error (MBE) if an observation", "def mae(x,y,taxis=-1): \"\"\" Calculate mean absolute error (MaxAE) if an observation and forecast", "np.multiply( diffx, diffx ) prody = np.multiply( diffy, diffy ) prodx[np.isnan(prod1)] = np.nan", "be used in the calculations. default is 50 W/m2. :returns: deltak = vector", "forecasts :param cls: vector of clear sky reference values :param t: timelag for", "sky vectors with subsequent and temporal equidistant instances ( timeseries ). 
Increments are", "np.nan Vc = 1.63 / np.sqrt(N) D = np.max(cdf_x - cdf_y) # Observation", "def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation Coefficient (with axis functionality) Description: Pearson’s correlation", "np.sqrt( np.nansum(prodx) * np.nansum(prody) ) r = np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1): \"\"\"", "the variance of the data which is to be modeled Input: :param x:", "float(y.shape[0])) a = a + \"RMSE = %.4f \\n \" % rmse(x, y)", "%.4f \\n \" % sscore(x, y, c, 60) if p != \"\": a", "clear sky solar irradiance so that the diurnal variability is neglected.\" This method", "the starting of more expensive, but faster starting, units in the dispatch process.", "An over-forecasting tendency could lead to a less than optimal number of large", "1 return TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score: In case of", "a + \"FS = %.4f \\n \" % FS(x, y, p) a =", "with a number of metrics\"\"\" a = \"Number of measurements = %d (%.2f)", ":param fac: value for normalization (e.g. capacity factor, mean csi) :param taxis (optional):", "values :param y: Vector of forecast values :returns: IQR \"\"\" iqr_x = np.percentile(x,75)", "wh = x.shape[0] ra = minmax[1] - minmax[0] cnt = 0 ths =", "would allow power system operators to better allocate resources for compensating forecast errors", "np.empty(shape) FP = np.empty(shape) else: TP = np.empty(nbins) FP = np.empty(nbins) x =", "( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision using sklearn module", "p: Vector of reference forecast :returns: FS \"\"\" err1 = rmse(x,y,taxis=taxis) err2 =", "to evaluate the forecasting of short-term extreme events in the power system. :param", "nbins TP = np.empty(shape) FP = np.empty(shape) else: TP = np.empty(nbins) FP =", "used in the calculations. default is 50 W/m2. 
:returns: deltak = vector of", "value Vc N = len(y) if N < 35: print(\"Number of data points", "used in the calculations. default is 50 W/m2. :returns: Solar irradiance variability score", "sklearn module sklearn.metrics.precision_score The precision is the ratio tp / (tp + fp)", "same index are compared. Description: The RMSE provides a global error measure during", "sample distribution and that of the normal distribution is known as the excess", "index are compared. Description: Same as MAE but normalized differences are normalized to", "given datasets Description: (not from the paper) IQR is the difference between the", "forecast errors are equal to forecast power minus actual power, a positive skewness", "thresholds, give a tupel (e.g. (0,1) in ) :param nbins: number of bins/thresholds", "skewness of the probability distribution of the forecast error if an observation and", "computed :returns: MAE \"\"\" if taxis >= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return", "prody = np.multiply( diffy, diffy ) prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] = np.nan if", "second dimension, while iterating is done on the values in the first axis.", "a metric for evaluating solar forecast models proposed by Marquez and Coimbra (2012)", "- np.percentile(x,25) iqr_y = np.percentile(y,75) - np.percentile(y,25) return iqr_x - iqr_y def r2(y,x):", "np.sum((y_pred == True) & (y_true == True),axis=taxis) / float(wh) FP[cnt] = np.sum((y_pred ==", "events in the power system. :param x: vector of observations :param y: vector", "cnt += 1 if cnt == 0: return np.nan return diffprod / np.sqrt(xdiff2", "\"\"\" from scipy.stats import kurtosis return kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates Interquartile Range", "best value is 1 and the worst value is 0. 
Look at sklearn.metrics.precision_score", "x: Vector of observation values :param y: Vector of forecast values :returns: R^2", "vector of irradiance values :param y: vector of irradiance forecasts :param cls: vector", "error (MAPE) if an observation and forecast vector are given. Additionaly a normalizing", "np.sum((y_pred == True) & (y_true == True),axis=taxis) TN = np.sum((y_pred == False) &", "Description: Solar Variability VI over a period of time is calculated as the", "forecast bias (over- or under- forecasting) would allow power system operators to better", "larger value of Pearson’s correlation coefficient indicates an improved solar forecasting skill. :param", "= np.max(cdf_x - cdf_y) # Observation maximum and minimum Pmax = np.max(x); Pmin", "of observations :param y: vector of forecasts :param fac: value for normalization (e.g.", "are compared. Description: Kurtosis is a measure of the magnitude of the peak", "a measure of the asymmetry of the probability distribution, and is the third", "(y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a = a + \"RMSE", "how to use In case of binary forecasts you can use boolean arrays", "test is a nonparametric test to determine if two data sets are significantly", "% FS(x, y, p) a = a + \"SSCORE Persistence 60s = %.4f", "actions taken to correct for under-forecasting and over-forecasting events are not equal. An", "Vector of observation values :param y: Vector of forecast values :returns: R^2 \"\"\"", "forecast has a sscore = 0. A negative sscore means that the forecast", "y: x * y, x, y)) num = psum - (sum_x * sum_y/n)", "The pronounced peaks of the leptokurtic distribution represent a large number of very", "mean biae error (MBE) if an observation and forecast vector are given. Both", "percentile. This function returns the difference of two IQR. Input: :param x: Vector", "is 1 and the worst value is 0. 
Look at sklearn.metrics.precision_score for details", "so that the diurnal variability is neglected.\" This method can use single-dimensional obervation", "forecasting of short-term extreme events in the power system. :param x: vector of", "use single-dimensional obervation and clear sky vectors with subsequent and temporal equidistant instances", "along which the means are computed :returns: MaxAE \"\"\" if taxis >= 0:", "== 0: ym = ym.T else: Xm = np.nanmean(X, dtype=np.float32) ym = np.nanmean(y,", "deltak = vector of clear sky index increments :returns: V = solar variability", "length, so pairs of elements with same index are compared. Description: The MaxAE", "against time. On a clear day, VI would be ~ 1. The same", "25th percentile. This function returns the difference of two IQR. Input: :param x:", ":returns a: a string with a number of metrics\"\"\" a = \"Number of", "if taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates", "classifier not to label as positive a sample that is negative. The best", "= np.subtract(y,ym) prod1 = np.multiply( diffx, diffy ) prodx = np.multiply( diffx, diffx", "determination R^2 Description: R^2 is a comparison of the variance of the errors", "\"Here we define the uncertainty as the standard deviation of a model forecast", "np.percentile(x,75) - np.percentile(x,25) iqr_y = np.percentile(y,75) - np.percentile(y,25) return iqr_x - iqr_y def", "must be given, e.g. capacity factor, average CSI,... Both vectors must have same", "forecast bias. 
Understanding the overall forecast bias (over- or under- forecasting) would allow", "return num / den def pearson(x, y): \"\"\" Calculates Pearson Correlation Coefficient Description:", "irradiance values :param t: int, optional: Timelag/stepsize t in indizes for increments :param", "of forecast values :returns: IQR \"\"\" iqr_x = np.percentile(x,75) - np.percentile(x,25) iqr_y =", "case of binary forecasts you can use boolean arrays or just 0 or", "ym.reshape(ym.shape[0],1) if taxis == 0: Xm = Xm.T if taxis == 0: ym", "to forecast power minus actual power, a positive skewness of the forecast errors", "term kurtosis will be treated synonymously with excess kurtosis. A distribution with a", "solar variability \"\"\" def slc(arr,s,e,ndims): \"\"\" returns the input array ´arr´ sliced from", "\"\"\" if taxis >= 0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) ) else:", "persistence forecast. :param x: vector of irradiance values :param y: vector of irradiance", "forecasts :param taxis (optional): Axis along which the means are computed :returns: MBE", "different. The KS statistic D is defined as the maximum value of the", "y_pred, **kwargs) def prints(x, y, c, p=\"\"): \"\"\" Gives a summary of error", "np.subtract(x,y), dtype=np.float32) )) def maxae(x,y, taxis=-1): \"\"\" Calculates maximum absolute error (MaxAE) if", "y)) num = psum - (sum_x * sum_y/n) den = pow((sum_x_sq - pow(sum_x,", "calculations :param cmin: minimum values of clear sky reference to be used in", "sky index for time t+deltat csi0 = np.divide(x[:,t],y[:,t]) # clear sky index for", "= %.4f \\n \" % sscore(x, y, c, 60) if p != \"\":", "model forecast error divided by the esti- mated clear sky value of the", "a global error measure metric; a larger value of Pearson’s correlation coefficient indicates", "the subsequent anIn [142]: U alysis, the term kurtosis will be treated synonymously", "so pairs of elements with same index are compared. 
Description: Same as MAE", "as 1 - ( Error(Forecast) / Error(Reference) ) :param x: Vector of observation", "of more expensive, but faster starting, units in the dispatch process. :param x:", "Marquez and Coimbra (2012) \"proposed metric for evaluation of solar forecasting models\" Description:", "variability score ( scalar ) VI \"\"\" y = cls.copy() y[cls<=cmin] = np.nan", "shape = list(x.shape) wh = shape[taxis] shape[taxis] = nbins TP = np.empty(shape) FP", "= np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1)", "R^2 is a comparison of the variance of the errors to the variance", "= minmax[1] - minmax[0] cnt = 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in", "range(n): if np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff = x[idx] - avg_x ydiff =", "Calculate mean biae error (MBE) if an observation and forecast vector are given.", "Vc N = len(y) if N < 35: print(\"Number of data points for", "Calculates coefficient of determination R^2 Description: R^2 is a comparison of the variance", "iqr_y = np.percentile(y,75) - np.percentile(y,25) return iqr_x - iqr_y def r2(y,x): \"\"\" Calculates", "np.nansum(prody) ) r = np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1): \"\"\" Calculates root mean", "and pv output variability\" Description: Solar Variability VI over a period of time", "np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody) ) r = np.divide(r_num,r_den) return r", "of clear sky reference to be used in the calculations. default is 50", "worst value is 0. Look at sklearn.metrics.precision_score for details how to use In", "defined my Marquez and Coimbra, 2013 (\"Proposed Metrics for Evaulation of Solar Forecasting", "elements with same index are compared. 
Description: The MBE metric intends to indicate", "Evaluating the Accuracy of Solar Power Forecasting, conference paper, 3rd International Workshop on", "np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return VI", "FN = np.sum((y_pred == False) & (y_true == True),axis=taxis) return np.divide( (TP +", "distance d = ( Pmax - Pmin ) / m ksi = np.sum(D)", "V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan return V, deltak def VI(x,cls,t,cmin=50.): \"\"\"", "= a + \"SSCORE 60s = %.4f \\n \" % sscore(x, y, c,", "obervation and clear sky vectors with subsequent and temporal equidistant instances ( timeseries", "* (sum_y_sq - pow(sum_y, 2) / n), 0.5) if den == 0: return", "the esti- mated clear sky value of the solar irradiance over a subset", "expressed as :param x: Vector of observation values :param y: Vector of forecast", "maxae(x,y, taxis=-1): \"\"\" Calculates maximum absolute error (MaxAE) if an observation and forecast", "of observations :param y: vector of forecasts :returns: Kurtosis \"\"\" from scipy.stats import", "also a global error measure metric, which, unlike the RMSE metric, does not", "True) & (y_true == True),axis=taxis) TN = np.sum((y_pred == False) & (y_true ==", "which the means are computed :returns: MaxAE \"\"\" if taxis >= 0: return", "of Solar Forecasting Models\") \"Here we define the uncertainty as the standard deviation", "Critical value Vc N = len(y) if N < 35: print(\"Number of data", "csi0 = np.divide(x[t:],y[t:]) # clear sky index for time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd))", "same index are compared. 
Description: Kurtosis is a measure of the magnitude of", "Accuracy classification score: In case of binary forecasts you can use boolean arrays", "\" % rmse(x, y) a = a + \"BIAS = %.4f \\n \"", "np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric", "Solar Power into Power Systems \"\"\" def ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow Test", "returns vector of true positive TP and false positive FP for the given", "ndims < 3 if taxis >= 0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym =", "\\n \" % FS(x, y, p) a = a + \"SSCORE Persistence 60s", ":param t: int, optional: Timelag/stepsize t in indizes for increments :param cmin: float,", "and OVER metrics were proposed by Espinar et al. 12. The Kolmogorov–Smirnov (KS)", "errors to the variance of the data which is to be modeled Input:", "TP = np.empty(nbins) FP = np.empty(nbins) x = x.flatten() y = y.flatten() wh", "intends to indicate average forecast bias. Understanding the overall forecast bias (over- or", "conference paper, 3rd International Workshop on Integration of Solar Power into Power Systems", "solar forecast models proposed by Marquez and Coimbra (2012) \"proposed metric for evaluation", "y: Vector of forecasts :returns: Correlation Coefficient \"\"\" assert len(x) == len(y) n", "np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate mean absolute error (MaxAE) if", "obserations :param y: Vector of forecasts :returns: Correlation Coefficient \"\"\" assert len(x) ==", "extreme forecast events. :param x: vector of observations :param y: vector of forecasts", "precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision using sklearn module sklearn.metrics.precision_score The precision is the", "input array ´arr´ sliced from ´s´ to ´e´ at the specified axis ´taxis´\"\"\"", "deviations of forecast errors. 
The MaxAE metric is useful to evaluate the forecasting", "vector of observations :param y: vector of forecasts :returns: Skewness \"\"\" from scipy.stats", "( scalar ) VI \"\"\" y = cls.copy() y[cls<=cmin] = np.nan sum1 =", "leads to an over-forecasting tail, and a negative skewness leads to an under-forecasting", "computed :returns: Correlation Coefficient \"\"\" ndims = X.ndim assert ndims < 3 if", "2: # clear sky index for time t+deltat csi0 = np.divide(x[:,t],y[:,t]) # clear", "%.4f (%.3f) \\n \" % (np.nanmean(x), np.nanmean(x / c)) a = a +", "avg_y diffprod += xdiff * ydiff xdiff2 += xdiff * xdiff ydiff2 +=", "in minutes :param cmin: minimum values of clear sky reference to be used", "events are not equal. An over-forecasting tendency could lead to a less than", "tp,fp: returns vector of true positive TP and false positive FP for the", "= np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1): \"\"\" Calculates root mean square error (RMSE)", "a = a + \"FS = %.4f \\n \" % FS(x, y, p)", "novel metric for quantifying irradiance and pv output variability\" Description: Solar Variability VI", "avg_y = np.nanmean(y, dtype=np.float32) diffprod = 0 xdiff2 = 0 ydiff2 = 0", "cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) # Critical value Vc N = len(y) if N", "x.ndim y = cls.copy() # don't use values for low irradiance values y[cls<=cmin]", "the fourth standardized moment The difference between the kurtosis of a sample distribution", "a larger value of Pearson’s correlation coefficient indicates an improved solar forecasting skill.", "= np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan return V, deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates", "peaked distribution; whereas a negative kurtosis indicates a flat data distribution, known as", "be in the second dimension. Increments are calculated in the second dimension, while", "forecast power minus actual power, a positive skewness of the forecast errors leads", "give a tupel (e.g. 
(0,1) in ) :param nbins: number of bins/thresholds inside", "ydiff cnt += 1 if cnt == 0: return np.nan return diffprod /", "standard deviation of the step-changes of the measured solar irradiance to that of", "number of true positives and fp the number of false positives. The precision", "r_num = np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num = np.nansum(prod1)", "of the forecast error if an observation and forecast vector are given. Both", "shape[taxis] shape[taxis] = nbins TP = np.empty(shape) FP = np.empty(shape) else: TP =", "for th in ths: y_pred = y >= th y_true = x >=", "evaluation of solar forecasting models\" Description: \"Solar variability V is the standard deviation", "value must be given, e.g. capacity factor, average CSI,... Both vectors must have", "axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric for evaluating solar forecast models", "nonparametric test to determine if two data sets are significantly different. The KS", "improved solar forecasting skill. :param x: Vector of obserations :param y: Vector of", "( 1 - np.divide(err1,err2) ) def skewness(x,y): \"\"\" Calculate skewness of the probability", "R^2 \"\"\" r2 = 1 - ( np.nanvar(y-x) / np.nanvar(x) ) return r2", "excess kurtosis. 
A distribution with a positive kurtosis value is known as leptokurtic,", "Calculates the Kolmogorov-Smirnow Test Integral (KSI) The KSI and OVER metrics were proposed", "items[taxis] = irange return arr[tuple(items)] nd = x.ndim y = cls.copy() # don't", "Calculates Interquartile Range Difference (IQR Diff) of a two given datasets Description: (not", "m = 100.0 nbins = 100 cdf_x = cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) #", "= a + \"FS = %.4f \\n \" % FS(x, y, p) a", "= np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\" as defined my", "that the forecast performs worse than a persistence forecast. :param x: vector of", "average period in minutes :param cmin: minimum values of clear sky reference to", "\\n \" % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a =", "VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability index defined by Stein et al. \"The variability", "Gives a summary of error metrics :param x: observation vector :param y: forecast", ":param p: Vector of reference forecast :returns: FS \"\"\" err1 = rmse(x,y,taxis=taxis) err2", "\"\"\" def ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow Test Integral (KSI) The KSI and", "kurtosis(x,y): \"\"\" Calculate kurtosis of the probability distribution of the forecast error if", "module sklearn.metrics.precision_score The precision is the ratio tp / (tp + fp) where", "operators to better allocate resources for compensating forecast errors in the dispatch process.", "np.divide( (TP + TN) , float((TP + FP + FN + TN))) #return", "compared. 
Description: Skewness is a measure of the asymmetry of the probability distribution,", "return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\" as defined my Marquez and", ">= th y_true = x >= th TP[cnt] = np.sum((y_pred == True) &", "= sum(map(lambda x, y: x * y, x, y)) num = psum -", "a = a + \"Number of forecasts = %d (%.2f) \\n \" %", "measure metric; a larger value of Pearson’s correlation coefficient indicates an improved solar", "vector if irradiance values :param cls: vector of clear sky reference values :param", "+ FN + TN))) #return accuracy_score(y_true, y_pred, **kwargs) def prints(x, y, c, p=\"\"):", "solar irradiance to that of a clear sky solar irradiance so that the", "maximum absolute error (MaxAE) if an observation and forecast vector are given. Both", "Uncertainty\" as defined my Marquez and Coimbra, 2013 (\"Proposed Metrics for Evaulation of", "of forecast values :param p: Vector of reference forecast :returns: FS \"\"\" err1", "a = a + \"MEAN OBS = %.4f (%.3f) \\n \" % (np.nanmean(x),", "specified axis ´taxis´\"\"\" irange = slice(s,e) items = [slice(None, None, None)] * ndims", "# clear sky index for time t+deltat csi0 = np.divide(x[:,t],y[:,t]) # clear sky", "* ydiff xdiff2 += xdiff * xdiff ydiff2 += ydiff * ydiff cnt", ":param y: vector of irradiance forecasts :param cls: vector of clear sky reference", "the term kurtosis will be treated synonymously with excess kurtosis. A distribution with", "sum(map(lambda x, y: x * y, x, y)) num = psum - (sum_x", "(MBE) if an observation and forecast vector are given. Both vectors must have", "to be corrected through the starting of more expensive, but faster starting, units", "to ´e´ at the specified axis ´taxis´\"\"\" irange = slice(s,e) items = [slice(None,", "could lead to a less than optimal number of large thermal units being", "%.4f \\n \" % rmse(x, y) a = a + \"BIAS = %.4f", "VI would be ~ 1. The same is for very overcast days. 
Higher", "np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1): \"\"\" Calculates root mean square error (RMSE) if", "for evaluating solar forecast models proposed by Marquez and Coimbra (2012) \"proposed metric", "normalized to a given value. :param x: vector of observations :param y: vector", "values :param y: Vector of forecast values :param p: Vector of reference forecast", "expensive, but faster starting, units in the dispatch process. :param x: vector of", ") return r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability V as introduced in", "np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision using sklearn module sklearn.metrics.precision_score", "& (y_true == True),axis=taxis) TN = np.sum((y_pred == False) & (y_true == False),axis=taxis)", "return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate mean absolute error (MaxAE) if an observation", "of the step-changes of the measured solar irradiance to that of a clear", "Xm.T if taxis == 0: ym = ym.T else: Xm = np.nanmean(X, dtype=np.float32)", "and that of the normal distribution is known as the excess kurtosis. In", "cnt == 0: return np.nan return diffprod / np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1):", "(optional): Axis along which the means are computed :returns: RMSE \"\"\" if taxis", "subsequent instances must be in the second dimension. Increments are calculated in the", "FS is defined as 1 - ( Error(Forecast) / Error(Reference) ) :param x:", "for time t+deltat csi0 = np.divide(x[:,t],y[:,t]) # clear sky index for time t", ":returns: MAE \"\"\" if taxis >= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32)", "sky reference to be used in the calculations. default is 50 W/m2. 
:returns", "cdf(y,nbins=nbins) # Critical value Vc N = len(y) if N < 35: print(\"Number", "Difference (IQR Diff) of a two given datasets Description: (not from the paper)", "is to be modeled Input: :param x: Vector of observation values :param y:", "data points for KSI not sufficient. N=\",N,\"<35\") return np.nan Vc = 1.63 /", "is the ratio tp / (tp + fp) where tp is the number", "= np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num = np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx)", "bias (over- or under- forecasting) would allow power system operators to better allocate", "np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num = np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx) *", "is 50 W/m2. :return U: forecast uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls", "for evaluation of solar forecasting models\" Description: The metric sscore is calculated as", "\"\"\" Calculates root mean square error (RMSE) if an observation and forecast vector", "VI = np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\" as defined", "square error (RMSE) if an observation and forecast vector are given. Both vectors", "/ (tp + fp) where tp is the number of true positives and", "with a positive kurtosis value is known as leptokurtic, which indicates a peaked", ":param x: Vector of observation values :param y: Vector of forecast values :param", "ydiff2 += ydiff * ydiff cnt += 1 if cnt == 0: return", "to be used in the calculations. default is 50 W/m2. :returns sscore: \"\"\"", "np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if nd == 2: # clear sky index for", "and forecast vector are given. 
Additionaly a normalizing value must be given, e.g.", "c)) a = a + \"MEAN FOR = %.4f (%.3f) \\n \" %", "np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths: y_pred = y >= th y_true = x", "\"\"\" Calculate kurtosis of the probability distribution of the forecast error if an", "taxis >= 0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm =", "The MAE has been widely used in regression problems and by the renewable", "return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) )) def maxae(x,y, taxis=-1): \"\"\" Calculates maximum absolute", "The best value is 1 and the worst value is 0. Look at", "true positive TP and false positive FP for the given range of thresholds", "calculate standard deviation only if number of datapoints is large enough if np.sum(np.isfinite(deltak))", "0: shape = list(x.shape) wh = shape[taxis] shape[taxis] = nbins TP = np.empty(shape)", "which the means are computed :returns: MBE \"\"\" if taxis >= 0: return", "np.sum((y_pred == True) & (y_true == False),axis=taxis) FN = np.sum((y_pred == False) &", "equal. An over-forecasting tendency could lead to a less than optimal number of", "actual power, a positive skewness of the forecast errors leads to an over-forecasting", "0 ydiff2 = 0 cnt = 0 for idx in range(n): if np.isnan(x[idx])", "from ´s´ to ´e´ at the specified axis ´taxis´\"\"\" irange = slice(s,e) items", "forecast values :returns: IQR \"\"\" iqr_x = np.percentile(x,75) - np.percentile(x,25) iqr_y = np.percentile(y,75)", "of time is calculated as the ratio of the \"length\" of the measured", "the first axis. 
Variability is then calculated as the standard deviation of all", "\"CORR = %.4f \\n \" % pearson(x, y) if p != \"\": a", "# clear sky index for time t csi1 = np.divide(x[:,0],y[:,0]) # Difference deltak", "taxis (optional): Axis along which the means are computed :returns: MBE \"\"\" if", "a = a + \"RMSE = %.4f \\n \" % rmse(x, y) a", "def kurtosis(x,y): \"\"\" Calculate kurtosis of the probability distribution of the forecast error", "measure metric, which, unlike the RMSE metric, does not excessively account for extreme", "= np.nan if nd == 1: # clear sky index for time t+deltat", "Diff) of a two given datasets Description: (not from the paper) IQR is", "a period of time is calculated as the ratio of the \"length\" of", "observations :param y: vector of forecasts :returns: Kurtosis \"\"\" from scipy.stats import kurtosis", "Solar Forecasting Models\") \"Here we define the uncertainty as the standard deviation of", "the calculations. default is 50 W/m2. :returns sscore: \"\"\" y[cls<=cmin] = np.nan x[cls<=cmin]", "worse than a persistence forecast. :param x: vector of irradiance values :param y:", "of elements with same index are compared. Description: The RMSE provides a global", "minimum values of clear sky reference to be used in the calculations. default", ":returns: Correlation Coefficient \"\"\" ndims = X.ndim assert ndims < 3 if taxis", "from __future__ import division import numpy as np \"\"\" Different error metrics. Defintion", "length, so pairs of elements with same index are compared. Description: The MAE", "deviation of the step-changes of the measured solar irradiance to that of a", "data distribution, known as platykurtic. 
The pronounced peaks of the leptokurtic distribution represent", "scalar ) VI \"\"\" y = cls.copy() y[cls<=cmin] = np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2", "return kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates Interquartile Range Difference (IQR Diff) of a", "# encoding=utf8 from __future__ import division import numpy as np \"\"\" Different error", "return np.nan Vc = 1.63 / np.sqrt(N) D = np.max(cdf_x - cdf_y) #", "value of Pearson’s correlation coefficient indicates an improved solar forecasting skill. :param x:", "np.max(x); Pmin = np.min(x) # Interval distance d = ( Pmax - Pmin", "y, x, y)) num = psum - (sum_x * sum_y/n) den = pow((sum_x_sq", "does not excessively account for extreme forecast events. :param x: vector of observations", "so pairs of elements with same index are compared. Description: Kurtosis is a", ":param t: average period in minutes :param cmin: minimum values of clear sky", "the 25th percentile. This function returns the difference of two IQR. Input: :param", "/ n), 0.5) if den == 0: return 0 return num / den", "N=\",N,\"<35\") return np.nan Vc = 1.63 / np.sqrt(N) D = np.max(cdf_x - cdf_y)", "0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) ) else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y),", "errors in the dispatch process. :param x: vector of observations :param y: vector", "a negative skewness leads to an under-forecasting tail. The tendency to over-forecast (or", "and the worst value is 0. Look at sklearn.metrics.precision_score for details how to", "= solar variability \"\"\" def slc(arr,s,e,ndims): \"\"\" returns the input array ´arr´ sliced", "and the timeseries variability V. sscore = 1 means a perfect forecast. 
sscore", "FP + FN + TN))) #return accuracy_score(y_true, y_pred, **kwargs) def prints(x, y, c,", "ydiff * ydiff cnt += 1 if cnt == 0: return np.nan return", "optimal number of large thermal units being committed, which need to be corrected", "0.5) if den == 0: return 0 return num / den def pearson(x,", "starting of more expensive, but faster starting, units in the dispatch process. :param", "Input: :param x: Vector of observation values :param y: Vector of forecast values", "= x.shape[0] ra = minmax[1] - minmax[0] cnt = 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins))", "return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a", "def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability V as introduced in Marquez and Coimbra", "the calculations. default is 50 W/m2. :return U: forecast uncertainty \"\"\" return np.sqrt(", "False) & (y_true == True),axis=taxis) return np.divide( (TP + TN) , float((TP +", "float(x.shape[0])) a = a + \"Number of forecasts = %d (%.2f) \\n \"", "index: A new and novel metric for quantifying irradiance and pv output variability\"", "forecasts :returns: Skewness \"\"\" from scipy.stats import skew return skew(x-y) def kurtosis(x,y): \"\"\"", "5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan return V, deltak def VI(x,cls,t,cmin=50.):", "the kurtosis of a sample distribution and that of the normal distribution is", "positive FP for the given range of thresholds \"\"\" if taxis >= 0:", "len(x) == len(y) n = len(x) assert n > 0 avg_x = np.nanmean(x", "a = a + \"SSCORE Persistence 60s = %.4f \\n \" % sscore(x,", ":param x: observation vector :param y: forecast vector :param c: clear sky vector", "distribution; whereas a negative kurtosis indicates a flat data distribution, known as platykurtic.", ":param cmin: minimum values of clear sky reference to be used in the", "\"proposed metric 
for evaluation of solar forecasting models\" Description: The metric sscore is", ":param y: vector of forecasts :returns: Skewness \"\"\" from scipy.stats import skew return", "distribution is, and is the fourth standardized moment The difference between the kurtosis", "Range Difference (IQR Diff) of a two given datasets Description: (not from the", "renewable energy industry to evaluate forecast performance. The MAE metric is also a", "err2 = rmse(x,p,taxis=taxis) return ( 1 - np.divide(err1,err2) ) def skewness(x,y): \"\"\" Calculate", "uncertainty as the standard deviation of a model forecast error divided by the", "/ c)) a = a + \"MEAN CLS = %.4f \\n \" %", "models proposed by Marquez and Coimbra (2012) \"proposed metric for evaluation of solar", "MaxAE \"\"\" if taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1):", "known as the excess kurtosis. In the subsequent anIn [142]: U alysis, the", "Correlation Coefficient (with axis functionality) Description: Pearson’s correlation coefficientq is a global error", ":returns: Kurtosis \"\"\" from scipy.stats import kurtosis return kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates", "Description: The RMSE provides a global error measure during the entire forecasting period.", "0: r_num = np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num =", "+ fp) where tp is the number of true positives and fp the", "same length, so pairs of elements with same index are compared. Description: Same", "Vector of obserations :param y: Vector of forecasts :returns: Correlation Coefficient \"\"\" assert", "Pmin ) / m ksi = np.sum(D) def pearsonr(x, y): # Assume len(x)", "evaluate forecast performance. The MAE metric is also a global error measure metric,", "default is 50 W/m2. 
:return U: forecast uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y),", "an observation and forecast vector are given. Both vectors must have same length,", "sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI = np.divide(sum1,sum2)", "which, unlike the RMSE metric, does not excessively account for extreme forecast events.", "skewness(x,y): \"\"\" Calculate skewness of the probability distribution of the forecast error if", "events. :param x: vector of observations :param y: vector of forecasts :param taxis", "industry to evaluate forecast performance. The MAE metric is also a global error", "the system actions taken to correct for under-forecasting and over-forecasting events are not", "1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision using", "normalizing value must be given, e.g. capacity factor, average CSI,... Both vectors must", "= %.4f \\n \" % np.nanmean(c) a = a + \"SSCORE 60s =", "diffy = np.subtract(y,ym) prod1 = np.multiply( diffx, diffy ) prodx = np.multiply( diffx,", "instances ( timeseries ). Increments are calculated with an moving window along this", "xdiff2 += xdiff * xdiff ydiff2 += ydiff * ydiff cnt += 1", "points\" :param x: vector of irradiance values :param y: vector of irradiance forecasts", "the calculations. default is 50 W/m2. :returns: deltak = vector of clear sky", "MaxAE is an indicative of local deviations of forecast errors. 
The MaxAE metric", "th in ths: y_pred = y >= th y_true = x >= th", "== True) & (y_true == False),axis=taxis) FN = np.sum((y_pred == False) & (y_true", "under- forecasting) would allow power system operators to better allocate resources for compensating", "measure of the asymmetry of the probability distribution, and is the third standardized", "\"\"\" r2 = 1 - ( np.nanvar(y-x) / np.nanvar(x) ) return r2 def", "t: timelag for variability calculations :param cmin: minimum values of clear sky reference", ":param taxis (optional): Axis along which the means are computed :returns: MBE \"\"\"", "& (y_true == True),axis=taxis) / float(wh) FP[cnt] = np.sum((y_pred == True) & (y_true", "with same index are compared. Description: The MBE metric intends to indicate average", "the standard deviation of the step-changes of the measured solar irradiance to that", "\"\"\" Gives a summary of error metrics :param x: observation vector :param y:", "D is defined as the maximum value of the absolute difference between two", "leptokurtic, which indicates a peaked distribution; whereas a negative kurtosis indicates a flat", "c, p=\"\"): \"\"\" Gives a summary of error metrics :param x: observation vector", "< 35: print(\"Number of data points for KSI not sufficient. N=\",N,\"<35\") return np.nan", "useful to evaluate the forecasting of short-term extreme events in the power system.", "/ float(x.shape[0])) a = a + \"Number of forecasts = %d (%.2f) \\n", "ym = ym.reshape(ym.shape[0],1) if taxis == 0: Xm = Xm.T if taxis ==", "== True) & (y_true == False),axis=taxis) / float(wh) #print th, TP[cnt], FP[cnt] cnt", "pairs of elements with same index are compared. Description: The MaxAE is an", "forecasts you can use boolean arrays or just 0 or 1s. \"\"\" #from", "Forecast Skill (FS) FS is defined as 1 - ( Error(Forecast) / Error(Reference)", "of observations :param y: vector of forecasts :param taxis (optional): Axis along which", "provided subsequent instances must be in the second dimension. 
Increments are calculated in", "forecast error divided by the esti- mated clear sky value of the solar", "return precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating Curve (ROC) :param", "percentage error (MAPE) if an observation and forecast vector are given. Additionaly a", "this axis. If two-dimensional vectors are provided subsequent instances must be in the", "(sum_x * sum_y/n) den = pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq", "length, so pairs of elements with same index are compared. Description: Skewness is", "0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths: y_pred = y >= th", "taxis (optional): Axis along which the means are computed :returns: MAPE \"\"\" if", "vectors are provided subsequent instances must be in the second dimension. Increments are", "num = psum - (sum_x * sum_y/n) den = pow((sum_x_sq - pow(sum_x, 2)", "(e.g. (0,1) in ) :param nbins: number of bins/thresholds inside the range :returns", "widely used in regression problems and by the renewable energy industry to evaluate", "= x.ndim y = cls.copy() # don't use values for low irradiance values", "a global error measure metric, which, unlike the RMSE metric, does not excessively", "Compute the precision using sklearn module sklearn.metrics.precision_score The precision is the ratio tp", "FS(x, y, p) a = a + \"MEAN OBS = %.4f (%.3f) \\n", "or, conversely, how fat-tailed the distribution is, and is the fourth standardized moment", "else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate mean absolute error (MaxAE) if an", "used in the calculations. default is 50 W/m2. :returns sscore: \"\"\" y[cls<=cmin] =", "is defined as the maximum value of the absolute difference between two cumulative", "(MaxAE) if an observation and forecast vector are given. 
Both vectors must have", "- (sum_x * sum_y/n) den = pow((sum_x_sq - pow(sum_x, 2) / n) *", "from Zhang et al., 2013, Metrics for Evaluating the Accuracy of Solar Power", "np.nanvar(x) ) return r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability V as introduced", "for increments :param cmin: float, optional: minimum values of clear sky reference to", "value of the absolute difference between two cumulative distribution functions (CDFs), expressed as", "along which the means are computed :returns: MBE \"\"\" if taxis >= 0:", "forecast errors in the dispatch process. :param x: vector of observations :param y:", "x: Vector of obserations :param y: Vector of forecasts :param taxis (optional): Axis", "measurements = %d (%.2f) \\n \" % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x)))", "\"\"\" y[cls<=cmin] = np.nan x[cls<=cmin] = np.nan return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0])", "irradiance values :param y: vector of irradiance forecasts :param cls: vector of clear", "cnt += 1 return TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score: In", "with same index are compared. Description: The MaxAE is an indicative of local", "(ROC) :param x: observation vector :param y: forecast vector :param minmax: range of", "forecast uncertainity U and the timeseries variability V. sscore = 1 means a", "irradiance values :param cls: vector of clear sky reference values :param t: average", "a normalizing value must be given, e.g. capacity factor, average CSI,... 
Both vectors", "a subset time window of Nw data points\" :param x: vector of irradiance", "moment The difference between the kurtosis of a sample distribution and that of", "\\n \" % (np.nanmean(x), np.nanmean(x / c)) a = a + \"MEAN FOR", "FP = np.empty(shape) else: TP = np.empty(nbins) FP = np.empty(nbins) x = x.flatten()", "value of the solar irradiance over a subset time window of Nw data", "m ksi = np.sum(D) def pearsonr(x, y): # Assume len(x) == len(y) n", "& (y_true == False),axis=taxis) FN = np.sum((y_pred == False) & (y_true == True),axis=taxis)", "#print th, TP[cnt], FP[cnt] cnt += 1 return TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\"", "= %.4f \\n \" % mbe(x, y) a = a + \"CORR =", "y = cls.copy() y[cls<=cmin] = np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 =", "def pearsonr(x, y): # Assume len(x) == len(y) n = len(x) sum_x =", "Calculate mean absolute percentage error (MAPE) if an observation and forecast vector are", "return diffprod / np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation Coefficient", "cdf_x = cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) # Critical value Vc N = len(y)", "value is 1 and the worst value is 0. 
Look at sklearn.metrics.precision_score for", "scipy.stats import skew return skew(x-y) def kurtosis(x,y): \"\"\" Calculate kurtosis of the probability", ") :param x: Vector of observation values :param y: Vector of forecast values", "observation vector :param y: forecast vector :param c: clear sky vector :param p:", "don't use values for low irradiance values y[cls<=cmin] = np.nan if nd ==", "distribution, or, conversely, how fat-tailed the distribution is, and is the fourth standardized", "2), x)) sum_y_sq = sum(map(lambda x: pow(x, 2), y)) psum = sum(map(lambda x,", "equal to forecast power minus actual power, a positive skewness of the forecast", "Receiver Operating Curve (ROC) :param x: observation vector :param y: forecast vector :param", "dimension, while iterating is done on the values in the first axis. Variability", "sky reference to be used in the calculations. default is 50 W/m2. :returns:", "skew return skew(x-y) def kurtosis(x,y): \"\"\" Calculate kurtosis of the probability distribution of", "diffprod += xdiff * ydiff xdiff2 += xdiff * xdiff ydiff2 += ydiff", "0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym", "forecast vector are given. Both vectors must have same length, so pairs of", "V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision using sklearn module sklearn.metrics.precision_score The", "float vector of corresponding clear sky irradiance values :param t: int, optional: Timelag/stepsize", "np.nanvar(y-x) / np.nanvar(x) ) return r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability V", ":param cmin: float, optional: minimum values of clear sky reference to be used", "a clear day, VI would be ~ 1. 
The same is for very", "y[cls<=cmin] = np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32)", "\"\"\" Calculate mean absolute error (MaxAE) if an observation and forecast vector are", "must have same length, so pairs of elements with same index are compared.", "th TP[cnt] = np.sum((y_pred == True) & (y_true == True),axis=taxis) / float(wh) FP[cnt]", "diurnal variability is neglected.\" This method can use single-dimensional obervation and clear sky", ":param cls: vector of clear sky reference values :param cmin: minimum values of", "irradiance plotted against time divided by the \"length\" of the clear sky irradiance", "factor, average CSI,... Both vectors must have same length, so pairs of elements", "values :param t: average period in minutes :param cmin: minimum values of clear", "Calculate kurtosis of the probability distribution of the forecast error if an observation", "paper) IQR is the difference between the 75th percentile and the 25th percentile.", "= %.4f (%.3f) \\n \" % (np.nanmean(x), np.nanmean(x / c)) a = a", "y_pred = y >= th y_true = x >= th TP[cnt] = np.sum((y_pred", "forecast values :returns ksi: The KSI \"\"\" m = 100.0 nbins = 100", "y): \"\"\" Calculates Pearson Correlation Coefficient Description: Pearson’s correlation coefficient is a global", "corrected through the starting of more expensive, but faster starting, units in the", "bins/thresholds inside the range :returns tp,fp: returns vector of true positive TP and", "Coefficient Description: Pearson’s correlation coefficient is a global error measure metric; a larger", ") prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] = np.nan if taxis >= 0: r_num =", "Coimbra (2012) \"proposed metric for evaluation of solar forecasting models\" Description: \"Solar variability", "a = a + \"MEAN FOR = %.4f (%.3f) \\n \" % (np.nanmean(y),", "forecasts you can use boolean arrays or just 0 or 1s. 
\"\"\" from", "return 0 return num / den def pearson(x, y): \"\"\" Calculates Pearson Correlation", "vector of clear sky reference values :param t: average period in minutes :param", "output variability\" Description: Solar Variability VI over a period of time is calculated", "V = solar variability \"\"\" def slc(arr,s,e,ndims): \"\"\" returns the input array ´arr´", "number of bins/thresholds inside the range :returns tp,fp: returns vector of true positive", "xdiff = x[idx] - avg_x ydiff = y[idx] - avg_y diffprod += xdiff", ":param x: float vector of irradiance values :param cls: float vector of corresponding", "x: vector of observations :param y: vector of forecasts :returns: Skewness \"\"\" from", "skewness leads to an under-forecasting tail. The tendency to over-forecast (or under-forecast) is", "If two-dimensional vectors are provided subsequent instances must be in the second dimension.", "of forecasts :param fac: value for normalization (e.g. capacity factor, mean csi) :param", "but faster starting, units in the dispatch process. :param x: vector of observations", "np.subtract(csi0,csi1) # calculate standard deviation only if number of datapoints is large enough", "be used in the calculations. default is 50 W/m2. :return U: forecast uncertainty", "len(y) n = len(x) sum_x = float(sum(x)) sum_y = float(sum(y)) sum_x_sq = sum(map(lambda", "time is calculated as the ratio of the \"length\" of the measured irradiance", "Axis along which the means are computed :returns: MAPE \"\"\" if taxis >=", "indicates a flat data distribution, known as platykurtic. The pronounced peaks of the", "compensating forecast errors in the dispatch process. 
:param x: vector of observations :param", "np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill (FS) FS is", "\" % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a = a", "np.nan return V, deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability index defined by", "the leptokurtic distribution represent a large number of very small forecast errors :param", "moving window along this axis. If two-dimensional vectors are provided subsequent instances must", "variability is neglected.\" This method can use single-dimensional obervation and clear sky vectors", "perfect forecast. sscore = 0 means the variability dominates the forecast. By definition", "thermal units being committed, which need to be corrected through the starting of", "Vector of observation values :param y: Vector of forecast values :returns ksi: The", "= np.divide(x[t:],y[t:]) # clear sky index for time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1", "nd == 2: # clear sky index for time t+deltat csi0 = np.divide(x[:,t],y[:,t])", "is neglected.\" This method can use single-dimensional obervation and clear sky vectors with", ":returns: FS \"\"\" err1 = rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return ( 1 -", "we define the uncertainty as the standard deviation of a model forecast error", "correlation coefficient is a global error measure metric; a larger value of Pearson’s", "np.nanmean(x ) avg_y = np.nanmean(y, dtype=np.float32) diffprod = 0 xdiff2 = 0 ydiff2", "of observation values :param y: Vector of forecast values :returns: IQR \"\"\" iqr_x", "= np.percentile(y,75) - np.percentile(y,25) return iqr_x - iqr_y def r2(y,x): \"\"\" Calculates coefficient", "x[cls<=cmin] = np.nan return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): 
\"\"\"", "ydiff xdiff2 += xdiff * xdiff ydiff2 += ydiff * ydiff cnt +=", "TP = np.sum((y_pred == True) & (y_true == True),axis=taxis) TN = np.sum((y_pred ==", "a negative kurtosis indicates a flat data distribution, known as platykurtic. The pronounced", "of elements with same index are compared. Description: The MBE metric intends to", "maximum value of the absolute difference between two cumulative distribution functions (CDFs), expressed", "variability (changes in time) of irradiance will lead to higher values of VI.", ":param cls: vector of clear sky reference values :param t: average period in", "and a negative skewness leads to an under-forecasting tail. The tendency to over-forecast", "same is for very overcast days. Higher variability (changes in time) of irradiance", "al., 2013, Metrics for Evaluating the Accuracy of Solar Power Forecasting, conference paper,", "np.sum((y_pred == False) & (y_true == False),axis=taxis) FP = np.sum((y_pred == True) &", "metrics :param x: observation vector :param y: forecast vector :param c: clear sky", "of datapoints is large enough if np.sum(np.isfinite(deltak)) > 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else:", "== True) & (y_true == True),axis=taxis) TN = np.sum((y_pred == False) & (y_true", "<filename>skysol/validation/error_metrics.py # encoding=utf8 from __future__ import division import numpy as np \"\"\" Different", "for compensating forecast errors in the dispatch process. :param x: vector of observations", "(TP + TN) , float((TP + FP + FN + TN))) #return accuracy_score(y_true,", "2) / n), 0.5) if den == 0: return 0 return num /", "Vector of reference forecast :returns: FS \"\"\" err1 = rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis)", "during the entire forecasting period. :param x: vector of observations :param y: vector", "x: vector of observations :param y: vector of forecasts :param taxis (optional): Axis", "IQR. 
Input: :param x: Vector of observation values :param y: Vector of forecast", "distribution represent a large number of very small forecast errors :param x: vector", "solar forecasting skill. :param x: Vector of obserations :param y: Vector of forecasts", "\" % FS(x, y, p) a = a + \"SSCORE Persistence 60s =", "the forecast errors leads to an over-forecasting tail, and a negative skewness leads", "% (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a = a +", "A negative sscore means that the forecast performs worse than a persistence forecast.", "not to label as positive a sample that is negative. The best value", "psum = sum(map(lambda x, y: x * y, x, y)) num = psum", "values :returns ksi: The KSI \"\"\" m = 100.0 nbins = 100 cdf_x", "use boolean arrays or just 0 or 1s. \"\"\" #from sklearn.metrics import accuracy_score", "vector of corresponding clear sky irradiance values :param t: int, optional: Timelag/stepsize t", "nbins: number of bins/thresholds inside the range :returns tp,fp: returns vector of true", "CSI,... Both vectors must have same length, so pairs of elements with same", "factor, mean csi) :param taxis (optional): Axis along which the means are computed", "a persistence forecast has a sscore = 0. A negative sscore means that", ":param x: Vector of obserations :param y: Vector of forecasts :returns: Correlation Coefficient", "MAPE \"\"\" if taxis >= 0: return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else: return", "Description: R^2 is a comparison of the variance of the errors to the", "axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate mean biae", "done on the values in the first axis. Variability is then calculated as", "np.sum(D) def pearsonr(x, y): # Assume len(x) == len(y) n = len(x) sum_x", "the calculations. default is 50 W/m2. 
:returns: Solar irradiance variability score ( scalar", "numpy as np \"\"\" Different error metrics. Defintion and description of some from", "if taxis >= 0: return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac", "use In case of binary forecasts you can use boolean arrays or just", "+ \"Number of forecasts = %d (%.2f) \\n \" % (y.shape[0] - np.count_nonzero(np.isnan(y)),", "nd == 1: # clear sky index for time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd))", "= np.sum((y_pred == False) & (y_true == False),axis=taxis) FP = np.sum((y_pred == True)", "TN))) #return accuracy_score(y_true, y_pred, **kwargs) def prints(x, y, c, p=\"\"): \"\"\" Gives a", "forecast. By definition a persistence forecast has a sscore = 0. A negative", "description of some from Zhang et al., 2013, Metrics for Evaluating the Accuracy", "len(y) n = len(x) assert n > 0 avg_x = np.nanmean(x ) avg_y", "of local deviations of forecast errors. The MaxAE metric is useful to evaluate", "mean absolute error (MaxAE) if an observation and forecast vector are given. Both", "axis=taxis) ) else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) )) def maxae(x,y, taxis=-1): \"\"\"", "dispatch process. :param x: vector of observations :param y: vector of forecasts :param", "r2 = 1 - ( np.nanvar(y-x) / np.nanvar(x) ) return r2 def V(x,cls,t=1,cmin=50.):", "float(wh) FP[cnt] = np.sum((y_pred == True) & (y_true == False),axis=taxis) / float(wh) #print", "= np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if nd == 2: # clear sky index", ":returns sscore: \"\"\" y[cls<=cmin] = np.nan x[cls<=cmin] = np.nan return 1 - (", "flat data distribution, known as platykurtic. 
The pronounced peaks of the leptokurtic distribution", "np.max(cdf_x - cdf_y) # Observation maximum and minimum Pmax = np.max(x); Pmin =", "def iqrdiff(x,y): \"\"\" Calculates Interquartile Range Difference (IQR Diff) of a two given", "+ FP + FN + TN))) #return accuracy_score(y_true, y_pred, **kwargs) def prints(x, y,", "be used in the calculations. default is 50 W/m2. :returns: Solar irradiance variability", "of the peak of the distribution, or, conversely, how fat-tailed the distribution is,", "is known as the excess kurtosis. In the subsequent anIn [142]: U alysis,", "normalized differences are normalized to a given value. :param x: vector of observations", "and is the fourth standardized moment The difference between the kurtosis of a", "x: observation vector :param y: forecast vector :param minmax: range of thresholds, give", "= a + \"MEAN FOR = %.4f (%.3f) \\n \" % (np.nanmean(y), np.nanmean(y", "process. :param x: vector of observations :param y: vector of forecasts :returns: Skewness", "def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score: In case of binary forecasts you can", "means are computed :returns: MAPE \"\"\" if taxis >= 0: return np.nanmean(abs( (x-y)/fac", "cdf_y = cdf(y,nbins=nbins) # Critical value Vc N = len(y) if N <", "average CSI,... Both vectors must have same length, so pairs of elements with", "reference to be used in the calculations. default is 50 W/m2. :returns: Solar", ":returns: R^2 \"\"\" r2 = 1 - ( np.nanvar(y-x) / np.nanvar(x) ) return", "Marquez and Coimbra, 2013 (\"Proposed Metrics for Evaulation of Solar Forecasting Models\") \"Here", "same index are compared. Description: The MaxAE is an indicative of local deviations", "pairs of elements with same index are compared. 
Description: Same as MAE but", "proposed by Marquez and Coimbra (2012) \"proposed metric for evaluation of solar forecasting", "precision using sklearn module sklearn.metrics.precision_score The precision is the ratio tp / (tp", "positives and fp the number of false positives. The precision is intuitively the", "mean csi) :param taxis (optional): Axis along which the means are computed :returns:", ">= 0: return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32)", "== False),axis=taxis) FN = np.sum((y_pred == False) & (y_true == True),axis=taxis) return np.divide(", "= %.4f \\n \" % pearson(x, y) if p != \"\": a =", "= np.sqrt( np.nansum(prodx) * np.nansum(prody) ) r = np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1):", "# clear sky index for time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t])", ":param y: forecast vector :param c: clear sky vector :param p: reference vector", "Calculates maximum absolute error (MaxAE) if an observation and forecast vector are given.", "minmax[0] cnt = 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths: y_pred =", "sky solar irradiance so that the diurnal variability is neglected.\" This method can", "must be in the second dimension. Increments are calculated in the second dimension,", "against time divided by the \"length\" of the clear sky irradiance plotted against", "np.sum((y_pred == True) & (y_true == False),axis=taxis) / float(wh) #print th, TP[cnt], FP[cnt]", "are compared. Description: The MAE has been widely used in regression problems and", "of the above defined forecast uncertainity U and the timeseries variability V. sscore", "x: Vector of observation values :param y: Vector of forecast values :returns ksi:", "RMSE metric, does not excessively account for extreme forecast events. 
:param x: vector", "+ \"RMSE = %.4f \\n \" % rmse(x, y) a = a +", "\"\"\" Calculates solar variability V as introduced in Marquez and Coimbra (2012) \"proposed", ")) def maxae(x,y, taxis=-1): \"\"\" Calculates maximum absolute error (MaxAE) if an observation", "division import numpy as np \"\"\" Different error metrics. Defintion and description of", "obserations :param y: Vector of forecasts :param taxis (optional): Axis along which the", "returns the input array ´arr´ sliced from ´s´ to ´e´ at the specified", "y: forecast vector :param minmax: range of thresholds, give a tupel (e.g. (0,1)", "\"\"\" assert len(x) == len(y) n = len(x) assert n > 0 avg_x", "of the probability distribution, and is the third standardized moment Assuming that forecast", "as platykurtic. The pronounced peaks of the leptokurtic distribution represent a large number", "diffx = np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1 = np.multiply( diffx, diffy ) prodx", "precision is the ratio tp / (tp + fp) where tp is the", "the \"length\" of the clear sky irradiance plotted against time. On a clear", "the distribution is, and is the fourth standardized moment The difference between the", "are provided subsequent instances must be in the second dimension. Increments are calculated", "the means are computed :returns: RMSE \"\"\" if taxis >= 0: return np.sqrt(np.nanmean(", "2), y)) psum = sum(map(lambda x, y: x * y, x, y)) num", "errors :param x: vector of observations :param y: vector of forecasts :returns: Kurtosis", "the uncertainty as the standard deviation of a model forecast error divided by", "x: vector of observations :param y: vector of forecasts :returns: Kurtosis \"\"\" from", "range of thresholds, give a tupel (e.g. (0,1) in ) :param nbins: number", "assert ndims < 3 if taxis >= 0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym", "are calculated in the second dimension, while iterating is done on the values", "error measure during the entire forecasting period. 
:param x: vector of observations :param", "Axis along which the means are computed :returns: MAE \"\"\" if taxis >=", "\"SSCORE 60s = %.4f \\n \" % sscore(x, y, c, 60) if p", ":returns: RMSE \"\"\" if taxis >= 0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis)", "Coimbra, 2013 (\"Proposed Metrics for Evaulation of Solar Forecasting Models\") \"Here we define", ":param y: vector of forecasts :returns: Kurtosis \"\"\" from scipy.stats import kurtosis return", "forecast errors :param x: vector of observations :param y: vector of forecasts :returns:", "power system operators to better allocate resources for compensating forecast errors in the", "\"\"\" Compute the precision using sklearn module sklearn.metrics.precision_score The precision is the ratio", "at sklearn.metrics.precision_score for details how to use In case of binary forecasts you", "or np.isnan(y[idx]): continue xdiff = x[idx] - avg_x ydiff = y[idx] - avg_y", "which the means are computed :returns: RMSE \"\"\" if taxis >= 0: return", "import accuracy_score TP = np.sum((y_pred == True) & (y_true == True),axis=taxis) TN =", "(%.2f) \\n \" % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a", "Vc = 1.63 / np.sqrt(N) D = np.max(cdf_x - cdf_y) # Observation maximum", "x: pow(x, 2), x)) sum_y_sq = sum(map(lambda x: pow(x, 2), y)) psum =", "sufficient. N=\",N,\"<35\") return np.nan Vc = 1.63 / np.sqrt(N) D = np.max(cdf_x -", "== False),axis=taxis) / float(wh) #print th, TP[cnt], FP[cnt] cnt += 1 return TP,", "+ t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\"", "by the renewable energy industry to evaluate forecast performance. 
The MAE metric is", "The MaxAE metric is useful to evaluate the forecasting of short-term extreme events", "Xm = np.nanmean(X, dtype=np.float32) ym = np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm) diffy =", "a model forecast error divided by the esti- mated clear sky value of", "return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate mean absolute error (MaxAE)", "= shape[taxis] shape[taxis] = nbins TP = np.empty(shape) FP = np.empty(shape) else: TP", "false positives. The precision is intuitively the ability of the classifier not to", "for the given range of thresholds \"\"\" if taxis >= 0: shape =", "of the classifier not to label as positive a sample that is negative.", "y_true = x >= th TP[cnt] = np.sum((y_pred == True) & (y_true ==", "Espinar et al. 12. The Kolmogorov–Smirnov (KS) test is a nonparametric test to", "y: vector of forecasts :returns: Skewness \"\"\" from scipy.stats import skew return skew(x-y)", "by the \"length\" of the clear sky irradiance plotted against time. On a", "time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) # clear sky index for", "= np.multiply( diffx, diffy ) prodx = np.multiply( diffx, diffx ) prody =", "diffx, diffx ) prody = np.multiply( diffy, diffy ) prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)]", "if an observation and forecast vector are given. Both vectors must have same", "Difference deltak = np.subtract(csi0,csi1) # calculate standard deviation only if number of datapoints", "= 100 cdf_x = cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) # Critical value Vc N", "cls: vector of clear sky reference values :param t: timelag for variability calculations", "sky irradiance values :param t: int, optional: Timelag/stepsize t in indizes for increments", "is 50 W/m2. 
:returns sscore: \"\"\" y[cls<=cmin] = np.nan x[cls<=cmin] = np.nan return", "diffx, diffy ) prodx = np.multiply( diffx, diffx ) prody = np.multiply( diffy,", "calculations. default is 50 W/m2. :return U: forecast uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide(", "values :param p: Vector of reference forecast :returns: FS \"\"\" err1 = rmse(x,y,taxis=taxis)", "of true positives and fp the number of false positives. The precision is", "\"\"\" Different error metrics. Defintion and description of some from Zhang et al.,", "by Espinar et al. 12. The Kolmogorov–Smirnov (KS) test is a nonparametric test", "measured irradiance plotted against time divided by the \"length\" of the clear sky", "a given value. :param x: vector of observations :param y: vector of forecasts", "two data sets are significantly different. The KS statistic D is defined as", "= \"Number of measurements = %d (%.2f) \\n \" % (x.shape[0] - np.count_nonzero(np.isnan(x)),", "two-dimensional vectors are provided subsequent instances must be in the second dimension. Increments", "just 0 or 1s. \"\"\" #from sklearn.metrics import accuracy_score TP = np.sum((y_pred ==", "The MAE metric is also a global error measure metric, which, unlike the", "vector of clear sky reference values :param cmin: minimum values of clear sky", "- pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5)", "of reference forecast :returns: FS \"\"\" err1 = rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return", "return skew(x-y) def kurtosis(x,y): \"\"\" Calculate kurtosis of the probability distribution of the", "( Error(Forecast) / Error(Reference) ) :param x: Vector of observation values :param y:", "tail. The tendency to over-forecast (or under-forecast) is important in that the system", "better allocate resources for compensating forecast errors in the dispatch process. :param x:", "are not equal. 
An over-forecasting tendency could lead to a less than optimal", "t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\" as", "the classifier not to label as positive a sample that is negative. The", "Pearson’s correlation coefficient indicates an improved solar forecasting skill. :param x: Vector of", "as defined my Marquez and Coimbra, 2013 (\"Proposed Metrics for Evaulation of Solar", "== True),axis=taxis) TN = np.sum((y_pred == False) & (y_true == False),axis=taxis) FP =", "= 0 means the variability dominates the forecast. By definition a persistence forecast", "evaluation of solar forecasting models\" Description: The metric sscore is calculated as the", "not excessively account for extreme forecast events. :param x: vector of observations :param", "= 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths: y_pred = y >=", "= np.sum((y_pred == True) & (y_true == False),axis=taxis) / float(wh) #print th, TP[cnt],", "are computed :returns: MaxAE \"\"\" if taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return", "- cdf_y) # Observation maximum and minimum Pmax = np.max(x); Pmin = np.min(x)", "iterating is done on the values in the first axis. Variability is then", "encoding=utf8 from __future__ import division import numpy as np \"\"\" Different error metrics.", "The Kolmogorov–Smirnov (KS) test is a nonparametric test to determine if two data", "overcast days. Higher variability (changes in time) of irradiance will lead to higher", "# Observation maximum and minimum Pmax = np.max(x); Pmin = np.min(x) # Interval", "dtype=np.float32) diffx = np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1 = np.multiply( diffx, diffy )", "are compared. 
Description: The RMSE provides a global error measure during the entire", "np.percentile(x,25) iqr_y = np.percentile(y,75) - np.percentile(y,25) return iqr_x - iqr_y def r2(y,x): \"\"\"", "Stein et al. \"The variability index: A new and novel metric for quantifying", "if irradiance values :param cls: vector of clear sky reference values :param t:", "if np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff = x[idx] - avg_x ydiff = y[idx]", "means the variability dominates the forecast. By definition a persistence forecast has a", "& (y_true == False),axis=taxis) / float(wh) #print th, TP[cnt], FP[cnt] cnt += 1", "data sets are significantly different. The KS statistic D is defined as the", "FP[cnt] = np.sum((y_pred == True) & (y_true == False),axis=taxis) / float(wh) #print th,", "sklearn.metrics.precision_score The precision is the ratio tp / (tp + fp) where tp", "inside the range :returns tp,fp: returns vector of true positive TP and false", "of VI. :param x: vector if irradiance values :param cls: vector of clear", "%.4f \\n \" % np.nanmean(c) a = a + \"SSCORE 60s = %.4f", "two given datasets Description: (not from the paper) IQR is the difference between", "of the measured solar irradiance to that of a clear sky solar irradiance", "\"BIAS = %.4f \\n \" % mbe(x, y) a = a + \"CORR", "period of time is calculated as the ratio of the \"length\" of the", "vector of forecasts :param taxis (optional): Axis along which the means are computed", "diffx ) prody = np.multiply( diffy, diffy ) prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] =", "(x-y)/fac ), axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate", "values :param cmin: minimum values of clear sky reference to be used in", "(y_true == False),axis=taxis) / float(wh) #print th, TP[cnt], FP[cnt] cnt += 1 return", "magnitude of the peak of the distribution, or, conversely, how fat-tailed the distribution", "(y_true == True),axis=taxis) 
TN = np.sum((y_pred == False) & (y_true == False),axis=taxis) FP", "not equal. An over-forecasting tendency could lead to a less than optimal number", ">= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill", "is 0. Look at sklearn.metrics.precision_score for details how to use In case of", "- Pmin ) / m ksi = np.sum(D) def pearsonr(x, y): # Assume", "\\n \" % sscore(x, y, c, 60) if p != \"\": a =", "clear sky irradiance values :param t: int, optional: Timelag/stepsize t in indizes for", "y): # Assume len(x) == len(y) n = len(x) sum_x = float(sum(x)) sum_y", "forecasts :param taxis (optional): Axis along which the means are computed :returns: MAE", "functionality) Description: Pearson’s correlation coefficientq is a global error measure metric; a larger", "the ability of the classifier not to label as positive a sample that", "(not from the paper) IQR is the difference between the 75th percentile and", "to be used in the calculations. default is 50 W/m2. :returns: deltak =", "False),axis=taxis) FP = np.sum((y_pred == True) & (y_true == False),axis=taxis) FN = np.sum((y_pred", "accuracy_score TP = np.sum((y_pred == True) & (y_true == True),axis=taxis) TN = np.sum((y_pred", "calculated in the second dimension, while iterating is done on the values in", "Workshop on Integration of Solar Power into Power Systems \"\"\" def ksi(fcst,obs): \"\"\"", "r2(y,x): \"\"\" Calculates coefficient of determination R^2 Description: R^2 is a comparison of", "np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric for evaluating", "tendency to over-forecast (or under-forecast) is important in that the system actions taken", "pearsonr(x, y): # Assume len(x) == len(y) n = len(x) sum_x = float(sum(x))", "period. 
:param x: vector of observations :param y: vector of forecasts :param taxis", "x: Vector of observation values :param y: Vector of forecast values :param p:", "window along this axis. If two-dimensional vectors are provided subsequent instances must be", "forecasts :returns: Kurtosis \"\"\" from scipy.stats import kurtosis return kurtosis(x-y) def iqrdiff(x,y): \"\"\"", "timeseries variability V. sscore = 1 means a perfect forecast. sscore = 0", "and the 25th percentile. This function returns the difference of two IQR. Input:", "vector of observations :param y: vector of forecasts :returns: Kurtosis \"\"\" from scipy.stats", "true positives and fp the number of false positives. The precision is intuitively", "True),axis=taxis) TN = np.sum((y_pred == False) & (y_true == False),axis=taxis) FP = np.sum((y_pred", "taken to correct for under-forecasting and over-forecasting events are not equal. An over-forecasting", "100 cdf_x = cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) # Critical value Vc N =", "esti- mated clear sky value of the solar irradiance over a subset time", "+= ydiff * ydiff cnt += 1 if cnt == 0: return np.nan", "the renewable energy industry to evaluate forecast performance. The MAE metric is also", "Calculating a metric for evaluating solar forecast models proposed by Marquez and Coimbra", "sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI = np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\"", "absolute percentage error (MAPE) if an observation and forecast vector are given. 
Additionaly", "def rmse(x,y,taxis=-1): \"\"\" Calculates root mean square error (RMSE) if an observation and", "range of thresholds \"\"\" if taxis >= 0: shape = list(x.shape) wh =", "from sklearn.metrics import precision_score return precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver", "dtype=np.float32) )) def maxae(x,y, taxis=-1): \"\"\" Calculates maximum absolute error (MaxAE) if an", "peak of the distribution, or, conversely, how fat-tailed the distribution is, and is", ":param taxis (optional): Axis along which the means are computed :returns: MAPE \"\"\"", "= 100.0 nbins = 100 cdf_x = cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) # Critical", "days. Higher variability (changes in time) of irradiance will lead to higher values", "U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\" as defined my Marquez and Coimbra, 2013 (\"Proposed", "TP[cnt] = np.sum((y_pred == True) & (y_true == True),axis=taxis) / float(wh) FP[cnt] =", "observation values :param y: Vector of forecast values :returns: IQR \"\"\" iqr_x =", "than optimal number of large thermal units being committed, which need to be", "the values in the first axis. Variability is then calculated as the standard", "is also a global error measure metric, which, unlike the RMSE metric, does", "p=\"\"): \"\"\" Gives a summary of error metrics :param x: observation vector :param", "Models\") \"Here we define the uncertainty as the standard deviation of a model", "np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a = a + \"Number of forecasts = %d (%.2f)", "observation values :param y: Vector of forecast values :returns ksi: The KSI \"\"\"", "be corrected through the starting of more expensive, but faster starting, units in", "coefficient is a global error measure metric; a larger value of Pearson’s correlation", "metric, which, unlike the RMSE metric, does not excessively account for extreme forecast", "value is 0. 
Look at sklearn.metrics.precision_score for details how to use In case", "N = len(y) if N < 35: print(\"Number of data points for KSI", "assert len(x) == len(y) n = len(x) assert n > 0 avg_x =", "np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if taxis == 0: Xm", "ndims = X.ndim assert ndims < 3 if taxis >= 0: Xm =", "y.flatten() wh = x.shape[0] ra = minmax[1] - minmax[0] cnt = 0 ths", "a + \"BIAS = %.4f \\n \" % mbe(x, y) a = a", "OBS = %.4f (%.3f) \\n \" % (np.nanmean(x), np.nanmean(x / c)) a =", "two IQR. Input: :param x: Vector of observation values :param y: Vector of", "y, p) a = a + \"SSCORE Persistence 60s = %.4f \\n \"", ") r = np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1): \"\"\" Calculates root mean square", "np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI =", "np.divide(x[t:],y[t:]) # clear sky index for time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 =", "= np.min(x) # Interval distance d = ( Pmax - Pmin ) /", "else: return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate mean biae error", "Calculates Pearson Correlation Coefficient (with axis functionality) Description: Pearson’s correlation coefficientq is a", "np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill (FS) FS is defined as 1", "over-forecasting tail, and a negative skewness leads to an under-forecasting tail. The tendency", "50 W/m2. :returns: Solar irradiance variability score ( scalar ) VI \"\"\" y", "number of large thermal units being committed, which need to be corrected through", "elements with same index are compared. Description: Skewness is a measure of the", "of false positives. 
The precision is intuitively the ability of the classifier not", "Systems \"\"\" def ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow Test Integral (KSI) The KSI", "a positive skewness of the forecast errors leads to an over-forecasting tail, and", "clear sky value of the solar irradiance over a subset time window of", "of a clear sky solar irradiance so that the diurnal variability is neglected.\"", "\"\"\" err1 = rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return ( 1 - np.divide(err1,err2) )", "compared. Description: The RMSE provides a global error measure during the entire forecasting", "\"\"\" def slc(arr,s,e,ndims): \"\"\" returns the input array ´arr´ sliced from ´s´ to", "np.empty(nbins) FP = np.empty(nbins) x = x.flatten() y = y.flatten() wh = x.shape[0]", "nd = x.ndim y = cls.copy() # don't use values for low irradiance", "of the leptokurtic distribution represent a large number of very small forecast errors", "vector of observations :param y: vector of forecasts :param fac: value for normalization", "np.nan prody[np.isnan(prod1)] = np.nan if taxis >= 0: r_num = np.nansum(prod1,axis=taxis) r_den =", "of observation values :param y: Vector of forecast values :returns: R^2 \"\"\" r2", "vector of forecasts :param fac: value for normalization (e.g. capacity factor, mean csi)", "tupel (e.g. (0,1) in ) :param nbins: number of bins/thresholds inside the range", "if taxis >= 0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm", "a = a + \"BIAS = %.4f \\n \" % mbe(x, y) a", "the ratio of the \"length\" of the measured irradiance plotted against time divided", "nbins = 100 cdf_x = cdf(x,nbins=nbins) cdf_y = cdf(y,nbins=nbins) # Critical value Vc", "of elements with same index are compared. Description: The MaxAE is an indicative", "mbe(x,y,taxis=-1): \"\"\" Calculate mean biae error (MBE) if an observation and forecast vector", "0. 
A negative sscore means that the forecast performs worse than a persistence", "= 0 xdiff2 = 0 ydiff2 = 0 cnt = 0 for idx", "= float(sum(x)) sum_y = float(sum(y)) sum_x_sq = sum(map(lambda x: pow(x, 2), x)) sum_y_sq", "first axis. Variability is then calculated as the standard deviation of all increments.", "== 0: return np.nan return diffprod / np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\"", "- np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a = a + \"Number of", "IQR is the difference between the 75th percentile and the 25th percentile. This", "def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric for evaluating solar forecast models proposed by", "Kurtosis \"\"\" from scipy.stats import kurtosis return kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates Interquartile", "average forecast bias. Understanding the overall forecast bias (over- or under- forecasting) would", "Error(Forecast) / Error(Reference) ) :param x: Vector of observation values :param y: Vector", "Kolmogorov–Smirnov (KS) test is a nonparametric test to determine if two data sets", ">= 0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) ) else: return np.sqrt(np.nanmean( np.square(", "y: Vector of forecast values :returns: IQR \"\"\" iqr_x = np.percentile(x,75) - np.percentile(x,25)", "the magnitude of the peak of the distribution, or, conversely, how fat-tailed the", "\"\"\" if taxis >= 0: return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(", "pronounced peaks of the leptokurtic distribution represent a large number of very small", "Xm = Xm.T if taxis == 0: ym = ym.T else: Xm =", "of clear sky reference values :param t: average period in minutes :param cmin:", "xdiff * ydiff xdiff2 += xdiff * xdiff ydiff2 += ydiff * ydiff", "V = np.nan return V, deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability index", "\"\"\" Calculates 
Interquartile Range Difference (IQR Diff) of a two given datasets Description:", "for time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) # clear sky index", "irradiance will lead to higher values of VI. :param x: vector if irradiance", "forecast models proposed by Marquez and Coimbra (2012) \"proposed metric for evaluation of", "np.min(x) # Interval distance d = ( Pmax - Pmin ) / m", "is a measure of the asymmetry of the probability distribution, and is the", "def r2(y,x): \"\"\" Calculates coefficient of determination R^2 Description: R^2 is a comparison", "be used in the calculations. default is 50 W/m2. :returns sscore: \"\"\" y[cls<=cmin]", "in the calculations. default is 50 W/m2. :returns: deltak = vector of clear", "Xm = Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if taxis == 0: Xm = Xm.T", "are computed :returns: Correlation Coefficient \"\"\" ndims = X.ndim assert ndims < 3", "np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) ) else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) )) def", "global error measure metric, which, unlike the RMSE metric, does not excessively account", "values for low irradiance values y[cls<=cmin] = np.nan if nd == 1: #", "default is 50 W/m2. 
:returns sscore: \"\"\" y[cls<=cmin] = np.nan x[cls<=cmin] = np.nan", "ym = ym.T else: Xm = np.nanmean(X, dtype=np.float32) ym = np.nanmean(y, dtype=np.float32) diffx", ":param x: vector of observations :param y: vector of forecasts :param taxis (optional):", "Accuracy of Solar Power Forecasting, conference paper, 3rd International Workshop on Integration of", "def mbe(x,y,taxis=-1): \"\"\" Calculate mean biae error (MBE) if an observation and forecast", ":returns: Correlation Coefficient \"\"\" assert len(x) == len(y) n = len(x) assert n", "= y[idx] - avg_y diffprod += xdiff * ydiff xdiff2 += xdiff *", "global error measure metric; a larger value of Pearson’s correlation coefficient indicates an", "= np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody) ) r = np.divide(r_num,r_den) return", "be given, e.g. capacity factor, average CSI,... Both vectors must have same length,", "y[cls<=cmin] = np.nan x[cls<=cmin] = np.nan return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) )", "of irradiance values :param cls: float vector of corresponding clear sky irradiance values", "0 xdiff2 = 0 ydiff2 = 0 cnt = 0 for idx in", "Increments are calculated in the second dimension, while iterating is done on the", "0: Xm = Xm.T if taxis == 0: ym = ym.T else: Xm", "variability\" Description: Solar Variability VI over a period of time is calculated as", "variability V. sscore = 1 means a perfect forecast. sscore = 0 means", "computed :returns: MaxAE \"\"\" if taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32)", "sky reference values :param t: average period in minutes :param cmin: minimum values", "a perfect forecast. sscore = 0 means the variability dominates the forecast. By", "kurtosis. 
In the subsequent anIn [142]: U alysis, the term kurtosis will be", "forecast values :param p: Vector of reference forecast :returns: FS \"\"\" err1 =", "under-forecast) is important in that the system actions taken to correct for under-forecasting", "distribution and that of the normal distribution is known as the excess kurtosis.", "+ \"MEAN FOR = %.4f (%.3f) \\n \" % (np.nanmean(y), np.nanmean(y / c))", "x * y, x, y)) num = psum - (sum_x * sum_y/n) den", ":param y: vector of forecasts :param fac: value for normalization (e.g. capacity factor,", "= np.nan if taxis >= 0: r_num = np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis),", "(np.nanmean(x), np.nanmean(x / c)) a = a + \"MEAN FOR = %.4f (%.3f)", "if taxis >= 0: r_num = np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) ))", "plotted against time divided by the \"length\" of the clear sky irradiance plotted", ") def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric for evaluating solar forecast models proposed", "taxis >= 0: return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac )", "same length, so pairs of elements with same index are compared. Description: Skewness", "of the clear sky irradiance plotted against time. On a clear day, VI", "enough if np.sum(np.isfinite(deltak)) > 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan return", "in the calculations. default is 50 W/m2. 
:return U: forecast uncertainty \"\"\" return", "(x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate mean biae error (MBE) if an", "arr[tuple(items)] nd = x.ndim y = cls.copy() # don't use values for low", "calculated as the ratio of the \"length\" of the measured irradiance plotted against", "of solar forecasting models\" Description: The metric sscore is calculated as the ratio", "difference between two cumulative distribution functions (CDFs), expressed as :param x: Vector of", "asymmetry of the probability distribution, and is the third standardized moment Assuming that", "observations :param y: vector of forecasts :param taxis (optional): Axis along which the", "return ( 1 - np.divide(err1,err2) ) def skewness(x,y): \"\"\" Calculate skewness of the", "values :returns: IQR \"\"\" iqr_x = np.percentile(x,75) - np.percentile(x,25) iqr_y = np.percentile(y,75) -", "variability index: A new and novel metric for quantifying irradiance and pv output", "in the power system. :param x: vector of observations :param y: vector of", "+ TN) , float((TP + FP + FN + TN))) #return accuracy_score(y_true, y_pred,", "vector :param p: reference vector :returns a: a string with a number of", "a positive kurtosis value is known as leptokurtic, which indicates a peaked distribution;", "+= xdiff * ydiff xdiff2 += xdiff * xdiff ydiff2 += ydiff *", "wh = shape[taxis] shape[taxis] = nbins TP = np.empty(shape) FP = np.empty(shape) else:", "or 1s. \"\"\" #from sklearn.metrics import accuracy_score TP = np.sum((y_pred == True) &", "VI. 
:param x: vector if irradiance values :param cls: vector of clear sky", "third standardized moment Assuming that forecast errors are equal to forecast power minus", "variance of the data which is to be modeled Input: :param x: Vector", "1: # clear sky index for time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 =", "Axis along which the means are computed :returns: MaxAE \"\"\" if taxis >=", "(%.2f) \\n \" % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a", "of error metrics :param x: observation vector :param y: forecast vector :param c:", "with same index are compared. Description: Skewness is a measure of the asymmetry", "True),axis=taxis) return np.divide( (TP + TN) , float((TP + FP + FN +", "= %.4f (%.3f) \\n \" % (np.nanmean(y), np.nanmean(y / c)) a = a", "extreme events in the power system. :param x: vector of observations :param y:", "can use single-dimensional obervation and clear sky vectors with subsequent and temporal equidistant", "error metrics. Defintion and description of some from Zhang et al., 2013, Metrics", "lead to a less than optimal number of large thermal units being committed,", "- np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a = a + \"Number of forecasts = %d", "Correlation Coefficient \"\"\" ndims = X.ndim assert ndims < 3 if taxis >=", "& (y_true == False),axis=taxis) FP = np.sum((y_pred == True) & (y_true == False),axis=taxis)", "value is known as leptokurtic, which indicates a peaked distribution; whereas a negative", "metric for evaluation of solar forecasting models\" Description: The metric sscore is calculated", "**kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating Curve (ROC) :param x: observation vector", "sscore = 1 means a perfect forecast. 
sscore = 0 means the variability", "= np.nan x[cls<=cmin] = np.nan return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def", "vector of irradiance values :param cls: float vector of corresponding clear sky irradiance", "import division import numpy as np \"\"\" Different error metrics. Defintion and description", "+ \"SSCORE 60s = %.4f \\n \" % sscore(x, y, c, 60) if", "a = a + \"SSCORE 60s = %.4f \\n \" % sscore(x, y,", "1 and the worst value is 0. Look at sklearn.metrics.precision_score for details how", "* ydiff cnt += 1 if cnt == 0: return np.nan return diffprod", "local deviations of forecast errors. The MaxAE metric is useful to evaluate the", "the timeseries variability V. sscore = 1 means a perfect forecast. sscore =", "if N < 35: print(\"Number of data points for KSI not sufficient. N=\",N,\"<35\")", "the means are computed :returns: MAE \"\"\" if taxis >= 0: return np.nanmean(abs(x-y),", "np.divide(x[:,t],y[:,t]) # clear sky index for time t csi1 = np.divide(x[:,0],y[:,0]) # Difference", "csi1 = np.divide(x[0:-t],y[0:-t]) if nd == 2: # clear sky index for time", ":param y: Vector of forecast values :returns ksi: The KSI \"\"\" m =", "metric, does not excessively account for extreme forecast events. :param x: vector of", "variability V as introduced in Marquez and Coimbra (2012) \"proposed metric for evaluation", "% mbe(x, y) a = a + \"CORR = %.4f \\n \" %", "second dimension. Increments are calculated in the second dimension, while iterating is done", "(y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a = a + \"RMSE = %.4f \\n", "an indicative of local deviations of forecast errors. 
The MaxAE metric is useful", "a clear sky solar irradiance so that the diurnal variability is neglected.\" This", "a + \"Number of forecasts = %d (%.2f) \\n \" % (y.shape[0] -", "VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\" as defined my Marquez and Coimbra,", "negative sscore means that the forecast performs worse than a persistence forecast. :param", "along which the means are computed :returns: Correlation Coefficient \"\"\" ndims = X.ndim", "t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) # clear sky index for time", "negative skewness leads to an under-forecasting tail. The tendency to over-forecast (or under-forecast)", "forecast error if an observation and forecast vector are given. Both vectors must", "the Kolmogorov-Smirnow Test Integral (KSI) The KSI and OVER metrics were proposed by", "the errors to the variance of the data which is to be modeled", "err1 = rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return ( 1 - np.divide(err1,err2) ) def", "plotted against time. On a clear day, VI would be ~ 1. The", ">= 0: shape = list(x.shape) wh = shape[taxis] shape[taxis] = nbins TP =", "vector of clear sky reference values :param t: timelag for variability calculations :param", "ym.T else: Xm = np.nanmean(X, dtype=np.float32) ym = np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm)", "kurtosis will be treated synonymously with excess kurtosis. A distribution with a positive", "kurtosis. A distribution with a positive kurtosis value is known as leptokurtic, which", "return arr[tuple(items)] nd = x.ndim y = cls.copy() # don't use values for", "prody[np.isnan(prod1)] = np.nan if taxis >= 0: r_num = np.nansum(prod1,axis=taxis) r_den = np.sqrt(", "negative kurtosis indicates a flat data distribution, known as platykurtic. 
The pronounced peaks", "mean absolute percentage error (MAPE) if an observation and forecast vector are given.", "y, p) a = a + \"MEAN OBS = %.4f (%.3f) \\n \"", "Axis along which the means are computed :returns: RMSE \"\"\" if taxis >=", "Pearson’s correlation coefficientq is a global error measure metric; a larger value of", "´s´ to ´e´ at the specified axis ´taxis´\"\"\" irange = slice(s,e) items =", "mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute percentage error (MAPE) if an observation and forecast", "of the solar irradiance over a subset time window of Nw data points\"", "Both vectors must have same length, so pairs of elements with same index", "- ( Error(Forecast) / Error(Reference) ) :param x: Vector of observation values :param", "the variance of the errors to the variance of the data which is", "a variability index defined by Stein et al. \"The variability index: A new", "or 1s. \"\"\" from sklearn.metrics import precision_score return precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1):", "and clear sky vectors with subsequent and temporal equidistant instances ( timeseries ).", "minmax: range of thresholds, give a tupel (e.g. (0,1) in ) :param nbins:", "# calculate standard deviation only if number of datapoints is large enough if", "a global error measure during the entire forecasting period. :param x: vector of", "The KSI \"\"\" m = 100.0 nbins = 100 cdf_x = cdf(x,nbins=nbins) cdf_y", "(0,1) in ) :param nbins: number of bins/thresholds inside the range :returns tp,fp:", "sscore is calculated as the ratio of the above defined forecast uncertainity U", "vector :param y: forecast vector :param c: clear sky vector :param p: reference", "50 W/m2. :returns: deltak = vector of clear sky index increments :returns: V", "day, VI would be ~ 1. 
The same is for very overcast days.", "len(x) sum_x = float(sum(x)) sum_y = float(sum(y)) sum_x_sq = sum(map(lambda x: pow(x, 2),", "% FS(x, y, p) a = a + \"MEAN OBS = %.4f (%.3f)", "the difference of two IQR. Input: :param x: Vector of observation values :param", "observation values :param y: Vector of forecast values :param p: Vector of reference", "to an over-forecasting tail, and a negative skewness leads to an under-forecasting tail.", "compared. Description: Same as MAE but normalized differences are normalized to a given", ":param x: vector of irradiance values :param y: vector of irradiance forecasts :param", "are given. Both vectors must have same length, so pairs of elements with", ":returns: deltak = vector of clear sky index increments :returns: V = solar", "persistence forecast has a sscore = 0. A negative sscore means that the", "of binary forecasts you can use boolean arrays or just 0 or 1s.", "at the specified axis ´taxis´\"\"\" irange = slice(s,e) items = [slice(None, None, None)]", "a + \"SSCORE Persistence 60s = %.4f \\n \" % sscore(x, p, c,", "solar irradiance over a subset time window of Nw data points\" :param x:", "provides a global error measure during the entire forecasting period. :param x: vector", "a less than optimal number of large thermal units being committed, which need", "defined by Stein et al. \"The variability index: A new and novel metric", "pairs of elements with same index are compared. Description: The RMSE provides a", "a = a + \"MEAN CLS = %.4f \\n \" % np.nanmean(c) a", "= 0 for idx in range(n): if np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff =", "time t csi1 = np.divide(x[:,0],y[:,0]) # Difference deltak = np.subtract(csi0,csi1) # calculate standard", "if taxis >= 0: shape = list(x.shape) wh = shape[taxis] shape[taxis] = nbins", "Skewness \"\"\" from scipy.stats import skew return skew(x-y) def kurtosis(x,y): \"\"\" Calculate kurtosis", "indicative of local deviations of forecast errors. 
The MaxAE metric is useful to", "a string with a number of metrics\"\"\" a = \"Number of measurements =", "/ float(y.shape[0])) a = a + \"RMSE = %.4f \\n \" % rmse(x,", "deviation of all increments. :param x: float vector of irradiance values :param cls:", "= X.ndim assert ndims < 3 if taxis >= 0: Xm = np.nanmean(X,axis=taxis,", "1 means a perfect forecast. sscore = 0 means the variability dominates the", "over a period of time is calculated as the ratio of the \"length\"", "\"MEAN OBS = %.4f (%.3f) \\n \" % (np.nanmean(x), np.nanmean(x / c)) a", "which indicates a peaked distribution; whereas a negative kurtosis indicates a flat data", "very small forecast errors :param x: vector of observations :param y: vector of", "Observation maximum and minimum Pmax = np.max(x); Pmin = np.min(x) # Interval distance", "(np.nanmean(y), np.nanmean(y / c)) a = a + \"MEAN CLS = %.4f \\n", "forecasts :param taxis (optional): Axis along which the means are computed :returns: Correlation", "forecasts :param taxis (optional): Axis along which the means are computed :returns: RMSE", "which the means are computed :returns: Correlation Coefficient \"\"\" ndims = X.ndim assert", "taxis == 0: Xm = Xm.T if taxis == 0: ym = ym.T", "for evaluation of solar forecasting models\" Description: \"Solar variability V is the standard", "if taxis == 0: ym = ym.T else: Xm = np.nanmean(X, dtype=np.float32) ym", "optional: minimum values of clear sky reference to be used in the calculations.", "in range(n): if np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff = x[idx] - avg_x ydiff", "a + \"SSCORE 60s = %.4f \\n \" % sscore(x, y, c, 60)", "= 1.63 / np.sqrt(N) D = np.max(cdf_x - cdf_y) # Observation maximum and", "if den == 0: return 0 return num / den def pearson(x, y):", "will lead to higher values of VI. 
:param x: vector if irradiance values", "of thresholds \"\"\" if taxis >= 0: shape = list(x.shape) wh = shape[taxis]", "important in that the system actions taken to correct for under-forecasting and over-forecasting", "irradiance values y[cls<=cmin] = np.nan if nd == 1: # clear sky index", "by Marquez and Coimbra (2012) \"proposed metric for evaluation of solar forecasting models\"", "< 3 if taxis >= 0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis,", "diffprod / np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation Coefficient (with", "else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) )) def maxae(x,y, taxis=-1): \"\"\" Calculates maximum", "means a perfect forecast. sscore = 0 means the variability dominates the forecast.", "to correct for under-forecasting and over-forecasting events are not equal. An over-forecasting tendency", "time window of Nw data points\" :param x: vector of irradiance values :param", "process. :param x: vector of observations :param y: vector of forecasts :param taxis", "np.divide(x[:,0],y[:,0]) # Difference deltak = np.subtract(csi0,csi1) # calculate standard deviation only if number", "summary of error metrics :param x: observation vector :param y: forecast vector :param", "taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast", "and is the third standardized moment Assuming that forecast errors are equal to", "sliced from ´s´ to ´e´ at the specified axis ´taxis´\"\"\" irange = slice(s,e)", "Calculate mean absolute error (MaxAE) if an observation and forecast vector are given.", "positives. 
The precision is intuitively the ability of the classifier not to label", ") avg_y = np.nanmean(y, dtype=np.float32) diffprod = 0 xdiff2 = 0 ydiff2 =", "y: vector of forecasts :returns: Kurtosis \"\"\" from scipy.stats import kurtosis return kurtosis(x-y)", "12. The Kolmogorov–Smirnov (KS) test is a nonparametric test to determine if two", "of forecasts :returns: Correlation Coefficient \"\"\" assert len(x) == len(y) n = len(x)", "0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0): \"\"\" Calculates Forecast Skill (FS)", "Calculates Forecast Skill (FS) FS is defined as 1 - ( Error(Forecast) /", "= np.subtract(csi0,csi1) # calculate standard deviation only if number of datapoints is large", "the solar irradiance over a subset time window of Nw data points\" :param", "with same index are compared. Description: The MAE has been widely used in", "iqrdiff(x,y): \"\"\" Calculates Interquartile Range Difference (IQR Diff) of a two given datasets", "skewness of the forecast errors leads to an over-forecasting tail, and a negative", "= rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return ( 1 - np.divide(err1,err2) ) def skewness(x,y):", "V is the standard deviation of the step-changes of the measured solar irradiance", "length, so pairs of elements with same index are compared. Description: The MBE", "= vector of clear sky index increments :returns: V = solar variability \"\"\"", "Description: (not from the paper) IQR is the difference between the 75th percentile", "error (MBE) if an observation and forecast vector are given. Both vectors must", "The MaxAE is an indicative of local deviations of forecast errors. 
The MaxAE", "Zhang et al., 2013, Metrics for Evaluating the Accuracy of Solar Power Forecasting,", "the distribution, or, conversely, how fat-tailed the distribution is, and is the fourth", "forecasts :returns: Correlation Coefficient \"\"\" assert len(x) == len(y) n = len(x) assert", "1 - np.divide(err1,err2) ) def skewness(x,y): \"\"\" Calculate skewness of the probability distribution", "A distribution with a positive kurtosis value is known as leptokurtic, which indicates", "array ´arr´ sliced from ´s´ to ´e´ at the specified axis ´taxis´\"\"\" irange", "of the \"length\" of the measured irradiance plotted against time divided by the", "import numpy as np \"\"\" Different error metrics. Defintion and description of some", "xdiff2 = 0 ydiff2 = 0 cnt = 0 for idx in range(n):", "c, 60) if p != \"\": a = a + \"FS = %.4f", "the means are computed :returns: MBE \"\"\" if taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32)", "FOR = %.4f (%.3f) \\n \" % (np.nanmean(y), np.nanmean(y / c)) a =", "taxis (optional): Axis along which the means are computed :returns: RMSE \"\"\" if", "is the fourth standardized moment The difference between the kurtosis of a sample", "with same index are compared. Description: The RMSE provides a global error measure", "cls.copy() y[cls<=cmin] = np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 +", "define the uncertainty as the standard deviation of a model forecast error divided", "\"\"\" iqr_x = np.percentile(x,75) - np.percentile(x,25) iqr_y = np.percentile(y,75) - np.percentile(y,25) return iqr_x", "dtype=np.float32) ym = np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1 =", "= np.nan prody[np.isnan(prod1)] = np.nan if taxis >= 0: r_num = np.nansum(prod1,axis=taxis) r_den", "same index are compared. 
Description: The MBE metric intends to indicate average forecast", "Vector of observation values :param y: Vector of forecast values :returns: IQR \"\"\"", "= cls.copy() # don't use values for low irradiance values y[cls<=cmin] = np.nan", "= np.sum((y_pred == True) & (y_true == True),axis=taxis) TN = np.sum((y_pred == False)", "= np.nanmean(X, dtype=np.float32) ym = np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm) diffy = np.subtract(y,ym)", "same length, so pairs of elements with same index are compared. Description: Kurtosis", "of solar forecasting models\" Description: \"Solar variability V is the standard deviation of", "a + \"MEAN FOR = %.4f (%.3f) \\n \" % (np.nanmean(y), np.nanmean(y /", "% (np.nanmean(y), np.nanmean(y / c)) a = a + \"MEAN CLS = %.4f", "* sum_y/n) den = pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq -", "distribution, and is the third standardized moment Assuming that forecast errors are equal", "np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) ) else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) ))", "np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a = a + \"RMSE = %.4f", "axis ´taxis´\"\"\" irange = slice(s,e) items = [slice(None, None, None)] * ndims items[taxis]", "# clear sky index for time t+deltat #csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:])", ":param t: timelag for variability calculations :param cmin: minimum values of clear sky", "\"\"\" Calculate skewness of the probability distribution of the forecast error if an", "clear sky index for time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if", "Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym =", "np.nanmean(y, dtype=np.float32) diffprod = 0 xdiff2 = 0 ydiff2 = 0 cnt =", "to evaluate forecast performance. 
The MAE metric is also a global error measure", "time) of irradiance will lead to higher values of VI. :param x: vector", "- minmax[0] cnt = 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths: y_pred", "Correlation Coefficient Description: Pearson’s correlation coefficient is a global error measure metric; a", "+ \"FS = %.4f \\n \" % FS(x, y, p) a = a", "y: vector of forecasts :param fac: value for normalization (e.g. capacity factor, mean", "the absolute difference between two cumulative distribution functions (CDFs), expressed as :param x:", "Pearson’s correlation coefficient is a global error measure metric; a larger value of", "= np.empty(shape) FP = np.empty(shape) else: TP = np.empty(nbins) FP = np.empty(nbins) x", "a nonparametric test to determine if two data sets are significantly different. The", "= np.empty(shape) else: TP = np.empty(nbins) FP = np.empty(nbins) x = x.flatten() y", "to higher values of VI. :param x: vector if irradiance values :param cls:", "den def pearson(x, y): \"\"\" Calculates Pearson Correlation Coefficient Description: Pearson’s correlation coefficient", "\"\"\" Calculates Pearson Correlation Coefficient (with axis functionality) Description: Pearson’s correlation coefficientq is", "in the first axis. Variability is then calculated as the standard deviation of", "precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating Curve (ROC) :param x:", "(y_true == False),axis=taxis) FP = np.sum((y_pred == True) & (y_true == False),axis=taxis) FN", "values of VI. 
:param x: vector if irradiance values :param cls: vector of", "x, y)) num = psum - (sum_x * sum_y/n) den = pow((sum_x_sq -", "np.percentile(y,75) - np.percentile(y,25) return iqr_x - iqr_y def r2(y,x): \"\"\" Calculates coefficient of", "Description: Same as MAE but normalized differences are normalized to a given value.", "are computed :returns: MAE \"\"\" if taxis >= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else:", "y = y.flatten() wh = x.shape[0] ra = minmax[1] - minmax[0] cnt =", "is 50 W/m2. :returns: deltak = vector of clear sky index increments :returns:", "FP[cnt] cnt += 1 return TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score:", "0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate mean absolute error", "a = \"Number of measurements = %d (%.2f) \\n \" % (x.shape[0] -", "# Assume len(x) == len(y) n = len(x) sum_x = float(sum(x)) sum_y =", "\"\"\" Calculating a metric for evaluating solar forecast models proposed by Marquez and", "True) & (y_true == True),axis=taxis) / float(wh) FP[cnt] = np.sum((y_pred == True) &", "sample that is negative. The best value is 1 and the worst value", "elements with same index are compared. Description: Kurtosis is a measure of the", "= a + \"Number of forecasts = %d (%.2f) \\n \" % (y.shape[0]", "differences are normalized to a given value. :param x: vector of observations :param", "of observation values :param y: Vector of forecast values :param p: Vector of", "Variability VI over a period of time is calculated as the ratio of", "axis. 
If two-dimensional vectors are provided subsequent instances must be in the second", "sum_y = float(sum(y)) sum_x_sq = sum(map(lambda x: pow(x, 2), x)) sum_y_sq = sum(map(lambda", "\"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating", "metrics. Defintion and description of some from Zhang et al., 2013, Metrics for", "False),axis=taxis) FN = np.sum((y_pred == False) & (y_true == True),axis=taxis) return np.divide( (TP", "= np.multiply( diffy, diffy ) prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] = np.nan if taxis", "(KSI) The KSI and OVER metrics were proposed by Espinar et al. 12.", "np.nanmean(X, dtype=np.float32) ym = np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1", ":param taxis (optional): Axis along which the means are computed :returns: Correlation Coefficient", "Different error metrics. Defintion and description of some from Zhang et al., 2013,", "the normal distribution is known as the excess kurtosis. In the subsequent anIn", "= x.flatten() y = y.flatten() wh = x.shape[0] ra = minmax[1] - minmax[0]", "distribution with a positive kurtosis value is known as leptokurtic, which indicates a", "iqr_x - iqr_y def r2(y,x): \"\"\" Calculates coefficient of determination R^2 Description: R^2", "return r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability V as introduced in Marquez", "Higher variability (changes in time) of irradiance will lead to higher values of", "#csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if nd == 2: # clear sky", "FS(x, y, p) a = a + \"SSCORE Persistence 60s = %.4f \\n", "Same as MAE but normalized differences are normalized to a given value. :param", "skill. 
:param x: Vector of obserations :param y: Vector of forecasts :returns: Correlation", "= irange return arr[tuple(items)] nd = x.ndim y = cls.copy() # don't use", ":param x: Vector of obserations :param y: Vector of forecasts :param taxis (optional):", "sum_y_sq = sum(map(lambda x: pow(x, 2), y)) psum = sum(map(lambda x, y: x", "error divided by the esti- mated clear sky value of the solar irradiance", "into Power Systems \"\"\" def ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow Test Integral (KSI)", "sscore means that the forecast performs worse than a persistence forecast. :param x:", "KSI and OVER metrics were proposed by Espinar et al. 12. The Kolmogorov–Smirnov", "is defined as 1 - ( Error(Forecast) / Error(Reference) ) :param x: Vector", ")**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric for evaluating solar forecast", "t in indizes for increments :param cmin: float, optional: minimum values of clear", "of the measured irradiance plotted against time divided by the \"length\" of the", "den = pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2)", "in ) :param nbins: number of bins/thresholds inside the range :returns tp,fp: returns", "are compared. Description: The MaxAE is an indicative of local deviations of forecast", "kurtosis indicates a flat data distribution, known as platykurtic. The pronounced peaks of", "clear sky vectors with subsequent and temporal equidistant instances ( timeseries ). Increments", "def slc(arr,s,e,ndims): \"\"\" returns the input array ´arr´ sliced from ´s´ to ´e´", "time. On a clear day, VI would be ~ 1. The same is", "length, so pairs of elements with same index are compared. Description: The RMSE", "leads to an under-forecasting tail. The tendency to over-forecast (or under-forecast) is important", "np.multiply( diffx, diffy ) prodx = np.multiply( diffx, diffx ) prody = np.multiply(", "timeseries ). 
Increments are calculated with an moving window along this axis. If", "of measurements = %d (%.2f) \\n \" % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] -", "IQR \"\"\" iqr_x = np.percentile(x,75) - np.percentile(x,25) iqr_y = np.percentile(y,75) - np.percentile(y,25) return", "or just 0 or 1s. \"\"\" #from sklearn.metrics import accuracy_score TP = np.sum((y_pred", "* xdiff ydiff2 += ydiff * ydiff cnt += 1 if cnt ==", "sscore(x, y, c, 60) if p != \"\": a = a + \"FS", "skill. :param x: Vector of obserations :param y: Vector of forecasts :param taxis", "num / den def pearson(x, y): \"\"\" Calculates Pearson Correlation Coefficient Description: Pearson’s", "calculated with an moving window along this axis. If two-dimensional vectors are provided", "clear sky reference values :param cmin: minimum values of clear sky reference to", "np.nanmean(y / c)) a = a + \"MEAN CLS = %.4f \\n \"", "of the probability distribution of the forecast error if an observation and forecast", "np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute percentage error", "slice(s,e) items = [slice(None, None, None)] * ndims items[taxis] = irange return arr[tuple(items)]", "is done on the values in the first axis. Variability is then calculated", "sscore: \"\"\" y[cls<=cmin] = np.nan x[cls<=cmin] = np.nan return 1 - ( np.divide(U(x,y,cls,taxis=taxis),", "reference values :param t: average period in minutes :param cmin: minimum values of", "Understanding the overall forecast bias (over- or under- forecasting) would allow power system", "value for normalization (e.g. 
capacity factor, mean csi) :param taxis (optional): Axis along", "r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num = np.nansum(prod1) r_den = np.sqrt(", "0: return np.nan return diffprod / np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates", "\"\"\" ndims = X.ndim assert ndims < 3 if taxis >= 0: Xm", "dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if taxis == 0: Xm =", "the forecasting of short-term extreme events in the power system. :param x: vector", "as MAE but normalized differences are normalized to a given value. :param x:", "None)] * ndims items[taxis] = irange return arr[tuple(items)] nd = x.ndim y =", "as positive a sample that is negative. The best value is 1 and", "to that of a clear sky solar irradiance so that the diurnal variability", "forecasting models\" Description: \"Solar variability V is the standard deviation of the step-changes", ":returns: MAPE \"\"\" if taxis >= 0: return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else:", "0: return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def", "increments :returns: V = solar variability \"\"\" def slc(arr,s,e,ndims): \"\"\" returns the input", "forecast vector :param minmax: range of thresholds, give a tupel (e.g. 
(0,1) in", "KSI \"\"\" m = 100.0 nbins = 100 cdf_x = cdf(x,nbins=nbins) cdf_y =", "Description: The MAE has been widely used in regression problems and by the", "Power into Power Systems \"\"\" def ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow Test Integral", "of the asymmetry of the probability distribution, and is the third standardized moment", "MAE \"\"\" if taxis >= 0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def", "is a global error measure metric; a larger value of Pearson’s correlation coefficient", "positive skewness of the forecast errors leads to an over-forecasting tail, and a", "In the subsequent anIn [142]: U alysis, the term kurtosis will be treated", "VI \"\"\" y = cls.copy() y[cls<=cmin] = np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32)", "(over- or under- forecasting) would allow power system operators to better allocate resources", "step-changes of the measured solar irradiance to that of a clear sky solar", "* np.nansum(prody) ) r = np.divide(r_num,r_den) return r def rmse(x,y,taxis=-1): \"\"\" Calculates root", "accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score: In case of binary forecasts you can use", "system actions taken to correct for under-forecasting and over-forecasting events are not equal.", "is a measure of the magnitude of the peak of the distribution, or,", "to over-forecast (or under-forecast) is important in that the system actions taken to", "the RMSE metric, does not excessively account for extreme forecast events. 
:param x:", "deviation of a model forecast error divided by the esti- mated clear sky", "= a + \"BIAS = %.4f \\n \" % mbe(x, y) a =", "sky index for time t csi1 = np.divide(x[:,0],y[:,0]) # Difference deltak = np.subtract(csi0,csi1)", "MAE metric is also a global error measure metric, which, unlike the RMSE", "np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num = np.nansum(prod1) r_den =", "´arr´ sliced from ´s´ to ´e´ at the specified axis ´taxis´\"\"\" irange =", "OVER metrics were proposed by Espinar et al. 12. The Kolmogorov–Smirnov (KS) test", "1. The same is for very overcast days. Higher variability (changes in time)", "Forecasting, conference paper, 3rd International Workshop on Integration of Solar Power into Power", "r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability V as introduced in Marquez and", "the means are computed :returns: MAPE \"\"\" if taxis >= 0: return np.nanmean(abs(", "as leptokurtic, which indicates a peaked distribution; whereas a negative kurtosis indicates a", "x, y: x * y, x, y)) num = psum - (sum_x *", "Calculates root mean square error (RMSE) if an observation and forecast vector are", "elements with same index are compared. Description: The MaxAE is an indicative of", "of a two given datasets Description: (not from the paper) IQR is the", "- np.divide(err1,err2) ) def skewness(x,y): \"\"\" Calculate skewness of the probability distribution of", "prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] = np.nan if taxis >= 0: r_num = np.nansum(prod1,axis=taxis)", "points for KSI not sufficient. N=\",N,\"<35\") return np.nan Vc = 1.63 / np.sqrt(N)", "y) if p != \"\": a = a + \"FS = %.4f \\n", "np.nanmean(y, dtype=np.float32) diffx = np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1 = np.multiply( diffx, diffy", "Increments are calculated with an moving window along this axis. 
If two-dimensional vectors", "clear sky vector :param p: reference vector :returns a: a string with a", "the ratio tp / (tp + fp) where tp is the number of", "global error measure during the entire forecasting period. :param x: vector of observations", "power system. :param x: vector of observations :param y: vector of forecasts :param", "kurtosis return kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates Interquartile Range Difference (IQR Diff) of", "Pearson Correlation Coefficient (with axis functionality) Description: Pearson’s correlation coefficientq is a global", "def precision(y_true,y_pred,**kwargs): \"\"\" Compute the precision using sklearn module sklearn.metrics.precision_score The precision is", "skew(x-y) def kurtosis(x,y): \"\"\" Calculate kurtosis of the probability distribution of the forecast", "the second dimension, while iterating is done on the values in the first", "Correlation Coefficient \"\"\" assert len(x) == len(y) n = len(x) assert n >", "increments :param cmin: float, optional: minimum values of clear sky reference to be", "\"\"\" Calculate mean biae error (MBE) if an observation and forecast vector are", "reference to be used in the calculations. default is 50 W/m2. :returns: deltak", "== len(y) n = len(x) sum_x = float(sum(x)) sum_y = float(sum(y)) sum_x_sq =", "introduced in Marquez and Coimbra (2012) \"proposed metric for evaluation of solar forecasting", "normal distribution is known as the excess kurtosis. 
In the subsequent anIn [142]:", "Axis along which the means are computed :returns: Correlation Coefficient \"\"\" ndims =", "np.nan return diffprod / np.sqrt(xdiff2 * ydiff2) def vcorrcoef(X,y,taxis=-1): \"\"\" Calculates Pearson Correlation", "of Nw data points\" :param x: vector of irradiance values :param y: vector", "Solar irradiance variability score ( scalar ) VI \"\"\" y = cls.copy() y[cls<=cmin]", "return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate mean biae error (MBE)", "of forecast values :returns: R^2 \"\"\" r2 = 1 - ( np.nanvar(y-x) /", "return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute percentage error (MAPE) if an", "dispatch process. :param x: vector of observations :param y: vector of forecasts :returns:", "Nw data points\" :param x: vector of irradiance values :param y: vector of", "== True) & (y_true == True),axis=taxis) / float(wh) FP[cnt] = np.sum((y_pred == True)", ":param y: Vector of forecast values :param p: Vector of reference forecast :returns:", "import precision_score return precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating Curve", "to better allocate resources for compensating forecast errors in the dispatch process. :param", "= np.nansum(prod1,axis=taxis) r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) )) else: r_num = np.nansum(prod1) r_den", "allow power system operators to better allocate resources for compensating forecast errors in", "some from Zhang et al., 2013, Metrics for Evaluating the Accuracy of Solar", "for time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if nd == 2:", "range :returns tp,fp: returns vector of true positive TP and false positive FP", "forecast. 
:param x: vector of irradiance values :param y: vector of irradiance forecasts", ")) else: r_num = np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody) ) r", "calculations. default is 50 W/m2. :returns: Solar irradiance variability score ( scalar )", "has a sscore = 0. A negative sscore means that the forecast performs", "TN = np.sum((y_pred == False) & (y_true == False),axis=taxis) FP = np.sum((y_pred ==", "a sample that is negative. The best value is 1 and the worst", "t: average period in minutes :param cmin: minimum values of clear sky reference", "lead to higher values of VI. :param x: vector if irradiance values :param", "forecasts = %d (%.2f) \\n \" % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y)))", "p: reference vector :returns a: a string with a number of metrics\"\"\" a", "metrics were proposed by Espinar et al. 12. The Kolmogorov–Smirnov (KS) test is", "\\n \" % rmse(x, y) a = a + \"BIAS = %.4f \\n", "compared. Description: The MBE metric intends to indicate average forecast bias. Understanding the", "= np.multiply( diffx, diffx ) prody = np.multiply( diffy, diffy ) prodx[np.isnan(prod1)] =", "of Solar Power into Power Systems \"\"\" def ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow", "coefficientq is a global error measure metric; a larger value of Pearson’s correlation", ") prodx = np.multiply( diffx, diffx ) prody = np.multiply( diffy, diffy )", "same length, so pairs of elements with same index are compared. Description: The", "(optional): Axis along which the means are computed :returns: MBE \"\"\" if taxis", "fac: value for normalization (e.g. 
capacity factor, mean csi) :param taxis (optional): Axis", "\"\"\" Accuracy classification score: In case of binary forecasts you can use boolean", "+ \"SSCORE Persistence 60s = %.4f \\n \" % sscore(x, p, c, 60)", "forecasts :param cls: vector of clear sky reference values :param cmin: minimum values", "is intuitively the ability of the classifier not to label as positive a", "of the absolute difference between two cumulative distribution functions (CDFs), expressed as :param", "for extreme forecast events. :param x: vector of observations :param y: vector of", "np.divide(err1,err2) ) def skewness(x,y): \"\"\" Calculate skewness of the probability distribution of the", "\" % np.nanmean(c) a = a + \"SSCORE 60s = %.4f \\n \"", "of irradiance will lead to higher values of VI. :param x: vector if", "the worst value is 0. Look at sklearn.metrics.precision_score for details how to use", "def prints(x, y, c, p=\"\"): \"\"\" Gives a summary of error metrics :param", "a + \"MEAN OBS = %.4f (%.3f) \\n \" % (np.nanmean(x), np.nanmean(x /", "TN) , float((TP + FP + FN + TN))) #return accuracy_score(y_true, y_pred, **kwargs)", "ydiff2 = 0 cnt = 0 for idx in range(n): if np.isnan(x[idx]) or", "performs worse than a persistence forecast. :param x: vector of irradiance values :param", "of forecasts :returns: Kurtosis \"\"\" from scipy.stats import kurtosis return kurtosis(x-y) def iqrdiff(x,y):", "Calculates Pearson Correlation Coefficient Description: Pearson’s correlation coefficient is a global error measure", "= sum(map(lambda x: pow(x, 2), x)) sum_y_sq = sum(map(lambda x: pow(x, 2), y))", "= 0 cnt = 0 for idx in range(n): if np.isnan(x[idx]) or np.isnan(y[idx]):", "for Evaulation of Solar Forecasting Models\") \"Here we define the uncertainty as the", "data which is to be modeled Input: :param x: Vector of observation values", ":param p: reference vector :returns a: a string with a number of metrics\"\"\"", "in ths: y_pred = y >= th y_true = x >= th TP[cnt]", "W/m2. 
:return U: forecast uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32)", "> 0 avg_x = np.nanmean(x ) avg_y = np.nanmean(y, dtype=np.float32) diffprod = 0", "), axis=taxis,dtype=np.float32) else: return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate mean", "Kurtosis is a measure of the magnitude of the peak of the distribution,", "p != \"\": a = a + \"FS = %.4f \\n \" %", "/ n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5) if den ==", "else: r_num = np.nansum(prod1) r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody) ) r =", ">= 0: Xm = np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1)", "all increments. :param x: float vector of irradiance values :param cls: float vector", "= pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) /", "is an indicative of local deviations of forecast errors. The MaxAE metric is", "et al., 2013, Metrics for Evaluating the Accuracy of Solar Power Forecasting, conference", "known as leptokurtic, which indicates a peaked distribution; whereas a negative kurtosis indicates", "\\n \" % np.nanmean(c) a = a + \"SSCORE 60s = %.4f \\n", "maximum and minimum Pmax = np.max(x); Pmin = np.min(x) # Interval distance d", "so pairs of elements with same index are compared. Description: The MAE has", "pairs of elements with same index are compared. 
Description: The MBE metric intends", "TP[cnt], FP[cnt] cnt += 1 return TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification", "- np.percentile(y,25) return iqr_x - iqr_y def r2(y,x): \"\"\" Calculates coefficient of determination", "\"MEAN CLS = %.4f \\n \" % np.nanmean(c) a = a + \"SSCORE", "irradiance to that of a clear sky solar irradiance so that the diurnal", "cnt = 0 ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths: y_pred = y", "means are computed :returns: MaxAE \"\"\" if taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else:", "of observations :param y: vector of forecasts :returns: Skewness \"\"\" from scipy.stats import", "single-dimensional obervation and clear sky vectors with subsequent and temporal equidistant instances (", "under-forecasting tail. The tendency to over-forecast (or under-forecast) is important in that the", "%.4f \\n \" % FS(x, y, p) a = a + \"SSCORE Persistence", "% sscore(x, y, c, 60) if p != \"\": a = a +", "of forecast values :returns ksi: The KSI \"\"\" m = 100.0 nbins =", "system operators to better allocate resources for compensating forecast errors in the dispatch", "= np.nan return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) ) def precision(y_true,y_pred,**kwargs): \"\"\" Compute", ":returns: IQR \"\"\" iqr_x = np.percentile(x,75) - np.percentile(x,25) iqr_y = np.percentile(y,75) - np.percentile(y,25)", "Skewness is a measure of the asymmetry of the probability distribution, and is", "be treated synonymously with excess kurtosis. A distribution with a positive kurtosis value", "y >= th y_true = x >= th TP[cnt] = np.sum((y_pred == True)", "True),axis=taxis) / float(wh) FP[cnt] = np.sum((y_pred == True) & (y_true == False),axis=taxis) /", "of the magnitude of the peak of the distribution, or, conversely, how fat-tailed", "is the difference between the 75th percentile and the 25th percentile. 
This function", "accuracy_score(y_true, y_pred, **kwargs) def prints(x, y, c, p=\"\"): \"\"\" Gives a summary of", "the number of true positives and fp the number of false positives. The", "false positive FP for the given range of thresholds \"\"\" if taxis >=", "for time t csi1 = np.divide(x[:,0],y[:,0]) # Difference deltak = np.subtract(csi0,csi1) # calculate", "x: observation vector :param y: forecast vector :param c: clear sky vector :param", "The RMSE provides a global error measure during the entire forecasting period. :param", "the paper) IQR is the difference between the 75th percentile and the 25th", "/ float(wh) #print th, TP[cnt], FP[cnt] cnt += 1 return TP, FP def", "y, c, 60) if p != \"\": a = a + \"FS =", "np.divide(sum1,sum2) return VI def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\" as defined my Marquez", "forecast events. :param x: vector of observations :param y: vector of forecasts :param", "!= \"\": a = a + \"FS = %.4f \\n \" % FS(x,", "by the esti- mated clear sky value of the solar irradiance over a", "metric is also a global error measure metric, which, unlike the RMSE metric,", "ratio of the above defined forecast uncertainity U and the timeseries variability V.", "over-forecasting tendency could lead to a less than optimal number of large thermal", "forecast vector are given. Additionaly a normalizing value must be given, e.g. capacity", "indicates a peaked distribution; whereas a negative kurtosis indicates a flat data distribution,", "np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate mean absolute error (MaxAE) if an observation and", "index for time t csi1 = np.divide(x[:,0],y[:,0]) # Difference deltak = np.subtract(csi0,csi1) #", "the precision using sklearn module sklearn.metrics.precision_score The precision is the ratio tp /", "~ 1. The same is for very overcast days. 
Higher variability (changes in", "the input array ´arr´ sliced from ´s´ to ´e´ at the specified axis", "only if number of datapoints is large enough if np.sum(np.isfinite(deltak)) > 5: V", "power, a positive skewness of the forecast errors leads to an over-forecasting tail,", "pairs of elements with same index are compared. Description: Kurtosis is a measure", "pow(x, 2), y)) psum = sum(map(lambda x, y: x * y, x, y))", "given range of thresholds \"\"\" if taxis >= 0: shape = list(x.shape) wh", "x: vector if irradiance values :param cls: vector of clear sky reference values", "probability distribution of the forecast error if an observation and forecast vector are", "(x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a = a + \"Number of forecasts =", "def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating Curve (ROC) :param x: observation vector :param", "ksi: The KSI \"\"\" m = 100.0 nbins = 100 cdf_x = cdf(x,nbins=nbins)", "np.nanmean(x / c)) a = a + \"MEAN FOR = %.4f (%.3f) \\n", "(y_true == True),axis=taxis) return np.divide( (TP + TN) , float((TP + FP +", "Vector of forecasts :param taxis (optional): Axis along which the means are computed", "is important in that the system actions taken to correct for under-forecasting and", "= np.sum((y_pred == True) & (y_true == True),axis=taxis) / float(wh) FP[cnt] = np.sum((y_pred", "**kwargs) def prints(x, y, c, p=\"\"): \"\"\" Gives a summary of error metrics", "= len(x) sum_x = float(sum(x)) sum_y = float(sum(y)) sum_x_sq = sum(map(lambda x: pow(x,", "are compared. Description: Same as MAE but normalized differences are normalized to a", "irange return arr[tuple(items)] nd = x.ndim y = cls.copy() # don't use values", "index are compared. 
Description: Skewness is a measure of the asymmetry of the", "Forecasting Models\") \"Here we define the uncertainty as the standard deviation of a", "(y_true == True),axis=taxis) / float(wh) FP[cnt] = np.sum((y_pred == True) & (y_true ==", "of the distribution, or, conversely, how fat-tailed the distribution is, and is the", "and temporal equidistant instances ( timeseries ). Increments are calculated with an moving", "a measure of the magnitude of the peak of the distribution, or, conversely,", "are computed :returns: MAPE \"\"\" if taxis >= 0: return np.nanmean(abs( (x-y)/fac ),", "metric sscore is calculated as the ratio of the above defined forecast uncertainity", "(sum_y_sq - pow(sum_y, 2) / n), 0.5) if den == 0: return 0", "0 cnt = 0 for idx in range(n): if np.isnan(x[idx]) or np.isnan(y[idx]): continue", "ndims items[taxis] = irange return arr[tuple(items)] nd = x.ndim y = cls.copy() #", "= a + \"SSCORE Persistence 60s = %.4f \\n \" % sscore(x, p,", "shape[taxis] = nbins TP = np.empty(shape) FP = np.empty(shape) else: TP = np.empty(nbins)", "the peak of the distribution, or, conversely, how fat-tailed the distribution is, and", "Vector of forecasts :returns: Correlation Coefficient \"\"\" assert len(x) == len(y) n =", ") / m ksi = np.sum(D) def pearsonr(x, y): # Assume len(x) ==", "dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if taxis", "compared. Description: The MAE has been widely used in regression problems and by", "(%.3f) \\n \" % (np.nanmean(y), np.nanmean(y / c)) a = a + \"MEAN", "x = x.flatten() y = y.flatten() wh = x.shape[0] ra = minmax[1] -", "divided by the \"length\" of the clear sky irradiance plotted against time. On", "number of false positives. The precision is intuitively the ability of the classifier", "to be used in the calculations. default is 50 W/m2. 
:returns: Solar irradiance", "= np.divide(x[0:-t],y[0:-t]) if nd == 2: # clear sky index for time t+deltat", "%.4f \\n \" % pearson(x, y) if p != \"\": a = a", "a tupel (e.g. (0,1) in ) :param nbins: number of bins/thresholds inside the", ") VI \"\"\" y = cls.copy() y[cls<=cmin] = np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 +", "clear sky index increments :returns: V = solar variability \"\"\" def slc(arr,s,e,ndims): \"\"\"", "in time) of irradiance will lead to higher values of VI. :param x:", "new and novel metric for quantifying irradiance and pv output variability\" Description: Solar", "1 - ( Error(Forecast) / Error(Reference) ) :param x: Vector of observation values", "time divided by the \"length\" of the clear sky irradiance plotted against time.", "t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if nd == 2: # clear", "sum(map(lambda x: pow(x, 2), x)) sum_y_sq = sum(map(lambda x: pow(x, 2), y)) psum", "of the forecast errors leads to an over-forecasting tail, and a negative skewness", "On a clear day, VI would be ~ 1. The same is for", "and novel metric for quantifying irradiance and pv output variability\" Description: Solar Variability", "if p != \"\": a = a + \"FS = %.4f \\n \"", "excessively account for extreme forecast events. 
:param x: vector of observations :param y:", "= np.divide(x[:,t],y[:,t]) # clear sky index for time t csi1 = np.divide(x[:,0],y[:,0]) #", "0: return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute", "% pearson(x, y) if p != \"\": a = a + \"FS =", "taxis (optional): Axis along which the means are computed :returns: Correlation Coefficient \"\"\"", ":param y: forecast vector :param minmax: range of thresholds, give a tupel (e.g.", "cls )**2, axis=taxis,dtype=np.float32) ) def sscore(x,y,cls,t,cmin=50.,taxis=0): \"\"\" Calculating a metric for evaluating solar", "return TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score: In case of binary", "\"\"\" Calculate Receiver Operating Curve (ROC) :param x: observation vector :param y: forecast", "length, so pairs of elements with same index are compared. Description: Same as", "the forecast performs worse than a persistence forecast. :param x: vector of irradiance", "root mean square error (RMSE) if an observation and forecast vector are given.", "observation values :param y: Vector of forecast values :returns: R^2 \"\"\" r2 =", "vector :returns a: a string with a number of metrics\"\"\" a = \"Number", "the forecast error if an observation and forecast vector are given. 
Both vectors", "[slice(None, None, None)] * ndims items[taxis] = irange return arr[tuple(items)] nd = x.ndim", "np.isnan(x[idx]) or np.isnan(y[idx]): continue xdiff = x[idx] - avg_x ydiff = y[idx] -", "import skew return skew(x-y) def kurtosis(x,y): \"\"\" Calculate kurtosis of the probability distribution", "(y_true == False),axis=taxis) FN = np.sum((y_pred == False) & (y_true == True),axis=taxis) return", "Coefficient (with axis functionality) Description: Pearson’s correlation coefficientq is a global error measure", "values :param y: vector of irradiance forecasts :param cls: vector of clear sky", "The difference between the kurtosis of a sample distribution and that of the", "y: Vector of forecasts :param taxis (optional): Axis along which the means are", "the standard deviation of a model forecast error divided by the esti- mated", "else: V = np.nan return V, deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability", "y, c, p=\"\"): \"\"\" Gives a summary of error metrics :param x: observation", "n > 0 avg_x = np.nanmean(x ) avg_y = np.nanmean(y, dtype=np.float32) diffprod =", "solar forecasting models\" Description: The metric sscore is calculated as the ratio of", "measure of the magnitude of the peak of the distribution, or, conversely, how", "if cnt == 0: return np.nan return diffprod / np.sqrt(xdiff2 * ydiff2) def", "2013, Metrics for Evaluating the Accuracy of Solar Power Forecasting, conference paper, 3rd", "allocate resources for compensating forecast errors in the dispatch process. 
:param x: vector", "Operating Curve (ROC) :param x: observation vector :param y: forecast vector :param minmax:", "\" % (np.nanmean(y), np.nanmean(y / c)) a = a + \"MEAN CLS =", "(%.3f) \\n \" % (np.nanmean(x), np.nanmean(x / c)) a = a + \"MEAN", "means are computed :returns: MBE \"\"\" if taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else:", "sky index increments :returns: V = solar variability \"\"\" def slc(arr,s,e,ndims): \"\"\" returns", "power minus actual power, a positive skewness of the forecast errors leads to", "- np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a = a + \"RMSE = %.4f \\n \"", "\"\"\" Calculates maximum absolute error (MaxAE) if an observation and forecast vector are", "W/m2. :returns sscore: \"\"\" y[cls<=cmin] = np.nan x[cls<=cmin] = np.nan return 1 -", "boolean arrays or just 0 or 1s. \"\"\" #from sklearn.metrics import accuracy_score TP", "can use boolean arrays or just 0 or 1s. \"\"\" #from sklearn.metrics import", "A new and novel metric for quantifying irradiance and pv output variability\" Description:", "metric for evaluating solar forecast models proposed by Marquez and Coimbra (2012) \"proposed", "in the second dimension, while iterating is done on the values in the", "from the paper) IQR is the difference between the 75th percentile and the", "diffy, diffy ) prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] = np.nan if taxis >= 0:", "means that the forecast performs worse than a persistence forecast. :param x: vector", "np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1 = np.multiply( diffx, diffy ) prodx = np.multiply(", "or under- forecasting) would allow power system operators to better allocate resources for", "the overall forecast bias (over- or under- forecasting) would allow power system operators", "leptokurtic distribution represent a large number of very small forecast errors :param x:", "= 1 means a perfect forecast. 
sscore = 0 means the variability dominates", "cls: float vector of corresponding clear sky irradiance values :param t: int, optional:", "\" % FS(x, y, p) a = a + \"MEAN OBS = %.4f", "y: vector of forecasts :param taxis (optional): Axis along which the means are", "x: vector of observations :param y: vector of forecasts :param fac: value for", "the standard deviation of all increments. :param x: float vector of irradiance values", "= np.max(x); Pmin = np.min(x) # Interval distance d = ( Pmax -", ":returns: Skewness \"\"\" from scipy.stats import skew return skew(x-y) def kurtosis(x,y): \"\"\" Calculate", "cumulative distribution functions (CDFs), expressed as :param x: Vector of observation values :param", "index are compared. Description: Kurtosis is a measure of the magnitude of the", "positive kurtosis value is known as leptokurtic, which indicates a peaked distribution; whereas", "np.divide(x[0:-t],y[0:-t]) if nd == 2: # clear sky index for time t+deltat csi0", "a summary of error metrics :param x: observation vector :param y: forecast vector", "taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate mean", "metric is useful to evaluate the forecasting of short-term extreme events in the", "Metrics for Evaulation of Solar Forecasting Models\") \"Here we define the uncertainty as", "\"Number of forecasts = %d (%.2f) \\n \" % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0]", "+= xdiff * xdiff ydiff2 += ydiff * ydiff cnt += 1 if", "very overcast days. Higher variability (changes in time) of irradiance will lead to", "of corresponding clear sky irradiance values :param t: int, optional: Timelag/stepsize t in", "is known as leptokurtic, which indicates a peaked distribution; whereas a negative kurtosis", "tendency could lead to a less than optimal number of large thermal units", "correlation coefficient indicates an improved solar forecasting skill. 
:param x: Vector of obserations", "minimum Pmax = np.max(x); Pmin = np.min(x) # Interval distance d = (", "pairs of elements with same index are compared. Description: Skewness is a measure", "taxis (optional): Axis along which the means are computed :returns: MAE \"\"\" if", "= np.divide(slc(x,t,None,nd),slc(y,t,None,nd)) csi0 = np.divide(x[t:],y[t:]) # clear sky index for time t #csi1", "so pairs of elements with same index are compared. Description: Skewness is a", "sum_x_sq = sum(map(lambda x: pow(x, 2), x)) sum_y_sq = sum(map(lambda x: pow(x, 2),", "forecasting skill. :param x: Vector of obserations :param y: Vector of forecasts :returns:", "/ np.nanvar(x) ) return r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability V as", "def U(x,y,cls,cmin=50.,taxis=0): \"\"\" Calculates \"Forecast Uncertainty\" as defined my Marquez and Coimbra, 2013", "values :param cls: float vector of corresponding clear sky irradiance values :param t:", ":param y: vector of forecasts :param taxis (optional): Axis along which the means", "csi1 = np.divide(x[:,0],y[:,0]) # Difference deltak = np.subtract(csi0,csi1) # calculate standard deviation only", "cls: vector of clear sky reference values :param t: average period in minutes", "to use In case of binary forecasts you can use boolean arrays or", "if nd == 2: # clear sky index for time t+deltat csi0 =", "+= 1 return TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score: In case", "Timelag/stepsize t in indizes for increments :param cmin: float, optional: minimum values of", "solar variability V as introduced in Marquez and Coimbra (2012) \"proposed metric for", "slc(arr,s,e,ndims): \"\"\" returns the input array ´arr´ sliced from ´s´ to ´e´ at", "1s. 
\"\"\" from sklearn.metrics import precision_score return precision_score(y_true, y_pred, **kwargs) def roc(x,y,minmax,nbins=100,taxis=-1): \"\"\"", "FP = np.empty(nbins) x = x.flatten() y = y.flatten() wh = x.shape[0] ra", "avg_x = np.nanmean(x ) avg_y = np.nanmean(y, dtype=np.float32) diffprod = 0 xdiff2 =", "( timeseries ). Increments are calculated with an moving window along this axis.", "False),axis=taxis) / float(wh) #print th, TP[cnt], FP[cnt] cnt += 1 return TP, FP", "string with a number of metrics\"\"\" a = \"Number of measurements = %d", "= np.nanmean(y, dtype=np.float32) diffprod = 0 xdiff2 = 0 ydiff2 = 0 cnt", "35: print(\"Number of data points for KSI not sufficient. N=\",N,\"<35\") return np.nan Vc", "FS \"\"\" err1 = rmse(x,y,taxis=taxis) err2 = rmse(x,p,taxis=taxis) return ( 1 - np.divide(err1,err2)", "ksi(fcst,obs): \"\"\" Calculates the Kolmogorov-Smirnow Test Integral (KSI) The KSI and OVER metrics", "probability distribution, and is the third standardized moment Assuming that forecast errors are", "Evaulation of Solar Forecasting Models\") \"Here we define the uncertainty as the standard", "pv output variability\" Description: Solar Variability VI over a period of time is", "FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy classification score: In case of binary forecasts you", "coefficient of determination R^2 Description: R^2 is a comparison of the variance of", "of clear sky reference values :param cmin: minimum values of clear sky reference", "using sklearn module sklearn.metrics.precision_score The precision is the ratio tp / (tp +", "= nbins TP = np.empty(shape) FP = np.empty(shape) else: TP = np.empty(nbins) FP", "MBE \"\"\" if taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32) def FS(x,y,p,method=\"RMSE\",taxis=0):", "the variability dominates the forecast. By definition a persistence forecast has a sscore", "50 W/m2. 
:return U: forecast uncertainty \"\"\" return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2,", "Vector of obserations :param y: Vector of forecasts :param taxis (optional): Axis along", "kurtosis of the probability distribution of the forecast error if an observation and", "pow(x, 2), x)) sum_y_sq = sum(map(lambda x: pow(x, 2), y)) psum = sum(map(lambda", "t csi1 = np.divide(x[:,0],y[:,0]) # Difference deltak = np.subtract(csi0,csi1) # calculate standard deviation", "of obserations :param y: Vector of forecasts :returns: Correlation Coefficient \"\"\" assert len(x)", "clear day, VI would be ~ 1. The same is for very overcast", "of the errors to the variance of the data which is to be", "= np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths: y_pred = y >= th y_true =", "def pearson(x, y): \"\"\" Calculates Pearson Correlation Coefficient Description: Pearson’s correlation coefficient is", "np.empty(nbins) x = x.flatten() y = y.flatten() wh = x.shape[0] ra = minmax[1]", "forecasts :param taxis (optional): Axis along which the means are computed :returns: MaxAE", "irradiance forecasts :param cls: vector of clear sky reference values :param t: timelag", "np.nanmean(X,axis=taxis, dtype=np.float32) ym = np.nanmean(y,axis=taxis, dtype=np.float32) Xm = Xm.reshape(Xm.shape[0],1) ym = ym.reshape(ym.shape[0],1) if", "(tp + fp) where tp is the number of true positives and fp", "float vector of irradiance values :param cls: float vector of corresponding clear sky", "pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5) if", "Vector of forecast values :returns: R^2 \"\"\" r2 = 1 - ( np.nanvar(y-x)", "forecasts :param fac: value for normalization (e.g. capacity factor, mean csi) :param taxis", "Interquartile Range Difference (IQR Diff) of a two given datasets Description: (not from", "and fp the number of false positives. The precision is intuitively the ability", "the clear sky irradiance plotted against time. 
On a clear day, VI would", "observation vector :param y: forecast vector :param minmax: range of thresholds, give a", "60) if p != \"\": a = a + \"FS = %.4f \\n", "sklearn.metrics.precision_score for details how to use In case of binary forecasts you can", "neglected.\" This method can use single-dimensional obervation and clear sky vectors with subsequent", "p) a = a + \"MEAN OBS = %.4f (%.3f) \\n \" %", "period in minutes :param cmin: minimum values of clear sky reference to be", "assert n > 0 avg_x = np.nanmean(x ) avg_y = np.nanmean(y, dtype=np.float32) diffprod", "deviation only if number of datapoints is large enough if np.sum(np.isfinite(deltak)) > 5:", "y[idx] - avg_y diffprod += xdiff * ydiff xdiff2 += xdiff * xdiff", "N < 35: print(\"Number of data points for KSI not sufficient. N=\",N,\"<35\") return", "rmse(x,p,taxis=taxis) return ( 1 - np.divide(err1,err2) ) def skewness(x,y): \"\"\" Calculate skewness of", "= np.sum((y_pred == True) & (y_true == False),axis=taxis) FN = np.sum((y_pred == False)", "\"length\" of the clear sky irradiance plotted against time. On a clear day,", "a large number of very small forecast errors :param x: vector of observations", "is calculated as the ratio of the \"length\" of the measured irradiance plotted", "paper, 3rd International Workshop on Integration of Solar Power into Power Systems \"\"\"", "Description: Pearson’s correlation coefficient is a global error measure metric; a larger value", "my Marquez and Coimbra, 2013 (\"Proposed Metrics for Evaulation of Solar Forecasting Models\")", "committed, which need to be corrected through the starting of more expensive, but", "by Stein et al. 
\"The variability index: A new and novel metric for", "= list(x.shape) wh = shape[taxis] shape[taxis] = nbins TP = np.empty(shape) FP =", "low irradiance values y[cls<=cmin] = np.nan if nd == 1: # clear sky", "den == 0: return 0 return num / den def pearson(x, y): \"\"\"", "= len(y) if N < 35: print(\"Number of data points for KSI not", "xdiff ydiff2 += ydiff * ydiff cnt += 1 if cnt == 0:", "variability V is the standard deviation of the step-changes of the measured solar", "divided by the esti- mated clear sky value of the solar irradiance over", "of the normal distribution is known as the excess kurtosis. In the subsequent", "[142]: U alysis, the term kurtosis will be treated synonymously with excess kurtosis.", ":returns tp,fp: returns vector of true positive TP and false positive FP for", "errors are equal to forecast power minus actual power, a positive skewness of", "large number of very small forecast errors :param x: vector of observations :param", "t: int, optional: Timelag/stepsize t in indizes for increments :param cmin: float, optional:", "to be modeled Input: :param x: Vector of observation values :param y: Vector", "Metrics for Evaluating the Accuracy of Solar Power Forecasting, conference paper, 3rd International", "= sum(map(lambda x: pow(x, 2), y)) psum = sum(map(lambda x, y: x *", "Vector of forecast values :param p: Vector of reference forecast :returns: FS \"\"\"", "KSI not sufficient. N=\",N,\"<35\") return np.nan Vc = 1.63 / np.sqrt(N) D =", "correct for under-forecasting and over-forecasting events are not equal. An over-forecasting tendency could", "distribution is known as the excess kurtosis. In the subsequent anIn [142]: U", "from scipy.stats import kurtosis return kurtosis(x-y) def iqrdiff(x,y): \"\"\" Calculates Interquartile Range Difference", "errors. 
The MaxAE metric is useful to evaluate the forecasting of short-term extreme", "= y.flatten() wh = x.shape[0] ra = minmax[1] - minmax[0] cnt = 0", "/ m ksi = np.sum(D) def pearsonr(x, y): # Assume len(x) == len(y)", "determine if two data sets are significantly different. The KS statistic D is", "d = ( Pmax - Pmin ) / m ksi = np.sum(D) def", "units in the dispatch process. :param x: vector of observations :param y: vector", "= cls.copy() y[cls<=cmin] = np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2", "= np.nan sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32) sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32) VI", ":returns: MaxAE \"\"\" if taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def", ") prody = np.multiply( diffy, diffy ) prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] = np.nan", "values :param y: Vector of forecast values :returns: R^2 \"\"\" r2 = 1", "irradiance so that the diurnal variability is neglected.\" This method can use single-dimensional", "forecast. sscore = 0 means the variability dominates the forecast. By definition a", "Description: The MBE metric intends to indicate average forecast bias. Understanding the overall", "TP and false positive FP for the given range of thresholds \"\"\" if", "subsequent and temporal equidistant instances ( timeseries ). Increments are calculated with an", "pow(sum_y, 2) / n), 0.5) if den == 0: return 0 return num", "is then calculated as the standard deviation of all increments. :param x: float", "index defined by Stein et al. \"The variability index: A new and novel", "The MBE metric intends to indicate average forecast bias. Understanding the overall forecast", "mean square error (RMSE) if an observation and forecast vector are given. 
Both", "c)) a = a + \"MEAN CLS = %.4f \\n \" % np.nanmean(c)", "\"\"\" Calculates Forecast Skill (FS) FS is defined as 1 - ( Error(Forecast)", "of irradiance values :param y: vector of irradiance forecasts :param cls: vector of", "+ TN))) #return accuracy_score(y_true, y_pred, **kwargs) def prints(x, y, c, p=\"\"): \"\"\" Gives", "= cdf(y,nbins=nbins) # Critical value Vc N = len(y) if N < 35:", "= 0. A negative sscore means that the forecast performs worse than a", "\"\"\" if taxis >= 0: shape = list(x.shape) wh = shape[taxis] shape[taxis] =", "score ( scalar ) VI \"\"\" y = cls.copy() y[cls<=cmin] = np.nan sum1", "while iterating is done on the values in the first axis. Variability is", "of forecasts :param taxis (optional): Axis along which the means are computed :returns:", "moment Assuming that forecast errors are equal to forecast power minus actual power,", "Test Integral (KSI) The KSI and OVER metrics were proposed by Espinar et", "FP for the given range of thresholds \"\"\" if taxis >= 0: shape", "default is 50 W/m2. 
:returns: deltak = vector of clear sky index increments", "%.4f \\n \" % mbe(x, y) a = a + \"CORR = %.4f", "return V, deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates a variability index defined by Stein", "variance of the errors to the variance of the data which is to", "th, TP[cnt], FP[cnt] cnt += 1 return TP, FP def accuracy(y_true,y_pred,taxis=0): \"\"\" Accuracy", "\"MEAN FOR = %.4f (%.3f) \\n \" % (np.nanmean(y), np.nanmean(y / c)) a", "y: Vector of forecast values :returns: R^2 \"\"\" r2 = 1 - (", "if taxis >= 0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) ) else: return", "number of metrics\"\"\" a = \"Number of measurements = %d (%.2f) \\n \"", "x[idx] - avg_x ydiff = y[idx] - avg_y diffprod += xdiff * ydiff", "vector of observations :param y: vector of forecasts :param taxis (optional): Axis along", ">= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate mean absolute", "\\n \" % (np.nanmean(y), np.nanmean(y / c)) a = a + \"MEAN CLS", "prod1 = np.multiply( diffx, diffy ) prodx = np.multiply( diffx, diffx ) prody", "energy industry to evaluate forecast performance. The MAE metric is also a global", "reference values :param t: timelag for variability calculations :param cmin: minimum values of", "diffy ) prodx[np.isnan(prod1)] = np.nan prody[np.isnan(prod1)] = np.nan if taxis >= 0: r_num", "the third standardized moment Assuming that forecast errors are equal to forecast power", "values y[cls<=cmin] = np.nan if nd == 1: # clear sky index for", "cdf_y) # Observation maximum and minimum Pmax = np.max(x); Pmin = np.min(x) #", "with excess kurtosis. A distribution with a positive kurtosis value is known as", "temporal equidistant instances ( timeseries ). 
Increments are calculated with an moving window", "- ( np.nanvar(y-x) / np.nanvar(x) ) return r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar", "\"Number of measurements = %d (%.2f) \\n \" % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0]", "n = len(x) assert n > 0 avg_x = np.nanmean(x ) avg_y =", "of all increments. :param x: float vector of irradiance values :param cls: float", "number of datapoints is large enough if np.sum(np.isfinite(deltak)) > 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32))", "np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32) def mbe(x,y,taxis=-1): \"\"\" Calculate mean biae error (MBE) if", "of obserations :param y: Vector of forecasts :param taxis (optional): Axis along which", "def skewness(x,y): \"\"\" Calculate skewness of the probability distribution of the forecast error", "forecast performs worse than a persistence forecast. :param x: vector of irradiance values", "np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan return V, deltak def VI(x,cls,t,cmin=50.): \"\"\" Calculates a", "taxis >= 0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) ) else: return np.sqrt(np.nanmean(", "in the dispatch process. 
:param x: vector of observations :param y: vector of", "in regression problems and by the renewable energy industry to evaluate forecast performance.", "VI over a period of time is calculated as the ratio of the", "computed :returns: MBE \"\"\" if taxis >= 0: return np.nanmean((x-y),axis=taxis,dtype=np.float32) else: return np.nanmean(x-y,dtype=np.float32)", "% rmse(x, y) a = a + \"BIAS = %.4f \\n \" %", "of observation values :param y: Vector of forecast values :returns ksi: The KSI", "\" % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0])) a = a", "x: float vector of irradiance values :param cls: float vector of corresponding clear", "if taxis >= 0: return np.nanmax(abs(x-y),axis=taxis,dtype=np.float32) else: return np.nanmax(abs(x-y),dtype=np.float32) def mae(x,y,taxis=-1): \"\"\" Calculate", "print(\"Number of data points for KSI not sufficient. N=\",N,\"<35\") return np.nan Vc =", "ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins)) for th in ths: y_pred = y >= th y_true", "compared. Description: The MaxAE is an indicative of local deviations of forecast errors.", "standard deviation only if number of datapoints is large enough if np.sum(np.isfinite(deltak)) >", "(2012) \"proposed metric for evaluation of solar forecasting models\" Description: The metric sscore", "fp) where tp is the number of true positives and fp the number", "is calculated as the ratio of the above defined forecast uncertainity U and", "over-forecast (or under-forecast) is important in that the system actions taken to correct", "( np.nanvar(y-x) / np.nanvar(x) ) return r2 def V(x,cls,t=1,cmin=50.): \"\"\" Calculates solar variability", "W/m2. 
:returns: deltak = vector of clear sky index increments :returns: V =", "ratio of the \"length\" of the measured irradiance plotted against time divided by", "= np.subtract(X,Xm) diffy = np.subtract(y,ym) prod1 = np.multiply( diffx, diffy ) prodx =", "are equal to forecast power minus actual power, a positive skewness of the", "regression problems and by the renewable energy industry to evaluate forecast performance. The", "This function returns the difference of two IQR. Input: :param x: Vector of", "the maximum value of the absolute difference between two cumulative distribution functions (CDFs),", "used in regression problems and by the renewable energy industry to evaluate forecast", "large thermal units being committed, which need to be corrected through the starting", "being committed, which need to be corrected through the starting of more expensive,", "forecasting) would allow power system operators to better allocate resources for compensating forecast", "= %d (%.2f) \\n \" % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) /", "forecast performance. The MAE metric is also a global error measure metric, which,", "an moving window along this axis. If two-dimensional vectors are provided subsequent instances", "roc(x,y,minmax,nbins=100,taxis=-1): \"\"\" Calculate Receiver Operating Curve (ROC) :param x: observation vector :param y:", "/ np.sqrt(N) D = np.max(cdf_x - cdf_y) # Observation maximum and minimum Pmax", "the \"length\" of the measured irradiance plotted against time divided by the \"length\"", "y) a = a + \"BIAS = %.4f \\n \" % mbe(x, y)", "In case of binary forecasts you can use boolean arrays or just 0", "performance. 
The MAE metric is also a global error measure metric, which, unlike", "Coefficient \"\"\" assert len(x) == len(y) n = len(x) assert n > 0", "\"\"\" from scipy.stats import skew return skew(x-y) def kurtosis(x,y): \"\"\" Calculate kurtosis of", "forecast vector :param c: clear sky vector :param p: reference vector :returns a:", "(KS) test is a nonparametric test to determine if two data sets are", "(changes in time) of irradiance will lead to higher values of VI. :param", "calculations. default is 50 W/m2. :returns: deltak = vector of clear sky index", "x >= th TP[cnt] = np.sum((y_pred == True) & (y_true == True),axis=taxis) /", "taxis == 0: ym = ym.T else: Xm = np.nanmean(X, dtype=np.float32) ym =", "len(y) if N < 35: print(\"Number of data points for KSI not sufficient.", "error measure metric; a larger value of Pearson’s correlation coefficient indicates an improved", "csi0 = np.divide(x[:,t],y[:,t]) # clear sky index for time t csi1 = np.divide(x[:,0],y[:,0])", "TP = np.empty(shape) FP = np.empty(shape) else: TP = np.empty(nbins) FP = np.empty(nbins)", "True) & (y_true == False),axis=taxis) FN = np.sum((y_pred == False) & (y_true ==", "with same index are compared. Description: Kurtosis is a measure of the magnitude", "but normalized differences are normalized to a given value. :param x: vector of", "(x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0])) a = a + \"Number", "= len(x) assert n > 0 avg_x = np.nanmean(x ) avg_y = np.nanmean(y,", "means are computed :returns: Correlation Coefficient \"\"\" ndims = X.ndim assert ndims <", "reference values :param cmin: minimum values of clear sky reference to be used", "= ( Pmax - Pmin ) / m ksi = np.sum(D) def pearsonr(x,", "pairs of elements with same index are compared. 
Description: The MAE has been", "= y >= th y_true = x >= th TP[cnt] = np.sum((y_pred ==", ":param y: Vector of forecast values :returns: R^2 \"\"\" r2 = 1 -", "return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32) else: return np.nanmean(abs(x-y),dtype=np.float32) def mape(x,y,fac,taxis=-1): \"\"\" Calculate mean absolute percentage", "time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if nd == 2: #", "index for time t #csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd)) csi1 = np.divide(x[0:-t],y[0:-t]) if nd ==", "compared. Description: Kurtosis is a measure of the magnitude of the peak of", "starting, units in the dispatch process. :param x: vector of observations :param y:", "Coefficient \"\"\" ndims = X.ndim assert ndims < 3 if taxis >= 0:", "index are compared. Description: The MaxAE is an indicative of local deviations of", "clear sky index for time t+deltat csi0 = np.divide(x[:,t],y[:,t]) # clear sky index", "np.sum(np.isfinite(deltak)) > 5: V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32)) else: V = np.nan return V, deltak", "arrays or just 0 or 1s. \"\"\" #from sklearn.metrics import accuracy_score TP =", "in Marquez and Coimbra (2012) \"proposed metric for evaluation of solar forecasting models\"", "== 0: Xm = Xm.T if taxis == 0: ym = ym.T else:", ":returns ksi: The KSI \"\"\" m = 100.0 nbins = 100 cdf_x =", "len(x) == len(y) n = len(x) sum_x = float(sum(x)) sum_y = float(sum(y)) sum_x_sq", "tp is the number of true positives and fp the number of false", "forecasting period. 
:param x: vector of observations :param y: vector of forecasts :param", "np.subtract(x,y), dtype=np.float32), axis=taxis) ) else: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) )) def maxae(x,y,", "computed :returns: RMSE \"\"\" if taxis >= 0: return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32),", "Vector of observation values :param y: Vector of forecast values :param p: Vector", "along which the means are computed :returns: RMSE \"\"\" if taxis >= 0:", "The KSI and OVER metrics were proposed by Espinar et al. 12. The", "optional: Timelag/stepsize t in indizes for increments :param cmin: float, optional: minimum values", ") :param nbins: number of bins/thresholds inside the range :returns tp,fp: returns vector", "# Critical value Vc N = len(y) if N < 35: print(\"Number of", ":param taxis (optional): Axis along which the means are computed :returns: MAE \"\"\"" ]
[ "outline entry has subsequent entries of a higher level inbetween itself and the", "elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content)", "0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1,", "compression type for all images in a document that we are converting to", "+ \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar", "\"interpolate_images\" property to \"False\" to make it so that the reader does not", "the lazy dog.\") # Create a \"PdfSaveOptions\" object that we can pass to", "the preferable option if we know that all our fields will be up", "this document on both sides of the pages, we can fold all the", "else: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask", "\"1.7\" standard. 
# Set the \"compliance\" property to \"PdfCompliance.PDF_A1A\" to comply with the", "= \"Courier New\" builder.writeln(\"The quick brown fox jumps over the lazy dog.\") #", "sources to ensure that we have access to both the fonts in this", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1]", "options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\",", "saving a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) for i", "\"Corrupted image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR +", "options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR +", "the document itself. options.page_mode = page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name =", "have. options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE:", "with any layers present in the document. 
# Set the \"page_mode\" property to", "one page starting from page two, which will only contain the second page.", "\"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: # content = file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, #", "with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work with outline levels that", "0 R/Length 11 0 R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR", "pages, we can fold all the pages down the middle at once, #", "If an outline entry has subsequent entries of a higher level inbetween itself", "self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original", "body. # Clicking on an entry in this outline will take us to", "fonts if we edit the document. options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options)", "pass to the document's \"save\" method # to modify how that method converts", "\"PDF/A-1b\" as well as preserving the document structure of the original document. #", "# Set the \"headings_outline_levels\" property to \"5\" to include all headings of levels", "to fonts, saving only the glyphs # that the document is using. The", "# Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render the EMF+", "is \"False\", \"export_language_to_span_tag\" is ignored. 
save_options.export_document_structure = True save_options.export_language_to_span_tag = True doc.save(ARTIFACTS_DIR +", "The \"save\" method will apply our signature to the output document at this", "# self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif", "to export the text language. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\")", "as numbers. # Set the \"numeral_format\" property to \"NumeralFormat.CONTEXT\" to # look up", "a fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for", "self.assertEqual(\"1 XYZ 85 654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85 68 0\", link_annotations[3].destination.to_string())", "builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\" object that we can pass to the document's", "to \"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks that are in all headers/footers. save_options.header_footer_bookmarks_export_mode =", "\"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\",", "== aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def", "in the output PDF. 
# The document's size may become very large, but", "only register headings with heading levels that are no larger than 1. pdf_save_options.outline_options.headings_outline_levels", "content) def test_preblend_images(self): for preblend_images in (False, True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows", "pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\")", "/FlateDecode>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0", "respective heading. # Set the \"headings_outline_levels\" property to \"5\" to include all headings", "to write additional text positioning operators. doc = aw.Document(MY_DIR + \"Text positioning operators.docx\")", "= file.read() if interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height", "= aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\" property to \"True\" to attempt to fix", "register headings with heading levels that are no larger than 1. pdf_save_options.outline_options.headings_outline_levels =", "a document right before a save operation. 
# This is the preferable option", "which complies with \"PDF/A-1b\" as well as preserving the document structure of the", "with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure Enhanced Windows Metafile-related", "language. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options =", "aw.Document() builder = aw.DocumentBuilder(doc) for i in range(100): builder.writeln(\"Lorem ipsum dolor sit amet,", "\"True\" to get some PDF readers, such as Adobe Acrobat Pro, # to", "+ \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts:", "= aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\" property to \"True\" to arrange the contents", "aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count)", "dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0", "aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to discard # custom document", "+ \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25,", "content) # form = pdf_document.form # self.assertEqual(1, pdf_document.form.count) # field = form.fields[0].as_combo_box_field() #", "page 
setup objects of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for section in", "file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R>>stream\", file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000,", "# such as the one we have created above from the outline. #", "that we can pass to the document's \"save\" method # # to modify", "# # Create a \"PdfSaveOptions\" object that we can pass to the document's", "in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for font in font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\"", "but we may need access to any custom fonts if we edit the", "in a document right before a save operation. # This is the preferable", "+ \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open)", "its actual size. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\")", "on the image # to see the interpolation effect if we saved the", "two headings we have inserted above will not appear. save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR", "= [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri, result in parameters:", "document digitally and timestamp it. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF", "\"WMF with text.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "#ExSummary:Shows how to make footnotes and endnotes function as hyperlinks. doc = aw.Document(MY_DIR", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif", "self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Since our document", "\"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: # content = file.read().decode('utf-8') #if preserve_form_fields: # self.assertEqual(\"Please select", "a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in", "13 0 R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with", "to \"False\" to not apply PDF Type 1 fonts. options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR", "/Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page,", "0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for preblend_images in (False, True): with self.subTest(preblend_images=preblend_images):", "document to .PDF. 
save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\" property to \"True\"", "levels 1 and 5, and no headings with levels of 2, 3, and", "#self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF,", "doc.sections: section = section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print this document", "R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\",", "4 0 R>>\\r\\n\", content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages 3 0", "it to our SaveOptions object to sign the document when we save it", "+ \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber)", "# export bookmarks that are in all headers/footers. 
save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR +", "builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create a \"PdfSaveOptions\" object that we can pass", "IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert", "#ExSummary:Shows how to set instructions for some PDF readers to follow when opening", "= TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL))", "amet, consectetur adipiscing elit, \" + \"sed do eiusmod tempor incididunt ut labore", "self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) <", "with the specified properties has been generated.\"\"\" # return any(warning for warning in", "a different color space for images in a document as we export it", "to render every metafile using vector graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # #", "we can render as vector graphics. save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options)", "# custom document properties as we save the document to .PDF. # Set", "factor when we open the document with it. 
# Set the \"zoom_factor\" property", "= pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for x in", "= aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" # to only render", "to control the quality of the Jpeg images that end up in the", "# if warning.source == source and warning.warning_type == type and warning.description == description)", "the PDF standards compliance level of saved PDF documents. doc = aw.Document(MY_DIR +", "builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\" builder.writeln(\"The quick", "to work with outline levels that do not contain any corresponding headings when", "to \"True\" to iterate through all the document # fields and update them", "object to sign the document when we save it to PDF. certificate_holder =", "options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000,", "aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count)", "treat the outline level 5 headings as level 2. save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR", "the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to use european numerals. 
# Set the \"numeral_format\"", "R>>\", content) elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata", "11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type", "and NUMPAGES fields. These fields do not display the correct value in real", "Set the \"page_index\" to \"1\" to render a portion of the document starting", "the document. options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR", "\"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render the EMF+ part of an EMF+ dual metafile.", "aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string())", "\"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length)", "def __init__(self): # self.warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if", "3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in", "with three rows. The first row, # whose text we will format in", "get the reader that opens this document to interpolate images. # Their resolution", "font, embedding in the output document may be desirable. 
# Set the \"embed_full_fonts\"", "the \"color_mode\" property to \"GRAYSCALE\" to render all images from the document in", "have created above from the outline. # Set the \"create_outlines_for_headings_in_tables\" property to \"True\"", "\"Image can not be processed. Possibly unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback): # def", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content", "options = aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\" property to \"True\" to embed every", "export any bookmarks that are inside headers/footers. # Set the \"header_footer_bookmarks_export_mode\" property to", "68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype", "+ \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count) #endif def", "normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\",", "a set of pages from # our document to save in an output", "1/4 of its actual size. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "to display the outline navigation pane in the output PDF. 
save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES", "field.full_name) # self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please select a fruit:", "self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD:", "above will not appear. save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor", "self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\",", "# \"image_compression\" property to control the quality of all images that end up", "#pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE,", "#elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\"", "the impact that this will have. options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options)", "+ \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri)", "to \"False\" to render the PDF normally. 
options.use_book_fold_printing_settings = render_text_as_bookfold # If we", "pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title in (False, True): with self.subTest(display_doc_title=display_doc_title):", "aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with", "convert it to PDF. # Set the \"compliance\" property to \"PdfCompliance.PDF17\" to comply", "#class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects formatting loss-related warnings that occur upon saving", "\"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML:", "content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode ==", "embed every glyph of every embedded font in the output PDF. options.embed_full_fonts =", "converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"interpolate_images\" property", "#ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process bookmarks in headers/footers in a document that", "85 68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202 733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1", "the document to .PDF. 
save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\" property to", "pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) #pdf_document", "the location of its respective heading. # Set the \"headings_outline_levels\" property to \"1\"", "\"use_book_fold_printing_settings\" property to \"True\" to arrange the contents # in the output PDF", "\"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to get the PDF reader to open the saved", "while converting a document to PDF. doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") #", "folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\" for", "79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85 654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ", "= pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK:", "#ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit the headings' level that will", "will make sure that all the fields will display # the most accurate", "pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart 
##ExFor:MetafileRenderingMode", "b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258", "european numerals. # Set the \"numeral_format\" property to \"NumeralFormat.SYSTEM\" to determine the symbol", "to convert only some of the pages in a document to PDF. doc", "fonts while rendering a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set Aspose.Words to skip embedding Arial", "to \"DmlRenderingMode.DRAWING_ML\" # to render the DML shapes themselves. options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR", "to open these links in new windows/browser tabs. # Set the \"open_hyperlinks_in_new_window\" property", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0 (S) 0 (a) 0 (m) 0", "< text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def", "\"PdfSaveOptions\" object that we can pass to the document's \"save\" method # #", "file.read() if interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace", "to our SaveOptions object to sign the document when we save it to", "of the second 3rd level entry, and so on. 
# In the outline,", "limit the headings' level that will appear in the outline of a saved", "R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode ==", "{info.description}.\") # self.warnings.add(info) # def __getitem__(self, i) -> aw.WarningInfo: # return self.warnings[i] #", "several such \"sub-entries\". # In our document, the outline entries from the 5th", "to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" #", "\"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the EMF+ part of an EMF+ dual metafile if", "how to change the resolution of images in the PDF document. doc =", "aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create a \"PdfSaveOptions\" object that we can pass", "aw.Document(MY_DIR + \"Text positioning operators.docx\") # Create a \"PdfSaveOptions\" object that we can", "Set the \"compliance\" property to \"PdfCompliance.PDF_A1B\" to comply with the \"PDF/A-1b\" standard, #", "outline entries from the 5th heading level are sub-entries of the second 4th", "4 as \"missing\". # Set the \"create_missing_outline_levels\" property to \"True\" to include all", "#self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning):", "all missing levels in the outline, # leaving blank outline entries since there", "document, we will need to provide the password before accessing its contents. doc.save(ARTIFACTS_DIR", "duration of the operation. 
# Set the \"memory_optimization\" property to \"False\" to save", "all headings within tables, # such as the one we have created above", "no larger than the value of the \"headings_outline_levels\" property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR", "#bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels in", "warranty of any kind, either expressed or implied. import io import os from", "PDF. # Set the \"image_compression\" property to \"PdfImageCompression.JPEG\" to use the # \"image_compression\"", "\"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta) -1 (g) 1 (, 10. November) -1 (", "annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport", "/FlateDecode>>\", content) else: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace", "#with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR +", "and assign it to our SaveOptions object to sign the document when we", "scale_wmf_fonts in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF fonts", "# content = file.read().decode('utf-8') #if preserve_form_fields: # 
self.assertEqual(\"Please select a fruit: \", text_fragment_absorber.text)", "\"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as", "if we edit the document. options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if", "# self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if pdf_image_compression == aw.saving.PdfImageCompression.AUTO:", "(N) 0 (o) 0 (v) 0 (e) 0 (m) 0 (b) 0 (e)", "to replace some fonts, # including the two fonts in our document, with", "#ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change the resolution of images in the PDF", "\"rb\") as file: # content = file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK):", "#ExSummary:Shows how to render fallback shapes when saving to PDF. doc = aw.Document(MY_DIR", "b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "the \"headings_outline_levels\" property. 
pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc =", "#if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\",", "need to manually update them using updating methods such as \"Field.Update()\", and \"Document.UpdateFields()\"", "render all images in color. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR +", "\"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content =", "document's \"title\" built-in property in the tab that belongs to this document. #", "format using the Save method and the PdfSaveOptions class. doc = aw.Document() builder", "options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 - Save only the", "to # apply a percentage-based zoom factor when we open the document with", "in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts in (False, True): with", "as Adobe Acrobat, we will need to zoom in on the image #", "to \"PdfCustomPropertiesExport.STANDARD\" # to preserve custom properties within the output PDF document. #", "all hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR", "a nonstandard font. 
builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The", "saving it to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert text", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ", "row, # whose text we will format in a heading-type style, will serve", "= apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber", "a fallback to bitmap rendering and changing type of warnings about unsupported metafile", "# Set the \"embed_full_fonts\" property to \"False\" to apply subsetting to fonts, saving", "/Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data) #", "[85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733 0]>>\", content)", "property to \"False\" to ignore missing outline levels, # and treat the outline", "operation. # save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings()", "warning.description == description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm", "self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3", "how that method converts the document to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # The", "\"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 3 (odd)\\r\\n\"", "and is provided # \"as is\", without warranty of any kind, either expressed", "dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore", "#ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to apply text compression when saving a document to", "+ \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content = file.read() if interpolate_images: self.assertIn( b\"7 0", "expressed or implied. import io import os from datetime import datetime, timedelta, timezone", "+ \"WMF with image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions() # # Set the \"emulate_raster_operations\"", "aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3 0", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action =", "a generated PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed", "display the outline navigation pane in the output PDF. save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES #", "\"VECTOR_WITH_FALLBACK\" to try to render every metafile using vector graphics. # metafile_rendering_options.rendering_mode =", "in this document. 
original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources", "= aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\")", "page two, which will only contain the second page. doc.save(stream, options) #ExEnd #pdf_document", "(aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how to specify a", "the pages in a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "#ExFor:PdfCompliance #ExSummary:Shows how to set the PDF standards compliance level of saved PDF", "use to filter out a set of pages from # our document to", "to \"False\" to # preserve the default scale of these fonts. 
save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size =", "0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0, image_placement_absorber.image_placements.count) #", "\"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R>>stream\", file.read()) elif pdf_text_compression ==", "R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>\", content) else: self.assertIn( b\"7 0 obj\\r\\n\" +", "text.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please select a fruit: \") # Insert a combo", "<</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0", "= pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR", "world!\", text_absorber.text) def test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with", "pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My", "#elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for", "the EMF+ records are supported. 
# Otherwise, Aspose.Words will render the EMF part.", "will contain one page starting from page two, which will only contain the", "text compression when saving a document to PDF. doc = aw.Document() builder =", "# self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart", "in a way that creates a booklet. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document", "aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to set instructions for", "pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400,", "pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR +", "+ \"DrawingML shape 3D effects.docx\") # warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback", "\"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content = file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs from the U+06F0 to U+06F9 range", "aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick brown", "({'odd' if i % 2 == 0 else 'even'})\") if i < 4:", "0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11", 
"doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1]", "the document to .PDF. # Set the \"color_mode\" property to \"GRAYSCALE\" to render", "a rendered PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") #", "will allow a user to choose an option from a collection of strings.", "# pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts in (False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts", "+ \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations = [x.as_link_annotation()", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR", "of the outline in the output PDF. save_options.outline_options.default_bookmarks_outline_level = 1 # Set the", "# self.assertEqual(\"1 XYZ 85 654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85 68 0\",", "\"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action", "# in the outline, provided that they have a heading level that is", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content = file.read() if dml_rendering_mode ==", "give the zoom factor a value of 25%. 
options = aw.saving.PdfSaveOptions() options.zoom_behavior =", "the \"interpolate_images\" property to \"False\" to make it so that the reader does", "options) # 2 - Save only the odd-numbered pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR", "background logo.png\") # Create a \"PdfSaveOptions\" object that we can pass to the", "entries when we open the document. options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options)", "to the size of the metafile on the page. # Set the \"scale_wmf_fonts_to_metafile_size\"", "= \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\" builder.writeln(\"The quick brown fox jumps over", "memory_optimization in (False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to", "using Javascript code # that forces readers to open these links in new", "= aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: # data =", "property to \"PdfPageMode.USE_OUTLINES\" to get the PDF reader # also to display the", "the cost of increased file size. 
# Set the \"export_document_structure\" property to \"False\"", "self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables:", "the document will be downsampled at this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL)", "modify how that method converts the document to .PDF. # Set the \"display_doc_title\"", "how to set instructions for some PDF readers to follow when opening an", "3, bookmarks.count) #endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart", "the configuration # # in our MetafileRenderingOptions object to the saving operation. #", "Set the \"emulate_raster_operations\" property to \"False\" to fall back to bitmap when #", "preblend_images in (False, True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend images", "# to modify how that method converts the document to .PDF. 
pdf_save_options =", "warning_callback # save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\",", "= \"Windows bar pdf title\" # Create a \"PdfSaveOptions\" object that we can", "# self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def __init__(self): # self.warnings: List[aw.WarningInfo] = []", "to interpolate images. # Their resolution should be lower than that of the", "+ \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif", "outline in the output PDF. save_options.outline_options.default_bookmarks_outline_level = 1 # Set the \"header_footer_bookmarks_export_mode\" property", "Set the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF reader to #", "operators.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "to create a \"Span\" tag in the document structure to export the text", "signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self):", "# content) # break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn(", "400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 
R/Length 11 0 R/Filter /FlateDecode>>\", content) #def", "# self.assertEqual(\", ٢, ٣, ٤, ٥, ٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠\",", "\"rb\") as file: content = file.read() if export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\" +", "occur upon saving a document.\"\"\" # def __init__(self): # self.warnings = aw.WarningInfoCollection() #", "i) -> aw.WarningInfo: # return self.warnings[i] # def clear(self): # \"\"\"Clears warning collection.\"\"\"", "and 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\")", "pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance", "are in all headers/footers. save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc", "are converting to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR", "document's \"save\" method # to modify how that method converts the document to", "#4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\",", "page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks in (False, True):", "#with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: # content = file.read() #if rendering_mode", "records. 
#def test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR + \"WMF with image.docx\") # metafile_rendering_options", "property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF reader to # apply a percentage-based", "+ \"morzal.pfx\", \"aw\") # Create a \"PdfSaveOptions\" object that we can pass to", ".PDF. options = aw.saving.PdfSaveOptions() # The output PDF document will contain an outline,", "booklet. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber =", "to fall back to bitmap when # # it encounters a metafile, which", "0]/FT /Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\",", "images in the PDF document. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a", "to preserve the visual appearance of the document as Aspose.Words convert it to", "aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif", "save a document to the PDF format using the Save method and the", "Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to # only export bookmarks in the", "set the numeral format used when saving to PDF. 
doc = aw.Document() builder", "/Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES,", "content = file.read().decode('utf-8') #if preserve_form_fields: # self.assertEqual(\"Please select a fruit: \", text_fragment_absorber.text) #", "it encounters a metafile, which will require raster operations to render in the", "\"NORMAL\" to render all images in color. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode", "3. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading", "to determine the symbol set from regional settings. options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR +", "= render_text_as_bookfold # If we are rendering the document as a booklet, we", "which is a table of contents that lists headings in the document body.", "to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert text with PAGE", "\"[0 (S) 0 (a) 0 (m) 0 (s) 0 (t) 0 (a) -1", "how to update all the fields in a document immediately before saving it", "of the timestamp is 100 seconds. self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set our", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED),", "document structure elements, which can assist in programmatically interpreting our document. 
doc =", "render every metafile using vector graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create", "all headings whose levels are above 2 from the outline. # The last", "1 equivalents. # Set the \"use_core_fonts\" property to \"False\" to not apply PDF", "enabled. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file:", "to lower the memory footprint of large documents' saving operations # at the", "document as we save it to PDF. doc = aw.Document(MY_DIR + \"DrawingML shape", "pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode in", "adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\")", "save_options = aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" # to only", "page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode)", "/Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79 0]>>\",", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED),", "= aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) 
#page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for x in annotation_selector.selected] #if create_note_hyperlinks:", "options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\"))", "test_additional_text_positioning(self): for apply_additional_text_positioning in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode", "create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R", "New\" is a nonstandard font. builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier", "to use. 
# Set the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs", "= CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file:", "font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for font in font_sources[1].get_available_fonts())) # Create a", "to \"4\" to exclude all headings whose levels are above 4 from the", "as file: content = file.read() with io.BytesIO() as stream: image.save(stream) if preblend_images: self.assertIn(\"11", "such as Adobe Acrobat Pro, # to display the value of the document's", "file size. # Set the \"additional_text_positioning\" property to \"False\" to render the document", "to \"PdfCompliance.PDF17\" to comply with the \"1.7\" standard. # Set the \"compliance\" property", "the 4th and 5th heading level entries are sub-entries of the second 3rd", "converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set", "edit the document. options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000,", "in font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\" object that we can pass to the", "a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85 677 0\",", "the Jpeg images that end up in the output PDF. 
# Set the", "\"True\" to render embedded EMF data # for metafiles that we can render", "0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11", "/Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733", "aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\" property to \"True\" to save form fields as", "with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write additional text positioning operators. doc", "6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE", "of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create a \"PdfSaveOptions\" object that", "the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\" property to", "navigation pane in the output PDF. save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\"", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\")", "of the document starting from the second page. options.page_set = aw.saving.PageSet(1) # This", "to the saving operation. # save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options #", "as TOC entries of levels 1 and 5. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading", "and warning.warning_type == type and warning.description == description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime)", "the \"create_missing_outline_levels\" property to \"True\" to include all missing levels in the outline,", "us to work with any layers present in the document. # Set the", "fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "#ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure the rendering quality of", "making documents searchable but may significantly increase the size of already large documents.", "The file will be considerably smaller, # but we may need access to", "raster operations to render in the output PDF. # metafile_rendering_options.emulate_raster_operations = False #", "images according to the size of the metafile on the page. # Set", "# is only intended as a supplement to the documentation, and is provided", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + #", "#ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create a \"Span\" tag in the document structure", "= aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\") # Create a \"PdfSaveOptions\" object that we", "to PDF. 
certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\",", "PDF. # metafile_rendering_options.emulate_raster_operations = False # # Set the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\"", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"update_fields\" property to \"False\" to", "0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5", "[0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1", "0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85 654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85", "options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\")", "method converts the document to .PDF. # Set the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\"", "controls visible. # Set the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to get the PDF", "above 4 from the outline. options.outline_options.headings_outline_levels = 4 # If an outline entry", "aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts in (False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart", "0 (s) 0 (t) 0 (a) -1 (g) 1 (,) 0 ( )", "to \"PdfTextCompression.FLATE\" to apply ZIP compression # to text when we save the", "time we need them to display accurate values. 
builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\"", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400, 400,", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber()", "to get Aspose.Words to # automatically select the color space for images in", "image quality. pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to #", "artifacts. # Set the \"preblend_images\" property to \"False\" to render transparent images normally.", "from the U+06F0 to U+06F9 range as numbers. # Set the \"numeral_format\" property", "property to \"False\" to render transparent images normally. options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR +", "options = aw.saving.PdfSaveOptions() # The output PDF document will contain an outline, which", "1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create a \"PdfSaveOptions\" object that we can pass to", "warning(self, info: aw.WarningInfo): # if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info)", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\" property", "self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original font sources. 
aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document", "three levels in the document outline. doc = aw.Document() builder = aw.DocumentBuilder(doc) #", "table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode", "builder.font.name = \"Courier New\" builder.writeln(\"The quick brown fox jumps over the lazy dog.\")", "the PDF. options.update_fields = update_fields # We can clone PdfSaveOptions objects. options_copy =", "= aw.DocumentBuilder(doc) for i in range(100): builder.writeln(\"Lorem ipsum dolor sit amet, consectetur adipiscing", "when we save the document to PDF. # Set the \"text_compression\" property to", "The larger the document, the bigger the impact that this will have. options.text_compression", "custom properties in an XMP packet. options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options)", "pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR", "aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}:", "#ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to update all the fields in a document", "by disallowing all permissions. 
encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to allow the", "# self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text)", "# break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5", "font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\" object that we can pass to the document's", "pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber)", "its respective heading. # Set the \"headings_outline_levels\" property to \"5\" to include all", "interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask", "\"https://www.google.com/search?q=%20aspose\", False) # Create a \"PdfSaveOptions\" object that we can pass to the", "# link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback", "the constructor. 
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\",", "# Set the \"compliance\" property to \"PdfCompliance.PDF_A1A\" to comply with the \"PDF/A-1a\" standard,", "is using. The file will be considerably smaller, # but we may need", "render the EMF+ part of an EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\"", "uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1]", "timestamp it. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") # Create", "builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR +", "a collection of strings. 
builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0) # Create a \"PdfSaveOptions\"", "text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) #", "to only register headings with heading levels that are no larger than 1.", ")/AP<</N 12 0 R>>>>\", # content) # form = pdf_document.form # self.assertEqual(1, pdf_document.form.count)", "1 and 5, and no headings with levels of 2, 3, and 4.", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\"", "True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF Type 1 font substitution.", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber)", "export Odd pages from the document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) for", "self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "0 R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn(", "available via the # \"Content\" navigation pane of Adobe Acrobat at the cost", "test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how", "the document structure of the original document. # This helps with making documents", "Set the \"create_outlines_for_headings_in_tables\" property to \"False\" to exclude all headings within tables, #", "timestamp is 100 seconds. 
self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set our timeout period", "+ \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream:", "Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" # to preserve custom properties within the", "#ExSummary:Shows how to save a document to the PDF format using the Save", "update them using updating methods such as \"Field.Update()\", and \"Document.UpdateFields()\" # each time", "self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work with outline levels that do", "as Aspose.Words convert it to PDF. # Set the \"compliance\" property to \"PdfCompliance.PDF17\"", "b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8", "options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the \"resolution\" property to \"36\" to", "\"export_language_to_span_tag\" is ignored. save_options.export_document_structure = True save_options.export_language_to_span_tag = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_language_to_span_tag.pdf\", save_options)", "the second 3rd level entry, and so on. # In the outline, we", "Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks that are in", "in range(5): builder.writeln(f\"Page {i + 1} ({'odd' if i % 2 == 0", "this time. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR +", "in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\"", "<</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count) #", "in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to apply", "method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE #", "= pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for", "builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create", "test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit the headings' level that", "and the PdfSaveOptions class. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please select a", "how to enable or disable subsetting when embedding fonts while rendering a document", "\"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR", "new pages when we click on them. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "a document that we save to PDF to 220 ppi. self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution)", "# save_warning_callback.save_warnings[0].description, # \"Image can not be processed. Possibly unsupported image format.\") #class", "property to \"DmlEffectsRenderingMode.FINE\" to # render DrawingML effects with more accuracy and also", "panel # with a thumbnail for each page in the document. # Set", "builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\" builder.writeln(\"The quick brown fox jumps", "that method converts the document to .PDF. pdf_options = aw.saving.PdfSaveOptions() # Set the", "= aw.DocumentBuilder(doc) # Insert headings of levels 1 to 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1", "self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85 806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) #", "and has no controls visible. # Set the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to", "# Create a digital signature and assign it to our SaveOptions object to", "converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\" property", "builder.writeln(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, \" + \"sed do eiusmod", "\"EMBED_NONE\" to not embed any fonts in the output PDF. options.font_embedding_mode = pdf_font_embedding_mode", "us use it to make a booklet. # Set the \"use_book_fold_printing_settings\" property to", ") 1 (2) -1 (018)] TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in", "Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to # render DrawingML effects with more", "8, 9, 10, 50, 100\") # Create a \"PdfSaveOptions\" object that we can", "aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels", "/FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode ==", "+ \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" + # \"Page 4", "an EMF+ dual metafile. 
# Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to #", "tempor incididunt ut labore et dolore magna aliqua.\") # Create a \"PdfSaveOptions\" object", "\"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: #", "as file: content = file.read() if export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type", "(aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change image", "the output document may be larger with this setting. # Set the \"color_mode\"", "# self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent", "property to \"EMBED_NONSTANDARD\" to only allow nonstandard fonts' embedding in the output PDF.", "the document structure. options.export_document_structure = export_document_structure # Suppose we export document structure while", "properties that we can use to filter out a set of pages from", "outline, we can click on the arrow of the \"owner\" entry to collapse/expand", "def __init__(self): # self.warnings: List[aw.WarningInfo] = [] # def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}:", "pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3 0", "both the fonts in this document. original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True)", "the first level of the outline in the output PDF. save_options.outline_options.default_bookmarks_outline_level = 1", "images in the document that it converts to PDF. 
# In most cases,", "+ info.description) # self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL):", "3, and 4 as \"missing\". # Set the \"create_missing_outline_levels\" property to \"True\" to", "711 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5", "-> aw.WarningInfo: # return self.warnings[i] # def clear(self): # \"\"\"Clears warning collection.\"\"\" #", "et dolore magna aliqua.\") # Create a \"PdfSaveOptions\" object that we can pass", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)", "\") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create a \"PdfSaveOptions\" object that we", "to \"True\" to get the reader that opens this document to interpolate images.", "heading. # Set the \"headings_outline_levels\" property to \"4\" to exclude all headings whose", "the \"image_compression\" property to \"PdfImageCompression.AUTO\" to use the # \"image_compression\" property to control", "all images from the document in black and white. # The size of", "U+06F9 range as numbers. 
# Set the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to use", "(False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to save a document to", "aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for x in annotation_selector.selected] #if create_note_hyperlinks: #", "elements, which can assist in programmatically interpreting our document. doc = aw.Document() builder", "test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how", "##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback to bitmap rendering and", "pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode", "section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print this document on both sides of", "(\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0", "document's size may become very large, but we will have full use of", "a booklet, we must set the 
\"multiple_pages\" # properties of the page setup", "+ \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) #", "at the cost of increased file size. # Set the \"export_document_structure\" property to", "levels that are no larger than 1. pdf_save_options.outline_options.headings_outline_levels = 1 # Set the", "\"Span\" tag in the document structure to export the text language. doc =", ",۶ ,۵ ,۴ ,۳ ,۲ ,۱\", text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how", "733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212 711 0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0,", "can fold all the pages down the middle at once, # and the", "method converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\"", "self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with", "as TOC entries of levels 1, 2, and then 3. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1", "self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to apply text compression when saving", "\"save\" method # # to modify how that method converts the document to", "some PDF readers to follow when opening an output document. doc = aw.Document()", "/Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711", "# # This file is part of Aspose.Words. 
The source code in this", "/Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field =", "the outline level 5 headings as level 2. save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR +", "== aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣, ٤, ٥, ٦, ٧, ٨, ٩, ١٠,", "pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode ==", "/Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document =", "110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR +", "#ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve document structure elements, which can assist in", "sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\") # Create", "\"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count) #endif def test_table_heading_outlines(self):", "content = file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS", "+ \"PdfSaveOptions.embed_core_fonts.pdf\") 
#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name)", "# aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3", "with a reader such as Adobe Acrobat, we will need to zoom in", "def test_update_fields(self): for update_fields in (False, True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows", "to save a document to the PDF format in the form of a", "the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"compliance\" property to", "sign a saved PDF document digitally and timestamp it. doc = aw.Document() builder", "interpolate_images in (False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform interpolation", "# self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count)", "the # \"image_compression\" property to control the quality of the Jpeg images that", "(even)\\r\\n\" + # \"Page 4 (even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber", "# Set the \"zoom_factor\" property to \"25\" to give the zoom factor a", "\"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if", "as hyperlinks. 
doc = aw.Document(MY_DIR + \"Footnotes and endnotes.docx\") # Create a \"PdfSaveOptions\"", "can serve as TOC entries of levels 1, 2, and then 3. builder.paragraph_format.style_identifier", "level 2 and lower outline entries # and collapse all level and 3", "\"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render the EMF+ part of an", "not embed any fonts in the output PDF. options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR +", "from the document. doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in range(5):", "# whose text we will format in a heading-type style, will serve as", "saved PDF. # Aspose.Words will also apply Flate compression to all images and", "# self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) <", "table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5 0 obj\\r\\n<</Type /Page/Parent 3", "the PdfSaveOptions class. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please select a fruit:", "+ \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: # content = file.read()", "to freeze all form fields in the document at # their current values", "for all images in a document that we are converting to PDF. doc", "to metafile size on the page. 
doc = aw.Document(MY_DIR + \"WMF with text.docx\")", "aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\"", "#self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000,", "Clicking on an entry in this outline will take us to the location", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)", "pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change", "\"display_doc_title\" to \"True\" to get some PDF readers, such as Adobe Acrobat Pro,", "assist in programmatically interpreting our document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style", "constructor. 
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name)", "builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The", "# Set the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF reader to", "The last two headings we have inserted above will not appear. save_options.outline_options.headings_outline_levels =", "self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta) -1 (g) 1 (, 10.", "the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the EMF+ part of an", "in range(100): builder.writeln(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, \" + \"sed", "806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84 47 0\", outline_item_collection[2].destination.to_string())", "4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\" object that we can pass to the", "\"use_core_fonts\" property to \"True\" to replace some fonts, # including the two fonts", "render_text_as_bookfold in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save a", "0 R/Filter /FlateDecode>>\", content) else: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type 
/XObject/Subtype /Image/Width", "def warning(self, info: aw.WarningInfo): # if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") #", "image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\",", "document scaled at 1/4 of its actual size. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd", "PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") # Create a \"PdfSaveOptions\" object that", "in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set", "a background, which may reduce artifacts. # Set the \"preblend_images\" property to \"False\"", "[0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif", "count(self): # return len(self.warnings) # def contains(self, source: aw.WarningSource, type: aw.WarningType, description: str)", "#ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work with outline levels that do not contain any", "as we save it to PDF. 
doc = aw.Document(MY_DIR + \"DrawingML shape effects.docx\")", "page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False, True):", "+ \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document =", "EMF+ dual metafile if all of the EMF+ records are supported. # Otherwise,", "\"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf", "159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 677 0]>>\", content) self.assertIn( b\"<</Type", "== aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR +", "signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") # Create a \"PdfSaveOptions\" object", "pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts in (False, True):", "+ \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR +", "original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() 
folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name", "effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5 0 obj\\r\\n\" + #", "# if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for", "def test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri,", "our fields will be up to date before saving. # Set the \"update_fields\"", "image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\") #", "0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85 806 0\", outline_item_collection[3].destination.to_string()) #", "a separate panel # that allows us to work with any layers present", "+ \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber)", "b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202", "\"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is ignored. 
save_options.export_document_structure = True save_options.export_language_to_span_tag = True doc.save(ARTIFACTS_DIR", "#8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\",", "outline levels 2, 3, and 4 as \"missing\". # Set the \"create_missing_outline_levels\" property", "# Set the \"color_mode\" property to \"NORMAL\" to render all images in color.", "R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0", "in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable or", "= aw.saving.MetafileRenderingOptions() # # Set the \"emulate_raster_operations\" property to \"False\" to fall back", "downsampling to # images with a resolution that is above 128 ppi. options.downsample_options.resolution_threshold", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def", "images from the document in black and white. # The size of the", "] for uri, result in parameters: with self.subTest(uri=uri, result=result): doc = aw.Document() builder", "to zoom in on the image # to see the interpolation effect if", "48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc = aw.Document(MY_DIR + \"Corrupted image.docx\") #", "\"True\" to iterate through all the document # fields and update them before", "while saving a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) img", "output PDF document. 
# Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" # to preserve", "a custom font, embedding in the output document may be desirable. # Set", "pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: #", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png", "color with saving options property. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a", "back to bitmap when # # it encounters a metafile, which will require", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"export_document_structure\" property to \"True\"", "#ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how to specify a compression type for all", "to save all hyperlinks using Javascript code # that forces readers to open", "an outline, which is a table of contents that lists headings in the", "# self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size", "that is above 128 ppi. 
options.downsample_options.resolution_threshold = 128 # Only the first two", "[85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711 0]>>\", content)", "= aw.Document(MY_DIR + \"Corrupted image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback", "= aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick", "to get the reader that opens this document to interpolate images. # Their", "and lower outline entries # and collapse all level and 3 and higher", "#self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading", "the \"compliance\" property to \"PdfCompliance.PDF17\" to comply with the \"1.7\" standard. # Set", "export_document_structure # Suppose we export document structure while saving this document. In that", "only the odd-numbered pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3", "try to render every metafile using vector graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK #", "with a resolution that is above 128 ppi. options.downsample_options.resolution_threshold = 128 # Only", "of their page numbers. 
# 1 - Save only the even-numbered pages: options.page_set", "how to convert only some of the pages in a document to PDF.", "(odd)\\r\\n\" + # \"Page 4 (even)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) def", "in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode", "document, with their PDF Type 1 equivalents. # Set the \"use_core_fonts\" property to", "saved PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings that", "\"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode", "save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\")", "property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the EMF+ part of an EMF+ dual", "# self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance):", "# that allows us to work with any layers present in the document.", "factor a value of 25%. options = aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor =", "font substitution. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\")", "# to render a simplified version of DrawingML effects. 
# Set the \"dml_effects_rendering_mode\"", "obj\\r\\n<</Length 13 0 R>>stream\", file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\"))", "image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions() # # Set the \"emulate_raster_operations\" property to \"False\"", "modify how that method converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() #", "aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page", "#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title)", ".PDF. options = aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\" object of the \"SaveOptions\" object", "aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how", "Set the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to get the PDF reader to display", "to sign a generated PDF document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents", "aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) #", "result=result): doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\")", "text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0 (S)", "131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" #", "preserve_form_fields: # self.assertEqual(\"Please select a fruit: \", text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\" +", "settings. 
options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "12 Tf )/AP<</N 12 0 R>>>>\", # content) # form = pdf_document.form #", "Insert headings that can serve as TOC entries of levels 1, 2, and", "file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self): for pdf_image_compression", "#table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart", "#ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert a whole document to PDF", "R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0", "of the original document. # This helps with making documents searchable but may", "property to \"False\" to save the document as a PDF normally. save_options.memory_optimization =", "with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save a document to the PDF", "pdf_save_options.image_compression = pdf_image_compression # Set the \"jpeg_quality\" property to \"10\" to strengthen compression", "that the document is using. 
The file will be considerably smaller, # but", "#ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure the rendering quality of DrawingML", "b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318", "pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: #", "def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how", "== aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) #pdf_document =", "# def count(self): # return len(self.warnings) # def contains(self, source: aw.WarningSource, type: aw.WarningType,", "#ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to set the default zooming that a", "self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85 48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc", "\"PdfPageMode.USE_NONE\" to get the PDF reader to display just the document itself. options.page_mode", "open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: # content = file.read().decode('utf-8') #if preserve_form_fields: #", "modify how that method converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() save_options.save_format", "self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA:", "when we open the document. options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd", "file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" +", "b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612", "EMF+ records are supported. # Otherwise, Aspose.Words will render the EMF part. save_options.metafile_rendering_options.emf_plus_dual_rendering_mode", "0 R>>\\r\\n\", content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode", "elements such as the heading # and the next paragraph via \"View\" ->", "appearance of the document as Aspose.Words convert it to PDF. # Set the", "the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF # The output", "embed_full_fonts in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading", "to embed every glyph of every embedded font in the output PDF. 
#", "entries # and collapse all level and 3 and higher entries when we", "document to .PDF. # Set the \"color_mode\" property to \"GRAYSCALE\" to render all", "# \"\"\"Returns True if a warning with the specified properties has been generated.\"\"\"", "( ) 1 (2) -1 (0) 0 (1) 0 (8)] TJ\", # tj_operator.to_string())", "headers and footers.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL))", "reduce artifacts. # Set the \"preblend_images\" property to \"False\" to render transparent images", "the document outline. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings of", "def test_doc_title(self): for display_doc_title in (False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how", "tables # in the outline, provided that they have a heading level that", "comply with the \"PDF/A-1b\" standard, # which aims to preserve the visual appearance", "\"image_compression\" property to control the quality of the Jpeg images that end up", "to get some PDF readers, such as Adobe Acrobat Pro, # to display", "builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\") # Create a \"PdfSaveOptions\" object", "bar pdf title\", pdf_document.info.title) def test_memory_optimization(self): for memory_optimization in (False, True): with self.subTest(memory_optimization=memory_optimization):", "\"wb\") as stream: # Create a \"PdfSaveOptions\" object that we can pass to", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() 
#image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if", "in black and white. # The size of the output document may be", "#ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign a", "== aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6", "self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if", "to discard # custom document properties as we save the document to .PDF.", "100\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣, ٤, ٥, ٦,", "its respective heading. # Set the \"headings_outline_levels\" property to \"4\" to exclude all", "it to PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert text with", "the \"image_compression\" property to \"PdfImageCompression.JPEG\" to use the # \"image_compression\" property to control", "1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\")", "733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5", "-1 (g) 1 (, 10. November) -1 ( ) 1 (2) -1 (018)]", "677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype", "options = aw.saving.PdfSaveOptions() # Create a digital signature and assign it to our", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\"", "== aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\",", "+ \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else", "of any kind, either expressed or implied. 
import io import os from datetime", "self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks", "= aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by disallowing all", "builder.writeln(\"The quick brown fox jumps over the lazy dog.\") # Create a \"PdfSaveOptions\"", "specify a compression type for all images in a document that we are", "to display the value of the document's \"title\" built-in property in the tab", "metafile_rendering_options = aw.saving.MetafileRenderingOptions() # # Set the \"emulate_raster_operations\" property to \"False\" to fall", "to .PDF. # Set the \"color_mode\" property to \"GRAYSCALE\" to render all images", "document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\"", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode)", "pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields in (False, True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields", "open the document with it. # Set the \"zoom_factor\" property to \"25\" to", "to the documentation, and is provided # \"as is\", without warranty of any", "text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts in", "pane in the output PDF. 
save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\" property", "that method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE", "212 711 0]>>\", content) else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL))", "/Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y)", "# self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱\",", "doc = aw.Document(MY_DIR + \"DrawingML shape 3D effects.docx\") # warning_callback = ExPdfSaveOptions.RenderCallback() #", "reader to # apply a percentage-based zoom factor when we open the document", "get the PDF reader to display just the document itself. 
options.page_mode = page_mode", "pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\"))", "property to \"PdfImageCompression.JPEG\" to use the # \"image_compression\" property to control the quality", "pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif", "color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to", "pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length)", "of DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to # render", "booklet, we must set the \"multiple_pages\" # properties of the page setup objects", "a separate panel # with a thumbnail for each page in the document.", "R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type", "the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"update_fields\" property to", "reader such as Adobe Acrobat, we will see the document scaled at 1/4", "to preserve custom properties in an XMP packet. options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR +", "that method converts the document to .PDF. # Set the \"display_doc_title\" to \"True\"", "612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content = file.read() with io.BytesIO() as", "of pages from # our document to save in an output PDF document", "within tables, # such as the one we have created above from the", "self.assertIn(\"11 0 obj\\r\\n\" + # \"<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect", "\"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page", "structure elements, which can assist in programmatically interpreting our document. doc = aw.Document()", "range as numbers. # Set the \"numeral_format\" property to \"NumeralFormat.CONTEXT\" to # look", "will have full use of all fonts if we edit the PDF. 
#", "aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\") # Create a \"PdfSaveOptions\" object that we can", "warning(self, info: aw.WarningInfo): # if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \" +", "R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a)", "= page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR +", "of every embedded font in the output PDF. options.embed_full_fonts = True # Set", "8 0 R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content)", "7, 8, 9, 10, 50, 100\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\",", "+ \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content = file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn(", "4th and 5th heading level entries are sub-entries of the second 3rd level", "document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\"", "the output PDF in a way that helps us use it to make", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber", "doc = aw.Document(MY_DIR + \"Corrupted image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback =", "= False # # Set the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to try to", "all the fields in a document right before a save operation. # This", "documents' saving operations # at the cost of increasing the duration of the", "+ \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber =", "documents. 
save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows", "set the default zooming that a reader applies when opening a rendered PDF", "test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri, result", "signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR", "Ltd. All Rights Reserved. # # This file is part of Aspose.Words. The", "the \"headings_outline_levels\" property to \"2\" to exclude all headings whose levels are above", "as the column header. builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier", "= aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows bar pdf title\" # Create a", "# leaving blank outline entries since there are no usable headings. 
# Set", "# content) # self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: #", "8/SMask 10 0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>\", content) else: self.assertIn( b\"7", "the contents will line up in a way that creates a booklet. doc.save(ARTIFACTS_DIR", "not export any bookmarks that are inside headers/footers. # Set the \"header_footer_bookmarks_export_mode\" property", "content = file.read() with io.BytesIO() as stream: image.save(stream) if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849", "is the preferable option if we know that all our fields will be", "complies with \"PDF/A-1b\" as well as preserving the document structure of the original", "entries from the 5th heading level are sub-entries of the second 4th level", "#ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert a whole document to PDF with three levels", "to collapse/expand all its sub-entries. # Set the \"expanded_outline_levels\" property to \"2\" to", "render in the output PDF. # metafile_rendering_options.emulate_raster_operations = False # # Set the", "have full use of all fonts if we edit the PDF. # Set", "as usual. save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "fonts in the output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to", "apply any interpolation. 
save_options.interpolate_images = interpolate_images # When we open this document with", "= rendering_mode # Set the \"use_emf_embedded_to_wmf\" property to \"True\" to render embedded EMF", "open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: # content = file.read() #if effects_rendering_mode in", "timezone import aspose.words as aw import aspose.pydrawing as drawing from api_example_base import ApiExampleBase,", "second 4th level outline entry, # the 4th and 5th heading level entries", "update them before we save it as a PDF. This will make sure", "= pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name)", "for open_hyperlinks_in_new_window in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save", "user to choose an option from a collection of strings. builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\",", "form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please", "to \"PdfCustomPropertiesExport.METADATA\" # to preserve custom properties in an XMP packet. options.custom_properties_export =", "that end up in the output PDF. 
pdf_save_options.image_compression = pdf_image_compression # Set the", "\"Transparent background logo.png\") # Create a \"PdfSaveOptions\" object that we can pass to", "self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render fallback shapes when saving to", "3 and higher entries when we open the document. options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR", "+ \"DrawingML shape fallbacks.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "/XYZ 212 711 0]>>\", content) else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document =", "12 0 R>>>>\", # content) # form = pdf_document.form # self.assertEqual(1, pdf_document.form.count) #", "heading. # Set the \"headings_outline_levels\" property to \"5\" to include all headings of", "\"PdfPageMode.USE_OUTLINES\" to display the outline navigation pane in the output PDF. save_options.page_mode =", "for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode", "with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure the", "of warnings about unsupported metafile records. #def test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR +", "or lower level, # an arrow will appear to the left of the", "text with PAGE and NUMPAGES fields. 
These fields do not display the correct", "pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: #", "save_options = aw.saving.PdfSaveOptions() # Set the \"interpolate_images\" property to \"True\" to get the", "# self.assertEqual(\"1 XYZ 233 806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ", "# self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) <", "how to limit the headings' level that will appear in the outline of", "R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent", "appear in the outline of a saved PDF document. 
doc = aw.Document() builder", "= aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier", "(aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to apply text", "#self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode in", "property to \"PdfPageMode.USE_OC\" to get the PDF reader to display a separate panel", "if export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents 6", "# self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format)", "for uri, result in parameters: with self.subTest(uri=uri, result=result): doc = aw.Document() builder =", "+ \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 - Save only the odd-numbered pages: options.page_set =", "#if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000,", "#ExFor:PdfTextCompression #ExSummary:Shows how to apply text compression when saving a document to PDF.", "PDF reader to # apply a 
percentage-based zoom factor when we open the", "all the fields in a document immediately before saving it to PDF. doc", "middle at once, # and the contents will line up in a way", "\"export_document_structure\" property to \"True\" to make the document structure, such tags, available via", "\"True\" to arrange the contents # in the output PDF in a way", "#7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\",", "the document. # Set the \"interpolate_images\" property to \"False\" to make it so", "/Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711 0]>>\",", "builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create a \"PdfSaveOptions\" object that we can pass", "self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure the rendering", "options) #ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR +", "strings. 
builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0) # Create a \"PdfSaveOptions\" object that we", "= 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4, 5, 6, 7, 8, 9,", "with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) # elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000,", "options = aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\" property to \"True\" to arrange the", "the PDF reader # also to display the outline, if possible. # Set", "= aw.Document(MY_DIR + \"Paragraphs.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "entries of a higher level inbetween itself and the next entry of the", "157 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W", "the \"open_hyperlinks_in_new_window\" property to \"True\" to save all hyperlinks using Javascript code #", "to choose an option from a collection of strings. builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"],", "the contents # in the output PDF in a way that helps us", "for render_text_as_bookfold in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save", "the \"font_embedding_mode\" property to \"EMBED_ALL\" to embed all fonts in the output PDF.", "and footers.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "#pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if", "\"PdfTextCompression.NONE\" to not apply any # compression to text when we save the", "of the operation. 
# Set the \"memory_optimization\" property to \"False\" to save the", "interpolation effect if we saved the document with it enabled. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\",", "0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12", "options.downsample_options.resolution = 36 # Set the \"resolution_threshold\" property to only apply the downsampling", "= aw.saving.PdfSaveOptions() # Set the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to # use glyphs", "EMF part. save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set the \"use_emf_embedded_to_wmf\" property to \"True\" to", "\"Arvo\" for font in font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\" object that we can", "saving to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id = 4096 #", "\"page_set\" properties that we can use to filter out a set of pages", "Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to discard all DrawingML effects. # Set", "to PDF. doc = aw.Document(MY_DIR + \"Bookmarks in headers and footers.docx\") # Create", "#ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert a whole document to PDF with", "aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts,", "to set instructions for some PDF readers to follow when opening an output", "space for images in the document that it converts to PDF. # In", "modify how that method converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() save_options.text_compression", "0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4,", "signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart", "\\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS,", "doc = aw.Document(MY_DIR + \"Rendering.docx\") # Create a \"PdfSaveOptions\" object that we can", "'even'})\") if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\" object that we", "doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert text with PAGE and NUMPAGES", "of the document's \"title\" built-in property in the tab that belongs to this", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page", "self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) #", "#endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False, True): with 
self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows", "we export it to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\")", "\"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR", "# self.assertEqual(\"1 XYZ 202 733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258 711 0\",", "Save only the even-numbered pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) #", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format)", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: #", "there are no usable headings. # Set the \"create_missing_outline_levels\" property to \"False\" to", "[70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else:", "first two images from the document will be downsampled at this stage. doc.save(ARTIFACTS_DIR", "to include all headings of levels 5 and below in the outline. 
save_options.outline_options.headings_outline_levels", "= aw.Document(MY_DIR + \"Text positioning operators.docx\") # Create a \"PdfSaveOptions\" object that we", "in this file # is only intended as a supplement to the documentation,", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\"", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Create a digital signature and", "outline. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings of levels 1", "page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\",", "#ExSummary:Shows how to perform interpolation on images while saving a document to PDF.", "to perform interpolation on images while saving a document to PDF. doc =", "is no larger than the value of the \"headings_outline_levels\" property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables", ".PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to display", "may become very large, but we will have full use of all fonts", "document will contain one page starting from page two, which will only contain", "aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") # Create", "10 0 R/X2 11 0 R/X3 12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\",", "the EMF+ part of an EMF+ dual metafile if all of the EMF+", "the \"preblend_images\" property to \"False\" to render transparent images normally. 
options.preblend_images = preblend_images", "#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: # data", "compression when saving a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "# def __init__(self): # self.warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): #", "[0 0 0 0]/FT /Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field", "the original document. # This helps with making documents searchable but may significantly", "< text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) #", "# Set the \"page_mode\" property to \"PdfPageMode.USE_OC\" to get the PDF reader to", "#ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert a whole document to PDF with three", "all images in a document that we save to PDF to 220 ppi.", "3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB", "self.assertEqual(\"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100\", text_absorber.text)", "to \"PdfCompliance.PDF_A1B\" to comply with the \"PDF/A-1b\" standard, # which aims to preserve", "the reader does not apply any interpolation. save_options.interpolate_images = interpolate_images # When we", "pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 - Save every", "we open this document, we will need to provide the password before accessing", "PDF. 
save_options.outline_options.default_bookmarks_outline_level = 1 # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to #", "has been generated.\"\"\" # return any(warning for warning in self.warnings # if warning.source", "pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode", "headings when saving a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) #", "of 25%. options = aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 # When", "+ \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2,", "to skip embedding Arial and Times New Roman fonts into a PDF document.", "True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF fonts scaling according to", "#pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" + # \"Page 4 (even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to set instructions for some PDF readers", "elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): #", "0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = 
aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"page_index\" to \"1\" to render a", "very large, but we will have full use of all fonts if we", "aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: # content = file.read()", "embed every glyph of every embedded font in the output PDF. # The", "doc = aw.Document(MY_DIR + \"Footnotes and endnotes.docx\") # Create a \"PdfSaveOptions\" object that", "This is the preferable option if we know that all our fields will", "options = aw.saving.PdfSaveOptions() # Set the \"update_fields\" property to \"False\" to not update", "document to PDF with three levels in the document outline. doc = aw.Document()", "CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content = file.read().decode('utf-8') if page_mode", "\", text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\" + # \"<</Type /Annot/Subtype /Widget/P 5 0", "14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: #", "self.assertEqual(1, pdf_document.form.count) # field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count) #", "/FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR + \"DrawingML shape 3D effects.docx\")", "arrow of the \"owner\" entry to collapse/expand all its sub-entries. 
# Set the", "aw.saving.PageSet(1) # This document will contain one page starting from page two, which", "doc = aw.Document(MY_DIR + \"WMF with text.docx\") # Create a \"PdfSaveOptions\" object that", "modify how that method converts the document to .PDF. save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) #", "collection of strings. builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0) # Create a \"PdfSaveOptions\" object", "box which will allow a user to choose an option from a collection", "readers to follow when opening an output document. doc = aw.Document() builder =", "in (False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create PDF document", "encryption_details # When we open this document, we will need to provide the", "we will see the document scaled at 1/4 of its actual size. doc.save(ARTIFACTS_DIR", "document that we are converting to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "present in the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to get", "including the two fonts in our document, with their PDF Type 1 equivalents.", "document as the title bar. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\")", "\"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"),", "with it. 
# Set the \"zoom_factor\" property to \"25\" to give the zoom", "aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri, result in parameters: with self.subTest(uri=uri, result=result):", "pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode", "6, 7, 8, 9, 10, 50, 100\") # Create a \"PdfSaveOptions\" object that", "converts the document to .PDF. # Set the \"color_mode\" property to \"GRAYSCALE\" to", "pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance", "212 711 0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode in", "test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to", "Set the \"color_mode\" property to \"GRAYSCALE\" to render all images from the document", "#ExSummary:Shows how enable/disable PDF Type 1 font substitution. 
doc = aw.Document() builder =", "pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as", "\"preblend_images\" property to \"False\" to render transparent images normally. options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR", "+ \"WMF with text.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "we must set the \"multiple_pages\" # properties of the page setup objects of", "fields in a document immediately before saving it to PDF. doc = aw.Document()", "the # \"image_compression\" property to control the quality of all images that end", "builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\") as stream: # Create a \"PdfSaveOptions\"", "\"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in", "\"Arial\" is a standard font, and \"Courier New\" is a nonstandard font. builder.font.name", "allows us to work with any layers present in the document. # Set", "# In most cases, the color space will be RGB. # Set the", "document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\")", "= aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\"", "save hyperlinks in a document we convert to PDF so that they open", "# self.assertEqual(\"1 XYZ 258 711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157 733 0\",", "symbols link to anything. 
options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with", "# self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for", "#ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform interpolation on images while saving a document to", "NUMPAGES fields. These fields do not display the correct value in real time.", "\"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle", "for warning in self.warnings # if warning.source == source and warning.warning_type == type", "content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R", "to get the PDF reader to display just the document itself. options.page_mode =", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\" property to \"True\" to turn", "0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0", "+ \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as", "\"as is\", without warranty of any kind, either expressed or implied. import io", "glyphs to use. # Set the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use", "fonts into a PDF document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) # \"Arial\"", "as aw import aspose.pydrawing as drawing from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR,", "them as plain text in the output PDF. pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR +", "= aw.saving.PdfSaveOptions() # Set the \"compliance\" property to \"PdfCompliance.PDF_A1B\" to comply with the", "of the Jpeg images that end up in the output PDF. # Set", "PDF reader to display a separate panel # that allows us to work", "aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\")", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as", "method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"page_mode\"", "Set the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to # use glyphs from the U+0660", "aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to set the PDF", "#self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM):", "#ExSummary:Shows how to export Odd pages from the document. 
doc = aw.Document() builder", "break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0", "\"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self): for numeral_format in", "aw.saving.PdfSaveOptions() # Enable encryption via the \"encryption_details\" property. save_options.encryption_details = encryption_details # When", "how that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Since", "original document. # This helps with making documents searchable but may significantly increase", "save_options.outline_options.headings_outline_levels = 5 # This document contains headings of levels 1 and 5,", "+ \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content = file.read() if export_document_structure: self.assertIn( b\"5 0", "open it using Adobe Acrobat and find tags for elements such as the", "self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode", "(\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12 0 R>>>>\", # content) #", "0 R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR", "# doc = aw.Document(MY_DIR + \"DrawingML shape 3D effects.docx\") # 
warning_callback = ExPdfSaveOptions.RenderCallback()", "\"Page 4 (even)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag", "#ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\")", "= aw.saving.PdfSaveOptions() # Set the \"export_document_structure\" property to \"True\" to make the document", "\"[(Samsta) -1 (g) 1 (, 10. November) -1 ( ) 1 (2) -1", "5, 6, 7, 8, 9, 10, 50, 100\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC:", "6, 7, 8, 9, 10, 50, 100\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: #", "#if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\",", "the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" # to only render the EMF part of", "\"True\" to save all hyperlinks using Javascript code # that forces readers to", "document we convert to PDF so that they open new pages when we", "0 R /XYZ 85 79 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376", "render the EMF part. 
save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set the \"use_emf_embedded_to_wmf\" property to", "test_preserve_form_fields(self): for preserve_form_fields in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to", "def test_memory_optimization(self): for memory_optimization in (False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows", "shapes. # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" # to render the DML", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"text_compression\" property to \"PdfTextCompression.NONE\"", "default zooming that a reader applies when opening a rendered PDF document. doc", "eiusmod tempor incididunt ut labore et dolore magna aliqua.\") # Create a \"PdfSaveOptions\"", "aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data) # self.assertEqual(0,", "#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\",", "options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\")", "% 2 == 0 else 'even'})\") if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create", "and warning.description == description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails 
#ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date", "# self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format)", "\"PdfTextCompression.FLATE\" to apply ZIP compression # to text when we save the document", "\"create_note_hyperlinks\" property to \"True\" to turn all footnote/endnote symbols # in the text", "400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11 0 R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self):", "pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE,", "f\"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode ==", "pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set", "the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" # to substitute DML shapes with their fallback", "\"compliance\" property to \"PdfCompliance.PDF_A1B\" to comply with the \"PDF/A-1b\" standard, # which aims", "color space for images in a document as we export it to PDF.", "obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 
0]/FT /Sig/T\", content) #pdf_document =", "contain the second page. doc.save(stream, options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1,", "#8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\",", "aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create a \"PdfSaveOptions\" object that we", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length)", "#ExFor:PdfPageMode #ExSummary:Shows how to set instructions for some PDF readers to follow when", "output PDF. # Set the \"image_compression\" property to \"PdfImageCompression.JPEG\" to use the #", "as the heading # and the next paragraph via \"View\" -> \"Show/Hide\" ->", "rendering options when saving to PDF. 
doc = aw.Document(MY_DIR + \"EMF.docx\") # Create", "aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream)", "= use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else:", "link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA):", "document will contain an outline, which is a table of contents that lists", "if warning.source == source and warning.warning_type == type and warning.description == description) def", "to .PDF. # Set the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF", "test_embed_full_fonts(self): for embed_full_fonts in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how", "how that method converts the document to .PDF. 
# Set the \"color_mode\" property", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create a \"PdfSaveOptions\" object", "\"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri, result in parameters: with self.subTest(uri=uri, result=result): doc", "to \"NumeralFormat.CONTEXT\" to # look up the locale to determine what number of", "obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612", "apply ZIP compression # to text when we save the document to PDF.", "document structure. options.export_document_structure = export_document_structure # Suppose we export document structure while saving", "pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title) def test_memory_optimization(self): for memory_optimization in (False, True):", "\"PdfCompliance.PDF17\" to comply with the \"1.7\" standard. 
# Set the \"compliance\" property to", "save_options) #ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\",", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text()", "self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create PDF document outline entries for headings", "Set the \"compliance\" property to \"PdfCompliance.PDF17\" to comply with the \"1.7\" standard. #", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\" property to", "#ExSummary:Shows how to apply text compression when saving a document to PDF. doc", "# for metafiles that we can render as vector graphics. save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True", "\"page_mode\" property to \"PdfPageMode.USE_OC\" to get the PDF reader to display a separate", "for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to", "display them as plain text in the output PDF. pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR", "aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF # The output PDF document will contain an outline,", "appear to the left of the entry. 
This entry is the \"owner\" of", "792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group", "preserve document structure elements, which can assist in programmatically interpreting our document. doc", "SaveOptions object to sign the document when we save it to PDF. certificate_holder", "= aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else 3,", "#6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\",", "save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")", "aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR", "to PDF. The larger the document, the bigger the impact that this will", "any corresponding headings when saving a PDF document. doc = aw.Document() builder =", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL))", "in the output PDF. # metafile_rendering_options.emulate_raster_operations = False # # Set the \"rendering_mode\"", "the document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in range(5): builder.writeln(f\"Page", "\"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\"", "options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\", datetime.now()) # Create a timestamp authority-verified", "\"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction), #", "save operation. # This is the preferable option if we know that all", ".PDF. save_options = aw.saving.PdfSaveOptions() # The output PDF document will contain an outline,", "aw.WarningType, description: str) -> bool: # \"\"\"Returns True if a warning with the", "preblend images with transparent backgrounds while saving a document to PDF. doc =", "# self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) <", "property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a simplified version of DrawingML effects. 
#", "= aw.DocumentBuilder(doc) builder.write(\"Please select a fruit: \") # Insert a combo box which", "= pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\",", "and the contents will line up in a way that creates a booklet.", "EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to", "to all images and ignore the \"image_compression\" property's value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR", "# doc = aw.Document(MY_DIR + \"WMF with image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions() #", "set of pages from # our document to save in an output PDF", "graphics. save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "and 5, and no headings with levels of 2, 3, and 4. #", "to save the document as a PDF normally. save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR +", "DrawingML effects with more accuracy and also with more processing cost. options.dml_effects_rendering_mode =", "aw.saving.PdfSaveOptions() # Set the \"interpolate_images\" property to \"True\" to get the reader that", "content) #def test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR + \"DrawingML shape 3D effects.docx\") #", "to modify how that method converts the document to .PDF and applies the", "\"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content = file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type", "optimize memory consumption when rendering large documents to PDF. 
doc = aw.Document(MY_DIR +", "their page numbers. # 1 - Save only the even-numbered pages: options.page_set =", "#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to", "fallbacks.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set our timeout period via the constructor. options.digital_signature_details.timestamp_settings", "we save the document to PDF. The larger the document, the bigger the", "how to sign a saved PDF document digitally and timestamp it. doc =", "pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A:", "\"Cherry\"], 0) # Create a \"PdfSaveOptions\" object that we can pass to the", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ", "Set the \"preserve_form_fields\" property to \"True\" to save form fields as interactive objects", "= aw.Document(MY_DIR + \"WMF with image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions() # # Set", "the PDF reader to open the saved # document in full-screen mode, which", "#3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\",", "save it to PDF. 
doc = aw.Document(MY_DIR + \"DrawingML shape effects.docx\") # Create", "in color. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd", "+ \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title) def test_memory_optimization(self): for memory_optimization", "when saving to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id = 4096", "display the value of the document's \"title\" built-in property in the tab that", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL))", "10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\")", "aw.Document() builder = aw.DocumentBuilder(doc) # \"Arial\" is a standard font, and \"Courier New\"", "test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to", "self.assertEqual(8, len([a for a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ", "R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR + \"DrawingML shape 3D", "builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() 
builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create", "self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to optimize memory consumption when rendering", "different color space for images in a document as we export it to", "\"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to display the outline navigation pane in the output", "6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group", "document will treat outline levels 2, 3, and 4 as \"missing\". # Set", "aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a \"PdfSaveOptions\" object that we can pass to the", "these fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to", "aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how to specify a compression", "document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\"", "aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\" builder.writeln(\"The", "\"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS:", "# self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0 (S) 0 (a) 0", "#else: # self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with", "property to \"False\" to # preserve the default scale of these fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size", "type of warnings about unsupported metafile records. #def test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR", "to \"PdfPageMode.USE_NONE\" to get the PDF reader to display just the document itself.", "the document in black and white. # The size of the output document", "properties has been generated.\"\"\" # return any(warning for warning in self.warnings # if", "and 3 and higher entries when we open the document. options.outline_options.expanded_outline_levels = 2", "By default, Aspose.Words downsample all images in a document that we save to", "output PDF. 
pdf_save_options.image_compression = pdf_image_compression # Set the \"jpeg_quality\" property to \"10\" to", "if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W", "range(100): builder.writeln(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, \" + \"sed do", "preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289 \",", "\"DrawingML shape 3D effects.docx\") # warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback #", "#10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "\"Arvo\" builder.writeln(\"The quick brown fox jumps over the lazy dog.\") # Configure our", "85 48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc = aw.Document(MY_DIR + \"Corrupted image.docx\")", "to fix incorrect # element positioning in the output PDF, should there be", "outline will take us to the location of its respective heading. # Set", "-1 ( ) 1 (2) -1 (0) 0 (1) 0 (8)] TJ\", #", "aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to allow the editing of annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS", "\"NumeralFormat.ARABIC_INDIC\" to # use glyphs from the U+0660 to U+0669 range as numbers.", "that it converts to PDF. # In most cases, the color space will", "method converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"page_index\"", "# self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) <", "PDF document digitally and timestamp it. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed", "self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) # elif", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\" property", "elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0", "= save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image", "b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\",", "612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode ==", "level inbetween itself and the next entry of the same or lower level,", "outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233 806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\",", "\"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick brown fox jumps over", "#ExFor:PdfDigitalSignatureTimestampSettings.server_url 
#ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign a saved PDF document digitally and", "built-in property in the tab that belongs to this document. # Set the", "\"create_note_hyperlinks\" property to \"False\" not to have footnote/endnote symbols link to anything. options.create_note_hyperlinks", "= aw.Document(MY_DIR + \"Bookmarks in headers and footers.docx\") # Create a \"PdfSaveOptions\" object", "to bitmap when # # it encounters a metafile, which will require raster", "for section in doc.sections: section = section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we", "to comply with the \"PDF/A-1a\" standard, # which complies with \"PDF/A-1b\" as well", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\",", "self.warnings.add(info) # def __getitem__(self, i) -> aw.WarningInfo: # return self.warnings[i] # def clear(self):", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document", "whose levels are above 2 from the outline. 
# The last two headings", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\",", "to \"False\" to freeze all form fields in the document at # their", "builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by disallowing all permissions. encryption_details.permissions", "(.) 0 ( ) 0 (N) 0 (o) 0 (v) 0 (e) 0", "aw.DocumentBuilder(doc) # Insert headings that can serve as TOC entries of levels 1,", "pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how to", "841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content)", "\"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content = file.read()", "(e) 0 (r) -1 ( ) 1 (2) -1 (0) 0 (1) 0", "= True # Set the \"font_embedding_mode\" property to \"EMBED_ALL\" to embed all fonts", "property to \"PdfCustomPropertiesExport.NONE\" to discard # custom document properties as we save the", "magna aliqua.\") # Create a \"PdfSaveOptions\" object that we can pass to the", "# we can open it using Adobe Acrobat and find tags for elements", "text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "save_options) #ExEnd #bookmark_editor = 
aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if", "the entry. This entry is the \"owner\" of several such \"sub-entries\". # In", "graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a \"PdfSaveOptions\" object that we", "preblend transparent images # with a background, which may reduce artifacts. # Set", "3 - Save every page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd", "True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display the title of the", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR +", "make a booklet. # Set the \"use_book_fold_printing_settings\" property to \"False\" to render the", "[202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79 0]>>\", content)", "to \"VECTOR_WITH_FALLBACK\" to try to render every metafile using vector graphics. # metafile_rendering_options.rendering_mode", "disallowing all permissions. 
encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to allow the editing", "\"Field.Update()\", and \"Document.UpdateFields()\" # each time we need them to display accurate values.", "#ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to optimize memory consumption when rendering large", "+ \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0 (S) 0 (a) 0 (m) 0 (s)", "#ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform interpolation on images while saving a document", "for create_missing_outline_levels in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to", "/XYZ 85 677 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS", "b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11 0 R/Filter", "#ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export custom properties while converting a document to", "content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self):", "builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\")", "to 36 ppi. 
options.downsample_options.resolution = 36 # Set the \"resolution_threshold\" property to only", "to \"True\" to save all hyperlinks using Javascript code # that forces readers", "display a separate panel # with a thumbnail for each page in the", "all headings within tables # in the outline, provided that they have a", "= text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0", "to # not export any bookmarks that are inside headers/footers. # Set the", "property to \"True\" to embed every glyph of every embedded font in the", "= aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name ==", "aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create", "\"PdfPageMode.USE_OC\" to get the PDF reader to display a separate panel # that", "(r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri, result in parameters: with self.subTest(uri=uri, result=result): doc =", "# to modify how that method converts the document to .PDF. 
# Set", "heading level 2 and lower outline entries # and collapse all level and", "pdf_options = aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\" property to \"True\" to save form", "how to preblend images with transparent backgrounds while saving a document to PDF.", "that method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Enable encryption", "= aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The default lifespan of the timestamp is 100", "the saving operation. # save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options # callback", "in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode)", "١١٠٠\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷", "if i % 2 == 0 else 'even'})\") if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK)", "self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\" method will apply our", "property to \"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks that are in all headers/footers. 
save_options.header_footer_bookmarks_export_mode", "else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original font sources. aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd", "# \"Arial\" is a standard font, and \"Courier New\" is a nonstandard font.", "# self.assertEqual(\"1 XYZ 85 79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85 654 0\",", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to discard", "pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self):", "self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images", "TJ\", # tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta)", "(odd)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber", "0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type /Annot/Subtype 
/Link/Rect [70.84999847 707.35101318 110.17799377", "options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content = file.read() if", "document to the PDF format using the Save method and the PdfSaveOptions class.", "fonts, saving only the glyphs # that the document is using. The file", "opening a rendered PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\")", "the \"use_core_fonts\" property to \"False\" to not apply PDF Type 1 fonts. options.use_core_fonts", "\"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to use european numerals. # Set the \"numeral_format\" property", "as file: content = file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201", "to allow the editing of annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create", "aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks):", "dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count)", "# def warning(self, info: aw.WarningInfo): # if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation:", "same or lower level, # an arrow will appear to the left of", "property to \"True\" to save all hyperlinks using Javascript code # that forces", "pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if pdf_image_compression ==", "end up in the output PDF. 
# Set the \"image_compression\" property to \"PdfImageCompression.JPEG\"", "# self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart", "entry in this outline will take us to the location of its respective", "\"Rendering.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "a whole document to PDF with three levels in the document outline. doc", "# Insert headings that can serve as TOC entries of levels 1 and", "the headings' level that will appear in the outline of a saved PDF", "10. November) -1 ( ) 1 (2) -1 (018)] TJ\", # tj_operator.to_string()) def", "and \"Courier New\" is a nonstandard font. builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name", "aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: # data = file.read().decode('utf-8')", "2, 3, and 4. # The output PDF document will treat outline levels", "contains(self, source: aw.WarningSource, type: aw.WarningType, description: str) -> bool: # \"\"\"Returns True if", "dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render", "0 (a) -1 (g) 1 (,) 0 ( ) 0 (1) 0 (0)", "0 (t) 0 (a) -1 (g) 1 (,) 0 ( ) 0 (1)", "\"PdfPageMode.USE_THUMBS\" to get the PDF reader to display a separate panel # with", "#ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign a generated PDF document.", "# self.assertEqual( # \"[(Samsta) -1 (g) 1 (, 10. 
November) -1 ( )", "#ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\",", "doc.built_in_document_properties.title = \"Windows bar pdf title\" # Create a \"PdfSaveOptions\" object that we", "#10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to set the", "R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self): #ExStart", "color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image =", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\" property to \"True\" to save", "link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85 677 0\", link_annotations[0].destination.to_string()) #", "#6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\",", "StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)", "0 R/X2 11 0 R/X3 12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", #", "structure, such tags, available via the # \"Content\" navigation pane of Adobe Acrobat", "if pdf_custom_properties_export_mode == 
aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter", "zoom factor when we open the document with it. # Set the \"zoom_factor\"", "# self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please select a fruit: Apple\",", "a document that we are rendering to PDF. doc = aw.Document(MY_DIR + \"Bookmarks", "/Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \"", "to create PDF document outline entries for headings inside tables. doc = aw.Document()", "self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC:", "lazy dog.\") # Configure our font sources to ensure that we have access", "we open this document with a reader such as Adobe Acrobat, we will", "effects.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "builder = aw.DocumentBuilder(doc) # \"Arial\" is a standard font, and \"Courier New\" is", "every embedded font in the output PDF. options.embed_full_fonts = True # Set the", "True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform interpolation on images while", "starting from page two, which will only contain the second page. doc.save(stream, options)", "render a portion of the document starting from the second page. 
options.page_set =", "def test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression", "#ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change the resolution of images", "# with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) # elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: #", "The size of the output document may be larger with this setting. #", "<</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode", "size. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action =", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file:", "doc.save(stream, options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()", "\"interpolate_images\" property to \"True\" to get the reader that opens this document to", "self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) #", "reader to display a separate panel # that allows us to work with", "even-numbered pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 - Save", "\"jpeg_quality\" property to \"10\" to strengthen compression at the cost of image quality.", "+ \"EMF.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>\", content) else:", "#ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\",", "# to render the EMF+ part of an EMF+ dual metafile if all", "two images from the document will be downsampled at this stage. 
doc.save(ARTIFACTS_DIR +", "0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type", "792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group", "/Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0", "#self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE,", "be lower than that of the device that is displaying the document. #", "to \"PdfCompliance.PDF_A1A\" to comply with the \"PDF/A-1a\" standard, # which complies with \"PDF/A-1b\"", "self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to set the numeral format used when", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\" object", "booklet. 
# Set the \"use_book_fold_printing_settings\" property to \"False\" to render the PDF normally.", "options = aw.saving.PdfSaveOptions() # Set the \"page_index\" to \"1\" to render a portion", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber =", "pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\")", "Type 1 font substitution. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\"", "#ExSummary:Shows how to enable or disable subsetting when embedding fonts while rendering a", "(, 10. November) -1 ( ) 1 (2) -1 (018)] TJ\", # tj_operator.to_string())", "the visual appearance of the document as Aspose.Words convert it to PDF. #", "220 ppi. self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set", "level that will appear in the outline of a saved PDF document. 
doc", "# Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" # to substitute DML shapes with", "# Set the \"color_mode\" property to \"GRAYSCALE\" to render all images from the", "1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create a", "# self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version)", "no usable headings. # Set the \"create_missing_outline_levels\" property to \"False\" to ignore missing", "/Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11 0 R/Filter /FlateDecode>>\", content)", "pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def", "else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts =", "options_copy = options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\")", "it converts to PDF. 
# In most cases, the color space will be", "that can serve as TOC entries of levels 1, 2, and then 3.", "/XML/Length 8 0 R/Filter /FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator)", "#ExSummary:Shows how to sign a saved PDF document digitally and timestamp it. doc", "aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3, 4, 5, 6,", "pdf title\" # Create a \"PdfSaveOptions\" object that we can pass to the", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level)", "test_note_hyperlinks(self): for create_note_hyperlinks in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to", "0 R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES):", "\"aw\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines", "aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported", "6 0 R/MediaBox [0 0 595.29998779 
841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF", "in the output PDF. save_options.outline_options.default_bookmarks_outline_level = 1 # Set the \"header_footer_bookmarks_export_mode\" property to", "the DML shapes themselves. options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with", "str) -> bool: # \"\"\"Returns True if a warning with the specified properties", "how to set the numeral format used when saving to PDF. doc =", "of 2, 3, and 4. # The output PDF document will treat outline", "ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback # save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED", "pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length)", "scale of these fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document", "the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set the", "= aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\")", "it so that the reader does not apply any interpolation. 
save_options.interpolate_images = interpolate_images", "= 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with", "\" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page", "stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image =", "#text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width,", ".PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" # to", "<</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails", "have access to both the fonts in this document. 
original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source", "obj\\r\\n\" + # b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0", "property to \"True\" to iterate through all the document # fields and update", "self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self):", "XYZ 233 806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84 47", "\"DmlEffectsRenderingMode.NONE\" to discard all DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\"", "sides of the pages, we can fold all the pages down the middle", "\", content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images in (False, True): with self.subTest(interpolate_images=interpolate_images):", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\" property to \"True\"", "tags, available via the # \"Content\" navigation pane of Adobe Acrobat at the", "property to \"True\" to render embedded EMF data # for metafiles that we", "for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how", "use. 
# Set the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs from", "True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write additional text positioning operators.", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name", "# our document to save in an output PDF document based on the", "file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn( #", "anything. options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\",", "\"rb\") as file: content = file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent", "values. builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\")", "of increased file size. # Set the \"export_document_structure\" property to \"False\" to not", "666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733 0]>>\", content) self.assertIn(", "levels 1 and 5. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations", "property to \"True\" to turn all footnote/endnote symbols # in the text act", "= ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1,", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to get", "+ \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content =", "\"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\" builder.writeln(\"The quick brown fox jumps over the", "with outline levels that do not contain any corresponding headings when saving a", "cost of image quality. pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document", "# to modify how that method converts the document to .PDF. 
pdf_options =", "signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for", "doc = aw.Document() builder = aw.DocumentBuilder(doc) # Create a table with three rows.", "677 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5", "85 806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85 48 0\",", "\"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count)", "embedded font in the output PDF. # The document's size may become very", "instructions for some PDF readers to follow when opening an output document. doc", "more accuracy and also with more processing cost. options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode)", "first section's header/footers. 
# Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to # export", "pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded)", "#self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode):", "self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions", "= file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn(", "self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart", "0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14", "#bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count) #endif def test_table_heading_outlines(self): for", "i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\" object that we can pass", "# self.assertEqual(0, 
pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent", "+ \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, #", "with it enabled. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\")", "Set the \"preblend_images\" property to \"False\" to render transparent images normally. options.preblend_images =", "87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711 0]>>\", content) else: self.assertNotIn(", "at this time. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR", "get the PDF reader to open the saved # document in full-screen mode,", "0 (8)] TJ\", # tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual(", "\"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS,", "+ \"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode ==", "a PDF reader to # apply a percentage-based zoom factor when we open", "#image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() 
#image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: #", "to \"PdfPageMode.USE_THUMBS\" to get the PDF reader to display a separate panel #", "of the document as Aspose.Words convert it to PDF. # Set the \"compliance\"", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how", "<</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber =", "builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick brown fox jumps over the", "# doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def __init__(self):", "| aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\" object that we can pass to the", "5, 6, 7, 8, 9, 10, 50, 100\") # Create a \"PdfSaveOptions\" object", "\"2\" to automatically expand all heading level 2 and lower outline entries #", "page. 
doc = aw.Document(MY_DIR + \"WMF with text.docx\") # Create a \"PdfSaveOptions\" object", "file: # content = file.read().decode('utf-8') #if preserve_form_fields: # self.assertEqual(\"Please select a fruit: \",", "(False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform interpolation on images", "== description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details", "cost of increasing the duration of the operation. # Set the \"memory_optimization\" property", "how that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Configure", "rendering quality of DrawingML effects in a document as we save it to", "\"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: # data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: #", "converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"text_compression\" property", "aims to preserve the visual appearance of the document as Aspose.Words convert it", "the \"create_outlines_for_headings_in_tables\" property to \"False\" to exclude all headings within tables, # such", "(aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode in", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action()", "+ \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\",", "self.assertEqual(\"1 XYZ 202 733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258 711 0\", link_annotations[5].destination.to_string())", "file is part of Aspose.Words. 
The source code in this file # is", "# self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode):", "shape 3D effects.docx\") # warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback # save_options", "property to \"5\" to include all headings of levels 5 and below in", "5, and no headings with levels of 2, 3, and 4. # The", "# self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) <", "and ignore the \"image_compression\" property's value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options)", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title)", "options.page_mode = page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR", "aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) #", "world!\") doc.built_in_document_properties.title = \"Windows 
bar pdf title\" # Create a \"PdfSaveOptions\" object that", "of its respective heading. # Set the \"headings_outline_levels\" property to \"5\" to include", "#ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create PDF document outline entries for headings inside", "# self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233 806 0\", outline_item_collection[1].destination.to_string())", "\"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs from the U+06F0 to U+06F9 range as numbers.", "aw.DocumentBuilder(doc) builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4, 5, 6, 7,", "based on the parity of their page numbers. # 1 - Save only", "a document as we save it to PDF. doc = aw.Document(MY_DIR + \"DrawingML", "True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make footnotes and endnotes function", "levels in the document outline. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert", "to follow when opening an output document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc)", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\")", "self.assertEqual(\"2 XYZ 85 806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85", "save_options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content = file.read() if", "# # it encounters a metafile, which will require raster operations to render", "the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a simplified version of DrawingML", "+ \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count)", "warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def __init__(self): # self.warnings: List[aw.WarningInfo] = [] # def", "we have created above from the outline. # Set the \"create_outlines_for_headings_in_tables\" property to", "= aw.saving.PdfSaveOptions() # Set the \"text_compression\" property to \"PdfTextCompression.NONE\" to not apply any", "shape fallbacks.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "This entry is the \"owner\" of several such \"sub-entries\". 
# In our document,", "= callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary", "collection.\"\"\" # self.warnings.clear() # @property # def count(self): # return len(self.warnings) # def", "True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work with outline levels", "2, 3, and 4 as \"missing\". # Set the \"create_missing_outline_levels\" property to \"True\"", "#table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: # content = file.read() #if", "else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added", "display just the document itself. options.page_mode = page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd", "embed any fonts in the output PDF. options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\",", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\" property to", "as preserving the document structure of the original document. 
# This helps with", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + #", "render the EMF+ part of an EMF+ dual metafile if all of the", "\"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart", "# Set the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to get the PDF reader to", "Set the \"scale_wmf_fonts_to_metafile_size\" property to \"True\" to scale fonts # that format text", "\"sub-entries\". # In our document, the outline entries from the 5th heading level", "# doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description,", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5,", "fields and update them before we save it as a PDF. This will", "__init__(self): # self.save_warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.WarningType", "visible. # Set the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to get the PDF reader", "programmatically interpreting our document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading", "form fields in the document at # their current values and display them", "Set the \"export_document_structure\" property to \"False\" to not export the document structure. options.export_document_structure", "= aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3, 4, 5,", "\"owner\" entry to collapse/expand all its sub-entries. # Set the \"expanded_outline_levels\" property to", "/Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0", "PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings that can", "test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert only some of the pages", "output document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a", "# Set the \"export_document_structure\" property to \"False\" to not export the document structure.", "property to \"PdfPageMode.USE_OUTLINES\" to display the outline navigation pane in the output PDF.", "PDF. doc = aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\") # Create a \"PdfSaveOptions\" object", "that we can pass to the document's \"save\" method # to modify how", "654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711 0]>>\", content) else: self.assertNotIn( b\"<</Type", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\" property", "as a PDF normally. save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def", "\"Aspose Office\", datetime.now()) # Create a timestamp authority-verified timestamp. 
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\",", "will not appear. save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor =", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions()", "PDF reader to display just the document itself. options.page_mode = page_mode doc.save(ARTIFACTS_DIR +", "202 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W", "location of its respective heading. # Set the \"headings_outline_levels\" property to \"1\" to", "+ \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the \"resolution\" property to \"36\" to downsample all", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"export_document_structure\" property to \"True\" to make", "# When we open this document, we will need to provide the password", "converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF # The", "section's header/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks", "\"PdfSaveOptions\" object that we can pass to the document's \"save\" method # to", "apply our signature to the output document at this time. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\",", "the \"resolution\" property to \"36\" to downsample all images to 36 ppi. options.downsample_options.resolution", "to downsample all images to 36 ppi. options.downsample_options.resolution = 36 # Set the", "modify how that method converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() #", "= 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\")", "page starting from page two, which will only contain the second page. doc.save(stream,", "jumps over the lazy dog.\") # Create a \"PdfSaveOptions\" object that we can", "# This document contains headings of levels 1 and 5, and no headings", "all # bookmarks at the first level of the outline in the output", "the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"interpolate_images\" property to", "them before we save it as a PDF. This will make sure that", "readers to open these links in new windows/browser tabs. # Set the \"open_hyperlinks_in_new_window\"", "any(warning for warning in self.warnings # if warning.source == source and warning.warning_type ==", "If we are rendering the document as a booklet, we must set the", "follow when opening an output document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello", "large, but we will have full use of all fonts if we edit", "every embedded font in the output PDF. # The document's size may become", "PDF. 
doc = aw.Document(MY_DIR + \"Rendering.docx\") # Create a \"PdfSaveOptions\" object that we", "the value of the document's \"title\" built-in property in the tab that belongs", "= aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR", "= dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as", "0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)", "kind, either expressed or implied. import io import os from datetime import datetime,", "14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data) # outline_item_collection = pdf_doc.outlines #", "to WMF fonts scaling according to metafile size on the page. doc =", "#ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change the resolution of images in", "\"rb\") as file: content = file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn(", "400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>\", content)", "Set the \"preserve_form_fields\" property to \"False\" to freeze all form fields in the", "updating methods such as \"Field.Update()\", and \"Document.UpdateFields()\" # each time we need them", "use european numerals. 
# Set the \"numeral_format\" property to \"NumeralFormat.SYSTEM\" to determine the", "#if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #", "to \"PdfPageMode.USE_OC\" to get the PDF reader to display a separate panel #", "def test_export_document_structure(self): for export_document_structure in (False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how", "#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: # content", "the document to .PDF and applies the configuration # # in our MetafileRenderingOptions", "#ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write additional text positioning operators. doc = aw.Document(MY_DIR", "should there be any, at the cost of increased file size. # Set", "open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content = file.read() with io.BytesIO() as stream:", "0 R>>\\r\\n\", content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3 0", "1 font substitution. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello", "encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by disallowing all permissions. encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL", "self.assertEqual(\", ٢, ٣, ٤, ٥, ٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text)", "the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to get the PDF reader to open the", "size of the metafile on the page. 
# Set the \"scale_wmf_fonts_to_metafile_size\" property to", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ", "percentage-based zoom factor when we open the document with it. # Set the", "property to \"True\" to attempt to fix incorrect # element positioning in the", "= ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback # save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode =", "Configure the \"digital_signature_details\" object of the \"SaveOptions\" object to # digitally sign the", "document to interpolate images. # Their resolution should be lower than that of", "the form of a book fold. doc = aw.Document(MY_DIR + \"Paragraphs.docx\") # Create", "#ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display the title of the document as the title", "to any custom fonts if we edit the document. options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR", "+ \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\",", "#ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign", "#link_annot = page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() 
#self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in", "the \"PDF/A-1a\" standard, # which complies with \"PDF/A-1b\" as well as preserving the", "== aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title in", "tag in the document structure to export the text language. doc = aw.Document()", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format ==", "test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert a whole document", "to try to render every metafile using vector graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK", "0 R>>stream\", file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR", "the \"additional_text_positioning\" property to \"False\" to render the document as usual. save_options.additional_text_positioning =", "select the color space for images in the document that it converts to", "method converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"compliance\"", "#elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC,", "self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox", "when opening an output document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\")", "8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents", "we save it to PDF. doc = aw.Document(MY_DIR + \"DrawingML shape effects.docx\") #", "/Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot =", "headings' level that will appear in the outline of a saved PDF document.", "[157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 677 0]>>\", content)", "how to apply text compression when saving a document to PDF. 
doc =", "image # to see the interpolation effect if we saved the document with", ",۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱\", text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows", "the PDF reader to display a separate panel # with a thumbnail for", "a document we convert to PDF so that they open new pages when", "a reader such as Adobe Acrobat, we will need to zoom in on", "open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content = file.read() if export_document_structure: self.assertIn( b\"5", "/Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 677 0]>>\",", "paragraph via \"View\" -> \"Show/Hide\" -> \"Navigation panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\",", "Aspose.Words to # automatically select the color space for images in the document", "to # preserve the default scale of these fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR", "#ExFor:PdfImageCompression #ExSummary:Shows how to specify a compression type for all images in a", "aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process", "will serve as the column header. builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row()", "metafiles that we can render as vector graphics. 
save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR +", "will be up to date before saving. # Set the \"update_fields\" property to", "them using updating methods such as \"Field.Update()\", and \"Document.UpdateFields()\" # each time we", "+ \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\" if update_fields else", "Acrobat at the cost of increased file size. # Set the \"export_document_structure\" property", "title\", pdf_document.info.title) def test_memory_optimization(self): for memory_optimization in (False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat)", "replace some fonts, # including the two fonts in our document, with their", "test_preblend_images(self): for preblend_images in (False, True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to", "ignore the \"image_compression\" property's value. 
pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd", "page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows", "file: content = file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377", "\"<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt", "8 0 R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) #", "we save the document to PDF. # Set the \"text_compression\" property to \"PdfTextCompression.FLATE\"", "# which aims to preserve the visual appearance of the document as Aspose.Words", "only the glyphs # that the document is using. The file will be", "builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR", "(e) 0 (m) 0 (b) 0 (e) 0 (r) -1 ( ) 1", "is ignored. save_options.export_document_structure = True save_options.export_language_to_span_tag = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_language_to_span_tag.pdf\", save_options) #ExEnd", "up the locale to determine what number of glyphs to use. 
# Set", "XYZ 212 711 0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode", "#self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with", "+ \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000,", "262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68 0]>>\", content) self.assertIn( b\"<</Type", "# Set the \"jpeg_quality\" property to \"10\" to strengthen compression at the cost", "how to render fallback shapes when saving to PDF. doc = aw.Document(MY_DIR +", "outline navigation pane in the output PDF. save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set the", "# Set the \"numeral_format\" property to \"NumeralFormat.SYSTEM\" to determine the symbol set from", "a compression type for all images in a document that we are converting", "can pass to the document's \"save\" method # to modify how that method", "smaller, # but we may need access to any custom fonts if we", "to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"True\" to", "to \"PdfPageMode.USE_OUTLINES\" to display the outline navigation pane in the output PDF. save_options.page_mode", "document based on the parity of their page numbers. 
# 1 - Save", "0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot", "#elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN,", "0]>>\", content) else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\")", "\"False\" to render transparent images normally. options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options)", "with the \"save\" method. signing_time = datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My", "and 5th heading level entries are sub-entries of the second 3rd level entry,", "the \"multiple_pages\" # properties of the page setup objects of all sections to", "we save the document to .PDF. # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\"", "4, 5, 6, 7, 8, 9, 10, 50, 100\") # Create a \"PdfSaveOptions\"", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # The output PDF document", "contents.\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "the document to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_compression\" property to", "##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback to bitmap", "create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file:", "text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export Odd pages from the", "entry is the \"owner\" of several such \"sub-entries\". # In our document, the", "0 (m) 0 (b) 0 (e) 0 (r) -1 ( ) 1 (2)", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\" property", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ", "pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields in (False, True): with self.subTest(update_fields=update_fields):", "to display the title of the document as the title bar. doc =", "# Aspose.Words will also apply Flate compression to all images and ignore the", "set our timeout period via the constructor. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30))", "shapes when saving to PDF. doc = aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\") #", "to sign the document when we save it to PDF. 
certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR", "os from datetime import datetime, timedelta, timezone import aspose.words as aw import aspose.pydrawing", "that format text within WMF images according to the size of the metafile", "description: str) -> bool: # \"\"\"Returns True if a warning with the specified", "\"False\" to not apply PDF Type 1 fonts. options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR +", "\"PdfCustomPropertiesExport.STANDARD\" # to preserve custom properties within the output PDF document. # Set", "#self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level)", "turn all footnote/endnote symbols # in the text act as links that, upon", "to apply subsetting to fonts, saving only the glyphs # that the document", "= 36 # Set the \"resolution_threshold\" property to only apply the downsampling to", "to make the document structure, such tags, available via the # \"Content\" navigation", "#else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\",", "in a document that we are converting to PDF. 
doc = aw.Document() builder", "self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the \"resolution\" property", "not to have footnote/endnote symbols link to anything. options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR +", "a way that creates a booklet. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document =", "as interactive objects in the output PDF. # Set the \"preserve_form_fields\" property to", "pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE):", "in the output PDF, should there be any, at the cost of increased", "on both sides of the pages, we can fold all the pages down", "= aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\" object that we can pass", "self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for", "< text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) #", "( ) 1 (2) -1 (018)] TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold", "a saved PDF document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details", "pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for pdf_text_compression", "#self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart", "in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to save a document", "pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) #", "self.assertIn(\"11 0 obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289 \", content)", "document, the bigger the impact that this will have. options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR", "the metafile on the page. # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"False\" to", "configure Enhanced Windows Metafile-related rendering options when saving to PDF. 
doc = aw.Document(MY_DIR", "content) else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page", "table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to", "current values and display them as plain text in the output PDF. pdf_options.preserve_form_fields", "711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157 733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ", "to get the outline # to only register headings with heading levels that", "save_options) # self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def __init__(self): # self.warnings: List[aw.WarningInfo] =", "== aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content)", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\" property to", "# The size of the output document may be larger with this setting.", "all images in color. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\",", "builder.writeln(\"Heading 1.2.2\") # Create a \"PdfSaveOptions\" object that we can pass to the", "1 and 5. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading", "the U+0660 to U+0669 range as numbers. # Set the \"numeral_format\" property to", "become very large, but we will have full use of all fonts if", "signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\",", "of levels 1 and 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier =", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2,", "options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version)", "headings we have inserted above will not appear. 
save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR +", "save a document to the PDF format in the form of a book", "converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Enable encryption via the", "glyphs from the U+06F0 to U+06F9 range as numbers. # Set the \"numeral_format\"", "(m) 0 (s) 0 (t) 0 (a) -1 (g) 1 (,) 0 (", "text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\"", "\"1\" to display all # bookmarks at the first level of the outline", "Suppose we export document structure while saving this document. In that case, #", "#ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to set the numeral format used when saving", "\"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page", "\"Text positioning operators.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "to provide the password before accessing its contents. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd", "3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB", "backgrounds while saving a document to PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc)", "#text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC,", "= create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if", "+ \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels in (False,", "symbol set from regional settings. options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd", "< text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) #", "document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\")", "this setting. 
# Set the \"color_mode\" property to \"NORMAL\" to render all images", "pdf_doc_image_stream) # elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with", "pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document =", "= link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window):", "values and display them as plain text in the output PDF. pdf_options.preserve_form_fields =", "# # Set the \"emulate_raster_operations\" property to \"False\" to fall back to bitmap", "in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode", "content) elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4", "will need to manually update them using updating methods such as \"Field.Update()\", and", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\"", "combo box which will allow a user to choose an option from a", "it to make a booklet. 
# Set the \"use_book_fold_printing_settings\" property to \"False\" to", "0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85 79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85", "builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows bar pdf title\" # Create", "DrawingML effects in a document as we save it to PDF. doc =", "header/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks that", "#self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for", "\"Footnotes and endnotes.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "not update all the fields in a document right before a save operation.", "reader that opens this document to interpolate images. # Their resolution should be", "processed. Possibly unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback): # def __init__(self): # self.save_warnings =", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"export_document_structure\" property to", "save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as", "+ \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 - Save every page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR", "our signature to the output document at this time. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options)", "= aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\" property to \"True\" to replace some fonts,", "self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode", "the output PDF, should there be any, at the cost of increased file", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7 0", "object that we can pass to the document's \"save\" method # to modify", "#4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\",", "# Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to discard all DrawingML effects. #", "+ \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "the \"owner\" entry to collapse/expand all its sub-entries. 
# Set the \"expanded_outline_levels\" property", "(aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif", "aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width)", "a table of contents that lists headings in the document body. # Clicking", "a combo box which will allow a user to choose an option from", "number of glyphs to use. # Set the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to", "the password before accessing its contents. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception):", ",۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱\", text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set", "#5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\",", "property to \"NORMAL\" to render all images in color. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode", "the fields will display # the most accurate values in the PDF. 
options.update_fields", "method # # to modify how that method converts the document to .PDF", "R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber", "self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode", "= aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") #", "aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count)", "# We will need to manually update them using updating methods such as", "that method converts the document to .PDF. 
# Set the \"color_mode\" property to", "aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") # Create a \"PdfSaveOptions\" object that we can pass", "#image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: #", "1 of 2\" if update_fields else \"Page of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber()", "#ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign a saved", "a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # \"Arial\" is a", "# self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image can not be processed. Possibly unsupported image", "fonts scaling according to metafile size on the page. 
doc = aw.Document(MY_DIR +", "#action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC,", "property to \"True\" to preblend transparent images # with a background, which may", "def test_interpolate_images(self): for interpolate_images in (False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how", "\"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to get the PDF reader to display a separate", "for display_doc_title in (False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display", "that will appear in the outline of a saved PDF document. doc =", "info: aw.WarningInfo): # if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def", "R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode in", "to not apply any # compression to text when we save the document", "filename. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document", "\"image_compression\" property to \"PdfImageCompression.JPEG\" to use the # \"image_compression\" property to control the", "it to PDF. 
doc = aw.Document(MY_DIR + \"DrawingML shape effects.docx\") # Create a", "aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" # to only render the", "R>>stream\", file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR +", "# self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title in (False, True):", "an outline entry has subsequent entries of a higher level inbetween itself and", "to \"True\" to get some PDF readers, such as Adobe Acrobat Pro, #", "aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure in (False, True): with self.subTest(export_document_structure=export_document_structure):", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"numeral_format\"", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)", "output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to only allow nonstandard", "+ \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")", "/XYZ 85 68 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS", "Metafile-related rendering options when saving to PDF. 
doc = aw.Document(MY_DIR + \"EMF.docx\") #", "a reader such as Adobe Acrobat, we will see the document scaled at", "\"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode", "\"rb\") as file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /Annot/Subtype", "in on the image # to see the interpolation effect if we saved", "the timestamp is 100 seconds. self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set our timeout", "substitute DML shapes with their fallback shapes. # Set the \"dml_rendering_mode\" property to", "aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\" property to \"True\" to lower the memory footprint", "content = file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS", "Set the \"page_mode\" property to \"PdfPageMode.USE_NONE\" to get the PDF reader to display", "of a higher level inbetween itself and the next entry of the same", "# bookmarks at the first level of the outline in the output PDF.", "aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows bar pdf title\" # Create a \"PdfSaveOptions\"", "the \"preblend_images\" property to \"True\" to preblend transparent images # with a background,", "\"preblend_images\" property to \"True\" to preblend transparent images # with a background, which", "Set the \"image_compression\" property to \"PdfImageCompression.AUTO\" to use the # \"image_compression\" property to", "+ \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) # elif pdf_image_compression ==", 
"aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot", "Enhanced Windows Metafile-related rendering options when saving to PDF. doc = aw.Document(MY_DIR +", "test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR + \"WMF with image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions()", "font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for font in font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\" object", "contain an outline, which is a table of contents that lists headings in", "modify how that method converts the document to .PDF. pdf_options = aw.saving.PdfSaveOptions() #", "downsampled at this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "determine the symbol set from regional settings. 
options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\",", "2 - Save only the odd-numbered pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\",", "signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str)", "zoom factor a value of 25%. options = aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor", "levels 1 to 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2", "XYZ 157 733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212 711 0\", link_annotations[7].destination.to_string()) #else:", "self.assertIn( b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI", "\"additional_text_positioning\" property to \"False\" to render the document as usual. save_options.additional_text_positioning = apply_additional_text_positioning", "# use glyphs from the U+0660 to U+0669 range as numbers. # Set", "to modify how that method converts the document to .PDF. save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF)", "= aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 - Save every page: options.page_set", "of its respective heading. 
# Set the \"headings_outline_levels\" property to \"4\" to exclude", "# Set the \"preblend_images\" property to \"True\" to preblend transparent images # with", "+ \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image can not be processed.", "Jpeg images that end up in the output PDF. # Set the \"image_compression\"", "b\"7 0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content)", "footnote/endnote symbols # in the text act as links that, upon clicking, take", "content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how", "every glyph of every embedded font in the output PDF. # The document's", "if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0", "fallback to bitmap rendering and changing type of warnings about unsupported metafile records.", "builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create a \"PdfSaveOptions\" object that we", "self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) # elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR", "\"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for section in doc.sections: section = section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING", "determine what number of glyphs to use. 
# Set the \"numeral_format\" property to", "= aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is ignored. save_options.export_document_structure =", "the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OC\" to get the PDF", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset)", "self.warnings.clear() # @property # def count(self): # return len(self.warnings) # def contains(self, source:", "#ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable or disable subsetting when embedding fonts while rendering", "bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count) #endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in", "method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # The output PDF", "\"4\" to exclude all headings whose levels are above 4 from the outline.", "Set the \"scale_wmf_fonts_to_metafile_size\" property to \"False\" to # preserve the default scale of", "to not export the document structure. 
options.export_document_structure = export_document_structure # Suppose we export", "721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\")", "+ b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page =", "# element positioning in the output PDF, should there be any, at the", "(1) 0 (8)] TJ\", # tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) #", "so that the reader does not apply any interpolation. save_options.interpolate_images = interpolate_images #", "options.zoom_factor = 25 # When we open this document using a reader such", "# \"Page 4 (even)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart", "1 (odd)\\r\\n\" + # \"Page 2 (even)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" +", "#elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\",", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\" property to \"True\" to arrange", "headings within tables, # such as the one we have created above from", "\"False\" to render the document as usual. save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\",", "but we will have full use of all fonts if we edit the", "aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings that can serve as TOC entries", "the document to .PDF. 
# Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" # to", "content) # self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with", "content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R", "timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\" method", "preserve custom properties in an XMP packet. options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\",", "#6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\",", "create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber =", "#self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count) #endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False,", "the resolution of images in the PDF document. 
doc = aw.Document(MY_DIR + \"Images.docx\")", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode ==", "711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype", "+ \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: # data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE:", "Type 1 equivalents. # Set the \"use_core_fonts\" property to \"False\" to not apply", "property to \"DmlRenderingMode.FALLBACK\" # to substitute DML shapes with their fallback shapes. #", "builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create a \"PdfSaveOptions\" object that we can pass", "def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to set the default zooming", "(2) -1 (0) 0 (1) 0 (8)] TJ\", # tj_operator.to_string()) #else: # self.assertLess(97000,", "obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8", "self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode", "image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content = file.read()", "builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\") as stream: #", "in the outline of a saved PDF document. 
doc = aw.Document() builder =", "not apply any # compression to text when we save the document to", "we are rendering the document as a booklet, we must set the \"multiple_pages\"", "create PDF document outline entries for headings inside tables. doc = aw.Document() builder", "#ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to update all the fields in a document immediately", "5 and below in the outline. save_options.outline_options.headings_outline_levels = 5 # This document contains", "effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a simplified", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file:", "aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲", "self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0", "= pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0", "property to \"True\" to include all missing levels in the outline, # leaving", "1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\")", "has subsequent entries of a higher level inbetween itself and the next entry", "property to 
\"DmlEffectsRenderingMode.NONE\" to discard all DrawingML effects. # Set the \"dml_effects_rendering_mode\" property", "document structure, such tags, available via the # \"Content\" navigation pane of Adobe", "which can assist in programmatically interpreting our document. doc = aw.Document() builder =", "self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif", "converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property", "update all the fields in a document immediately before saving it to PDF.", "Set the \"font_embedding_mode\" property to \"EMBED_ALL\" to embed all fonts in the output", "0 (e) 0 (m) 0 (b) 0 (e) 0 (r) -1 ( )", "to \"False\" to render the document as usual. save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR +", "their PDF Type 1 equivalents. # Set the \"use_core_fonts\" property to \"False\" to", "\"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) # elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG:", "self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def", "color space for all images in the saved PDF. # Aspose.Words will also", "and timestamp it. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") #", "# Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to get the PDF reader #", "to the location of its respective heading. 
# Set the \"headings_outline_levels\" property to", "\"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2,", "\"image_compression\" property to control the quality of all images that end up in", "in (False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve document structure", "R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart", "#with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: # content = file.read().decode('utf-8') #if preserve_form_fields:", "self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱\", text_absorber.text)", "# Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to # render DrawingML effects with", "#9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows", "in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how", 
"property to \"True\" to lower the memory footprint of large documents' saving operations", "PDF in a way that helps us use it to make a booklet.", "larger than 1. pdf_save_options.outline_options.headings_outline_levels = 1 # Set the \"create_outlines_for_headings_in_tables\" property to \"False\"", "open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content = file.read() if interpolate_images: self.assertIn( b\"7", "Times New Roman fonts into a PDF document. doc = aw.Document() builder =", "device that is displaying the document. # Set the \"interpolate_images\" property to \"False\"", "be desirable. # Set the \"embed_full_fonts\" property to \"True\" to embed every glyph", "#elif page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks in (False,", "#3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\",", "of the same or lower level, # an arrow will appear to the", "how to set the default zooming that a reader applies when opening a", "# self.assertEqual(\"1 XYZ 85 677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85 79 0\",", "self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\" method will apply our signature to the output", "self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\",", "to \"False\" to exclude all 
headings within tables, # such as the one", "#ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content = file.read() if create_note_hyperlinks:", "file.read() self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT", "unsupported metafile records. #def test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR + \"WMF with image.docx\")", "the document scaled at 1/4 of its actual size. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options)", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action()", ".PDF. options = aw.saving.PdfSaveOptions() # Below are three \"page_set\" properties that we can", "respective footnotes/endnotes. # Set the \"create_note_hyperlinks\" property to \"False\" not to have footnote/endnote", "aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded)", "= aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 - Save only the odd-numbered", "save it to PDF. certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder,", "document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\" property to \"True\"", "\"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\")", "how to write additional text positioning operators. doc = aw.Document(MY_DIR + \"Text positioning", "595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", #", "#self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode", "self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF fonts scaling according to metafile size", "that we are converting to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg", "\"EmfPlusDualRenderingMode.EMF\" # to only render the EMF part of an EMF+ dual metafile.", "not contain any corresponding headings when saving a PDF document. doc = aw.Document()", "self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to set the PDF standards compliance level", "vector graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a \"PdfSaveOptions\" object that", "standards compliance level of saved PDF documents. 
doc = aw.Document(MY_DIR + \"Images.docx\") #", "#self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\",", "# Set the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs from the", "embedding Arial and Times New Roman fonts into a PDF document. doc =", "to \"5\" to include all headings of levels 5 and below in the", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\",", "+ \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content = file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype", "#if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5", "# # Set the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to try to render every", "tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta) -1 (g)", "converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Since our document contains", "options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the \"resolution\" property to", "builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows bar pdf title\" # Create a \"PdfSaveOptions\" object", "733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258 711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ", "self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5 0 obj\\r\\n<</Type /Page/Parent", "True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create PDF document outline entries", "722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf", "the zoom factor a value of 25%. options = aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR", "render the EMF part of an EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\"", "4. # The output PDF document will treat outline levels 2, 3, and", "that belongs to this document. # Set the \"display_doc_title\" to \"False\" to get", "Set the \"use_book_fold_printing_settings\" property to \"False\" to render the PDF normally. options.use_book_fold_printing_settings =", "each time we need them to display accurate values. 
builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\")", "source and warning.warning_type == type and warning.description == description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails", "fallback shapes. # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" # to render the", "property to \"False\" to fall back to bitmap when # # it encounters", "(,) 0 ( ) 0 (1) 0 (0) 0 (.) 0 ( )", "[70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document =", "\"additional_text_positioning\" property to \"True\" to attempt to fix incorrect # element positioning in", "headings that can serve as TOC entries of levels 1, 2, and then", "0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for preblend_images in (False,", "embedded EMF data # for metafiles that we can render as vector graphics.", "self.assertTrue(any(font.full_font_name == \"Arvo\" for font in font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\" object that", "as numbers. # Set the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to use european numerals.", "self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save a document to the PDF format", "specified properties has been generated.\"\"\" # return any(warning for warning in self.warnings #", "exclude all headings whose levels are above 4 from the outline. 
options.outline_options.headings_outline_levels =", "their current values and display them as plain text in the output PDF.", "builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page =", "cost of increased file size. # Set the \"additional_text_positioning\" property to \"False\" to", "pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to", "to \"False\" not to have footnote/endnote symbols link to anything. options.create_note_hyperlinks = create_note_hyperlinks", "applies the configuration # # in our MetafileRenderingOptions object to the saving operation.", "pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")", "+ \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\" +", "#if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber", "+ \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: # content = file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE,", "to PDF to 220 ppi. 
self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\",", "# Set the \"compliance\" property to \"PdfCompliance.PDF17\" to comply with the \"1.7\" standard.", "pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts", "PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert text with PAGE and", "when embedding fonts while rendering a document to PDF. doc = aw.Document() builder", "a PDF. This will make sure that all the fields will display #", "\"PDF/A-1a\" standard, # which complies with \"PDF/A-1b\" as well as preserving the document", "#ExSummary:Shows how to work with outline levels that do not contain any corresponding", "the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to # use glyphs from the U+0660 to", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000,", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to discard", "as pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if pdf_image_compression", "builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4, 5, 6, 7, 8,", "options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 - Save every page:", "the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"page_index\" to \"1\"", "# Set the \"font_embedding_mode\" property to \"EMBED_ALL\" to embed all fonts in the", "< text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) #", "from regional settings. options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document =", "to \"DmlRenderingMode.FALLBACK\" # to substitute DML shapes with their fallback shapes. # Set", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a \"PdfSaveOptions\" object that", "pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC: #", "# self.assertEqual(\"1 XYZ 212 711 0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self):", "builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create a \"PdfSaveOptions\"", "compression # to text when we save the document to PDF. The larger", "properties of the page setup objects of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". 
if render_text_as_bookfold:", "0/Tabs /S>>\", content) else: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0", "/XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11 0 R/Filter /FlateDecode>>\",", "#ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit the headings' level that will appear in the", "= header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name", "open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\") as stream: # Create a \"PdfSaveOptions\" object that we", "book fold. doc = aw.Document(MY_DIR + \"Paragraphs.docx\") # Create a \"PdfSaveOptions\" object that", "# document in full-screen mode, which takes over the monitor's display and has", "options = aw.saving.PdfSaveOptions() # Set the \"export_document_structure\" property to \"True\" to make the", "doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings that can serve as", "document to PDF. doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") # Create a \"PdfSaveOptions\"", "# self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD:", "property to \"EMBED_ALL\" to embed all fonts in the output PDF. 
# Set", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document =", "as file: content = file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3", "# def __init__(self): # self.save_warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): #", "0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>\", content) else: self.assertIn( b\"7 0 obj\\r\\n\"", "# self.assertEqual(8, len([a for a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL,", "dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0", "larger than the value of the \"headings_outline_levels\" property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR +", "how to set the PDF standards compliance level of saved PDF documents. doc", "#ExSummary:Shows how to set a different color space for images in a document", "heading. 
# Set the \"headings_outline_levels\" property to \"1\" to get the outline #", "\"False\" to freeze all form fields in the document at # their current", "#self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts in (False,", "= file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \"", "#self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority)", "aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export custom properties while", "#if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15,", "Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" # to substitute DML shapes with their", "PDF. 
doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") # Create a \"PdfSaveOptions\" object that", "# Set the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to only allow nonstandard fonts' embedding", "FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert only", "numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber =", "PDF. doc = aw.Document(MY_DIR + \"DrawingML shape effects.docx\") # Create a \"PdfSaveOptions\" object", "== aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR +", "with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to update all the fields in", "save the document as a PDF normally. save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\",", "respective heading. 
# Set the \"headings_outline_levels\" property to \"4\" to exclude all headings", "aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\" for font in", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content = file.read() if pdf_custom_properties_export_mode ==", "builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\") as stream:", "= aw.Document() builder = aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\") builder.insert_image(img)", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\" property to \"True\" to", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page", "# \"[(Samsta) -1 (g) 1 (, 10. November) -1 ( ) 1 (2)", "aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set a different color", "the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" # to preserve custom properties in an XMP", "its sub-entries. # Set the \"expanded_outline_levels\" property to \"2\" to automatically expand all", "output PDF. 
options.embed_full_fonts = True # Set the \"font_embedding_mode\" property to \"EMBED_ALL\" to", "/Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\",", "output PDF document based on the parity of their page numbers. # 1", "the document starting from the second page. options.page_set = aw.saving.PageSet(1) # This document", "PDF with three levels in the document outline. doc = aw.Document() builder =", "document is using. The file will be considerably smaller, # but we may", "fonts, # including the two fonts in our document, with their PDF Type", "0 R /XYZ 258 711 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078", "+ \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber)", "print(\"Unsupported operation: \" + info.description) # self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in", "the most accurate values in the PDF. 
options.update_fields = update_fields # We can", "0 (a) 0 (m) 0 (s) 0 (t) 0 (a) -1 (g) 1", "\"Images.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "property to \"1\" to get the outline # to only register headings with", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title) def test_memory_optimization(self): for", "Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date)", "doc.custom_document_properties.add(\"Company\", \"My value\") # Create a \"PdfSaveOptions\" object that we can pass to", "and find tags for elements such as the heading # and the next", "effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\")", "0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self):", "builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create a \"PdfSaveOptions\" object", "= aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"True\" to scale fonts #", "= aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() 
#table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as", "the \"preserve_form_fields\" property to \"False\" to freeze all form fields in the document", "next entry of the same or lower level, # an arrow will appear", "in a document we convert to PDF so that they open new pages", "builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier =", "entry. This entry is the \"owner\" of several such \"sub-entries\". # In our", "document to .PDF. pdf_options = aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\" property to \"True\"", "#ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write additional text positioning operators. doc = aw.Document(MY_DIR +", "are above 2 from the outline. # The last two headings we have", "to PDF. # Set the \"text_compression\" property to \"PdfTextCompression.FLATE\" to apply ZIP compression", "to change image color with saving options property. doc = aw.Document(MY_DIR + \"Images.docx\")", "color space will be RGB. # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" #", "the duration of the operation. # Set the \"memory_optimization\" property to \"False\" to", "#text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" + # \"Page 4 (even)\", text_absorber.text)", "way that helps us use it to make a booklet. # Set the", "one we have created above from the outline. 
# Set the \"create_outlines_for_headings_in_tables\" property", "\"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045,", "Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to discard # custom document properties as", "no larger than 1. pdf_save_options.outline_options.headings_outline_levels = 1 # Set the \"create_outlines_for_headings_in_tables\" property to", "save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7,", "we open this document using a reader such as Adobe Acrobat, we will", "builder.writeln(f\"Page {i + 1} ({'odd' if i % 2 == 0 else 'even'})\")", "save the document to PDF. The larger the document, the bigger the impact", "aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A,", "document right before a save operation. # This is the preferable option if", "how to work with outline levels that do not contain any corresponding headings", "document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page", "of image quality. pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document =", "\"2\" to exclude all headings whose levels are above 2 from the outline.", "PDF document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\") certificate_holder", "mode, which takes over the monitor's display and has no controls visible. #", "on the page. doc = aw.Document(MY_DIR + \"WMF with text.docx\") # Create a", "/XYZ 85 79 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS", "aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: # content = file.read().decode('utf-8')", "display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title)", "# def warning(self, info: aw.WarningInfo): # if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\")", "#ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content = file.read() if interpolate_images:", "== aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content)", "aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\" for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for font", "saving this document. In that case, # we can open it using Adobe", "set a different color space for images in a document as we export", "output document may be desirable. 
# Set the \"embed_full_fonts\" property to \"True\" to", "# When we open this document with a reader such as Adobe Acrobat,", "# self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) #", "# def clear(self): # \"\"\"Clears warning collection.\"\"\" # self.warnings.clear() # @property # def", "in the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to get the", "our font sources to ensure that we have access to both the fonts", "file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0", "self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\" method will apply our signature to", "to display accurate values. builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\")", "#self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 2 (even)\\r\\n\" + # \"Page 3 (odd)\\r\\n\"", "that is no larger than the value of the \"headings_outline_levels\" property. 
pdf_save_options.outline_options.create_outlines_for_headings_in_tables =", "10 0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>\", content) else: self.assertIn( b\"7 0", "to display all # bookmarks at the first level of the outline in", "to iterate through all the document # fields and update them before we", "self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()", "# return self.warnings[i] # def clear(self): # \"\"\"Clears warning collection.\"\"\" # self.warnings.clear() #", "643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711 0]>>\", content) else:", "original font sources. aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()", "effects.docx\") # warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback # save_options = aw.saving.PdfSaveOptions()", "[ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri, result in parameters: with", "# Set the \"open_hyperlinks_in_new_window\" property to \"False\" to save all hyperlinks normally. 
options.open_hyperlinks_in_new_window", "code in this file # is only intended as a supplement to the", "our document to save in an output PDF document based on the parity", "pdf_document.metadata.count) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\",", "612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0", "datetime.now()) # Create a timestamp authority-verified timestamp. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") #", "compression at the cost of image quality. pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\",", "XYZ 84 47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85 806", "Set the \"compliance\" property to \"PdfCompliance.PDF_A1A\" to comply with the \"PDF/A-1a\" standard, #", "0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent", "def test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode", "provide the password before accessing its contents. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with", "0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE 14", "Signing\", \"Aspose Office\", datetime.now()) # Create a timestamp authority-verified timestamp. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\",", "apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0 (S) 0 (a)", "that occur upon saving a document.\"\"\" # def __init__(self): # self.warnings = aw.WarningInfoCollection()", "# self.assertEqual(\"'R2_XORPEN' binary raster operation is partly supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): #", "open this document using a reader such as Adobe Acrobat, we will see", "\"Test Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\",", "to open the saved # document in full-screen mode, which takes over the", "options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set our timeout period via the constructor. options.digital_signature_details.timestamp_settings =", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\" object of", "in (False, True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to update all", "documents searchable but may significantly increase the size of already large documents. 
save_options.compliance", "aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U,", "options.embed_full_fonts = True # Set the \"font_embedding_mode\" property to \"EMBED_ALL\" to embed all", "property to \"True\" to replace some fonts, # including the two fonts in", "to get the PDF reader # also to display the outline, if possible.", "test_update_fields(self): for update_fields in (False, True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"numeral_format\" property to", "#self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields", "the document as Aspose.Words convert it to PDF. # Set the \"compliance\" property", "real time. # We will need to manually update them using updating methods", "\"GRAYSCALE\" to render all images from the document in black and white. #", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"text_compression\" property to \"PdfTextCompression.NONE\" to not", "Enable encryption via the \"encryption_details\" property. 
save_options.encryption_details = encryption_details # When we open", "#if pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance ==", "= aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\",", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\")", "Set the \"numeral_format\" property to \"NumeralFormat.CONTEXT\" to # look up the locale to", "builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading", "opens this document to interpolate images. # Their resolution should be lower than", "document to .PDF. options = aw.saving.PdfSaveOptions() # Create a digital signature and assign", "to embed all fonts in the output PDF. # Set the \"font_embedding_mode\" property", "converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"compliance\" property", "== aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure in (False, True): with", "# Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the EMF+ part", "+ \"Text positioning operators.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "a heading level that is no larger than the value of the \"headings_outline_levels\"", "pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name)", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR +", "all fonts if we edit the PDF. 
# Set the \"embed_full_fonts\" property to", "#self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields in (False, True):", "obj\\r\\n\" + # \"<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318", "/FAAABD 12 Tf )/AP<</N 12 0 R>>>>\", # content) # form = pdf_document.form", "in this outline will take us to the location of its respective heading.", "how to set a different color space for images in a document as", "self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create a", "render all images from the document in black and white. # The size", "aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a \"PdfSaveOptions\" object that we can pass to", "\"rb\") as file: # data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type", "text language. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options", "\"font_embedding_mode\" property to \"EMBED_ALL\" to embed all fonts in the output PDF. 
#", "aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 2 (even)\\r\\n\" + # \"Page", "file.read() if export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents", "create_missing_outline_levels in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work", "to specify a compression type for all images in a document that we", "PDF format using the Save method and the PdfSaveOptions class. doc = aw.Document()", "\"open_hyperlinks_in_new_window\" property to \"True\" to save all hyperlinks using Javascript code # that", ",۵ ,۴ ,۳ ,۲ ,۱\", text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to", "# self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85 48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): #", "that we have access to both the fonts in this document. original_fonts_sources =", "sign the document as we render it with the \"save\" method. signing_time =", "#bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels in (False, True): with", "look up the locale to determine what number of glyphs to use. #", "#elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure in (False,", "of the entry. This entry is the \"owner\" of several such \"sub-entries\". #", "method converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\"", "pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words", "0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR", "PDF. certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose", "doc = aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\") # Create a \"PdfSaveOptions\" object that", "R>>\", data) # outline_item_collection = pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) #", "save_options.encryption_details = encryption_details # When we open this document, we will need to", "aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a \"PdfSaveOptions\" object that we", "to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR + \"Transparent", "\"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content = file.read() if export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\"", "in doc.sections: section = section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print this", "True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR", ".PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # The output PDF document will contain an outline,", "R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS:", "for some PDF readers to follow when opening an output document. doc =", "create_note_hyperlinks: # self.assertEqual(8, len([a for a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) #", "aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to set the numeral format", "must set the \"multiple_pages\" # properties of the page setup objects of all", "in the first section's header/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title) def", "self.save_warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS:", "for i in range(5): builder.writeln(f\"Page {i + 1} ({'odd' if i % 2", "def test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode", "apply_additional_text_positioning in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write additional", "significantly 
increase the size of already large documents. save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR +", "this outline will take us to the location of its respective heading. #", "self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with", "the lazy dog.\") # Configure our font sources to ensure that we have", "use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\",", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content", "\"font_embedding_mode\" property to \"EMBED_NONE\" to not embed any fonts in the output PDF.", "to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to", "and 4. # The output PDF document will treat outline levels 2, 3,", "aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor", "open new pages when we click on them. 
doc = aw.Document() builder =", "builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0) # Create a \"PdfSaveOptions\" object that we can", "drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\") builder.insert_image(img) # Create a \"PdfSaveOptions\" object that we", "<</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217,", "\"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE:", "== aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)", "0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0", "document on both sides of the pages, we can fold all the pages", "how that method converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() save_options.text_compression =", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks =", "PDF to 220 ppi. self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options)", "property to \"EmfPlusDualRenderingMode.EMF\" # to only render the EMF part of an EMF+", "b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8", "< text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else:", "the \"expanded_outline_levels\" property to \"2\" to automatically expand all heading level 2 and", "make the document structure, such tags, available via the # \"Content\" navigation pane", "+ # \"Page 2 (even)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page", "property to \"36\" to downsample all images to 36 ppi. options.downsample_options.resolution = 36", "the second 4th level outline entry, # the 4th and 5th heading level", "correct value in real time. # We will need to manually update them", "the column header. 
builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier =", "Set the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to get the PDF reader to open", "properties in an XMP packet. options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd", "\"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster operation is partly supported.\",", "act as links that, upon clicking, take us to their respective footnotes/endnotes. #", "and \"Document.UpdateFields()\" # each time we need them to display accurate values. builder.write(\"Page", "is 100 seconds. self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set our timeout period via", "options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd", "to \"1\" to get the outline # to only register headings with heading", "options) # 3 - Save every page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\",", "only intended as a supplement to the documentation, and is provided # \"as", "= bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels):", "as the one we have created above from the outline. # Set the", "output PDF. 
pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "= memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q=", "title of the document as the title bar. doc = aw.Document() builder =", "215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 654 0]>>\", content) self.assertIn( b\"<</Type", "aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The default lifespan of the timestamp is 100 seconds.", "text within WMF images according to the size of the metafile on the", "options = aw.saving.PdfSaveOptions() # Set the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to # use", "update_fields in (False, True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to update", "method and the PdfSaveOptions class. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please select", "PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK)", "Aspose.Words to skip embedding Arial and Times New Roman fonts into a PDF", "__init__(self): # self.warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.warning_type", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\" property to", "to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a simplified version of DrawingML effects. # Set", "# preserve the default scale of these fonts. 
save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR +", "# Their resolution should be lower than that of the device that is", "\"save\" method. signing_time = datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\", signing_time)", "property to \"PdfCustomPropertiesExport.METADATA\" # to preserve custom properties in an XMP packet. options.custom_properties_export", "0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2 11 0 R/X3 12", "else \"Page of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields in (False, True): with", "0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc = aw.Document(MY_DIR + \"Corrupted image.docx\") # save_warning_callback", "# Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the CMYK color", "aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode ==", "0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details", "rendering and changing type of warnings about unsupported metafile records. #def test_handle_binary_raster_warnings(self): #", "= aw.Document(MY_DIR + \"WMF with text.docx\") # Create a \"PdfSaveOptions\" object that we", "\"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" # to render the DML shapes themselves. 
options.dml_rendering_mode =", "0>>/Dest[5 0 R /XYZ 85 79 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463", "content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE,", "36 ppi. options.downsample_options.resolution = 36 # Set the \"resolution_threshold\" property to only apply", "pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def", "\"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for", "select a fruit: \", text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\" + # \"<</Type /Annot/Subtype", "In that case, # we can open it using Adobe Acrobat and find", "bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options", "and also with more processing cost. options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR +", "links in new windows/browser tabs. 
# Set the \"open_hyperlinks_in_new_window\" property to \"False\" to", "# Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" # to render the DML shapes", "#ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process bookmarks in headers/footers in a document", "content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\",", "R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE 14 0", "in an output PDF document based on the parity of their page numbers.", "aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set Aspose.Words to skip", "= aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create a \"PdfSaveOptions\" object that we can", "RenderCallback(aw.IWarningCallback): # def __init__(self): # self.warnings: List[aw.WarningInfo] = [] # def warning(info: aw.WarningInfo):", "from the outline. options.outline_options.headings_outline_levels = 4 # If an outline entry has subsequent", "and applies the configuration # # in our MetafileRenderingOptions object to the saving", "export it to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR", "#ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable or disable subsetting when embedding fonts while", "to modify how that method converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions()", "of signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") # Create a \"PdfSaveOptions\"", "+ \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode", "may be desirable. # Set the \"embed_full_fonts\" property to \"True\" to embed every", "aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\"))", "6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content)", "1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4", "link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85 79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85 654", "using. 
The file will be considerably smaller, # but we may need access", "aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to", "6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1", "##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback to bitmap rendering and changing type", "# fields and update them before we save it as a PDF. This", "# Set the \"text_compression\" property to \"PdfTextCompression.FLATE\" to apply ZIP compression # to", "are rendering the document as a booklet, we must set the \"multiple_pages\" #", "from the document will be downsampled at this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options)", "option if we know that all our fields will be up to date", "# pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL,", "inserted above will not appear. save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd", "+ \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode", "document body. 
# Clicking on an entry in this outline will take us", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content = file.read() if open_hyperlinks_in_new_window: self.assertIn(", "MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how", "outline level 5 headings as level 2. save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\",", "+ \"Corrupted image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR", "#self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan)", "for headings inside tables. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) # Create a", "numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3, 4, 5, 6, 7, 8, 9,", "0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212 711 0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count)", "# their current values and display them as plain text in the output", "test_interpolate_images(self): for interpolate_images in (False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to", "use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document =", "#self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\",", "# self.assertEqual(\"Please select a fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count)", "content = file.read() if interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width", "text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", 
\"wb\") as stream: # Create", "#ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure the rendering quality", "content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images in (False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart", "with self.subTest(uri=uri, result=result): doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR", "= aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF # The output PDF document will contain an", "+ \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode ==", "= aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR +", "pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text)", "9, 10, 50, 100\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣,", "so that they open new pages when we click on them. 
doc =", "file.read() with io.BytesIO() as stream: image.save(stream) if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \", content)", "to apply text compression when saving a document to PDF. doc = aw.Document()", "the document's \"title\" built-in property in the tab that belongs to this document.", "with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display the title of the document", "all level and 3 and higher entries when we open the document. options.outline_options.expanded_outline_levels", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7 0", "than the value of the \"headings_outline_levels\" property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\",", "# print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) # def __getitem__(self, i) -> aw.WarningInfo: # return", "is part of Aspose.Words. The source code in this file # is only", "# self.verify_image(400, 400, pdf_doc_image_stream) # elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR +", "format text within WMF images according to the size of the metafile on", "# doc = aw.Document(MY_DIR + \"Corrupted image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback", "Adobe Acrobat Pro, # to display the value of the document's \"title\" built-in", "Aspose Pty Ltd. All Rights Reserved. 
# # This file is part of", "are three \"page_set\" properties that we can use to filter out a set", "\"headings_outline_levels\" property to \"1\" to get the outline # to only register headings", "= aw.Document(MY_DIR + \"DrawingML shape effects.docx\") # Create a \"PdfSaveOptions\" object that we", "CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100\")", "in the output PDF. options.embed_full_fonts = True # Set the \"font_embedding_mode\" property to", "= aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to allow the editing of annotations. encryption_details.permissions =", "(1) 0 (0) 0 (.) 0 ( ) 0 (N) 0 (o) 0", "than that of the device that is displaying the document. # Set the", "in the document body. # Clicking on an entry in this outline will", "= aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber", "next paragraph via \"View\" -> \"Show/Hide\" -> \"Navigation panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR +", "1 (2) -1 (018)] TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False,", "the cost of increased file size. # Set the \"additional_text_positioning\" property to \"False\"", "aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self):", "True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save a document to the", "first row, # whose text we will format in a heading-type style, will", "to exclude all headings whose levels are above 2 from the outline. 
#", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN:", "= aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\", datetime.now()) # Create a timestamp authority-verified timestamp.", "save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\")", "#self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date)", "#self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode", "# Set the \"create_outlines_for_headings_in_tables\" property to \"True\" to include all headings within tables", "721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type /Annot/Subtype", "#input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as", 
"aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\" property to \"True\" to turn all footnote/endnote symbols", "# Set the \"headings_outline_levels\" property to \"1\" to get the outline # to", "aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") # Create a \"PdfSaveOptions\" object that we can pass", "in the outline. save_options.outline_options.headings_outline_levels = 5 # This document contains headings of levels", "not apply any interpolation. save_options.interpolate_images = interpolate_images # When we open this document", "= aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 2 (even)\\r\\n\" + #", "Set the \"color_mode\" property to \"NORMAL\" to render all images in color. pdf_save_options", "= warning_callback # save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR +", "lists headings in the document body. # Clicking on an entry in this", "odd-numbered pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 - Save", "aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False, True):", "to sign a saved PDF document digitally and timestamp it. doc = aw.Document()", "that opens this document to interpolate images. 
# Their resolution should be lower", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning:", "Possibly unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback): # def __init__(self): # self.save_warnings = aw.WarningInfoCollection()", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"text_compression\" property to", "\"10\" to strengthen compression at the cost of image quality. pdf_save_options.jpeg_quality = 10", "# This document will contain one page starting from page two, which will", "0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84 47 0\", outline_item_collection[2].destination.to_string()) #", "file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5", "all images and ignore the \"image_compression\" property's value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR +", "datetime import datetime, timedelta, timezone import aspose.words as aw import aspose.pydrawing as drawing", "= embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else:", "we are converting to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\")", "property. 
save_options.encryption_details = encryption_details # When we open this document, we will need", "a supplement to the documentation, and is provided # \"as is\", without warranty", "= aw.Document(MY_DIR + \"Footnotes and endnotes.docx\") # Create a \"PdfSaveOptions\" object that we", "background, which may reduce artifacts. # Set the \"preblend_images\" property to \"False\" to", "\\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode):", "aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions", "\"PdfCompliance.PDF_A1B\" to comply with the \"PDF/A-1b\" standard, # which aims to preserve the", "Aspose.Words will also apply Flate compression to all images and ignore the \"image_compression\"", "to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in range(100): builder.writeln(\"Lorem", "aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612", ".PDF. # Set the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF reader", "the output PDF. 
options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode ==", "to \"False\" to not update all the fields in a document right before", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name)", "#image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif", "export the document structure. options.export_document_structure = export_document_structure # Suppose we export document structure", "pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode ==", "\"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date())", "in the output PDF. pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document", "this document. # Set the \"display_doc_title\" to \"False\" to get such readers to", "to bitmap rendering and changing type of warnings about unsupported metafile records. #def", "edit the PDF. 
# Set the \"embed_full_fonts\" property to \"False\" to apply subsetting", "file.read()) def test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality", "3, and 4. # The output PDF document will treat outline levels 2,", "save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\")", "/UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages", "builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\" is \"False\",", "to \"True\" to scale fonts # that format text within WMF images according", "the \"embed_full_fonts\" property to \"True\" to embed every glyph of every embedded font", "/UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content) elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3", "while rendering a document to PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name", "pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR", "# self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count)", "0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber", "\"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to # render DrawingML effects with more accuracy and", "as Adobe Acrobat, we will see the document scaled at 1/4 of its", "= aw.DocumentBuilder(doc) # \"Arial\" is a standard font, and \"Courier New\" is a", "outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85 806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\",", "a document to PDF. 
doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") # Create a", "# metafile_rendering_options.emulate_raster_operations = False # # Set the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to", "file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0", "\"morzal.pfx\", \"aw\") # Create a \"PdfSaveOptions\" object that we can pass to the", "# print(\"Unsupported operation: \" + info.description) # self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode", "headings of levels 1 and 5, and no headings with levels of 2,", "(aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export custom", "more processing cost. options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd", "on the page. # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"False\" to # preserve", "711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 654 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype", "\"Page 3 (odd)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"preblend_images\" property to \"True\"", "#ExFor:PdfPageMode #ExSummary:Shows to process bookmarks in headers/footers in a document that we are", "above 128 ppi. 
options.downsample_options.resolution_threshold = 128 # Only the first two images from", "open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\"", "#if color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode ==", "0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI", "# self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text)", "in the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OC\" to get the", "b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1]", "and changing type of warnings about unsupported metafile records. #def test_handle_binary_raster_warnings(self): # doc", ".PDF. options = aw.saving.PdfSaveOptions() # By default, Aspose.Words downsample all images in a", "PDF document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\",", "Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with", "builder.writeln(\"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100\") #", "# \"Page 3 (odd)\\r\\n\" + # \"Page 4 (even)\\r\\n\" + # \"Page 5", "all its sub-entries. # Set the \"expanded_outline_levels\" property to \"2\" to automatically expand", "the cost of increasing the duration of the operation. # Set the \"memory_optimization\"", "footnote/endnote symbols link to anything. options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd", "all the pages down the middle at once, # and the contents will", "that we are rendering to PDF. doc = aw.Document(MY_DIR + \"Bookmarks in headers", "to \"False\" to make it so that the reader does not apply any", "title bar. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows", "0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12", "#ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save a document to the PDF format in the", "white. # The size of the output document may be larger with this", "as stream: image.save(stream) if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue())) else:", "#ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change the resolution of images in the PDF document.", "starting from the second page. 
options.page_set = aw.saving.PageSet(1) # This document will contain", "builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\") as stream: # Create a", "page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\",", "large documents. save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85 48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc =", "pdf_document.form.count) def test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance", "#self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0", "a document to PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR", "with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to optimize memory consumption when", "of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart", "section = section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print this document on", "doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit amet, consectetur", "pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\",", "-> \"Navigation panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR +", "a book fold. doc = aw.Document(MY_DIR + \"Paragraphs.docx\") # Create a \"PdfSaveOptions\" object", "effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents", "options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\")", "method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"interpolate_images\"", "itself. 
options.page_mode = page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with", "for a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85 677", "save_options.outline_options.default_bookmarks_outline_level = 1 # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to # not", "helps us use it to make a booklet. # Set the \"use_book_fold_printing_settings\" property", "True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to save a document to the", "13 0 R>>stream\", file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with", "self.assertEqual( # \"[(Samsta) -1 (g) 1 (, 10. November) -1 ( ) 1", "Set the \"headings_outline_levels\" property to \"4\" to exclude all headings whose levels are", "a document.\"\"\" # def __init__(self): # self.warnings = aw.WarningInfoCollection() # def warning(self, info:", "#ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to set instructions for some PDF readers to follow", "the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" # to render the DML shapes themselves. options.dml_rendering_mode", "The first row, # whose text we will format in a heading-type style,", "will appear to the left of the entry. This entry is the \"owner\"", "our MetafileRenderingOptions object to the saving operation. # save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options", "the document to PDF. 
# Set the \"text_compression\" property to \"PdfTextCompression.FLATE\" to apply", "level entries are sub-entries of the second 3rd level entry, and so on.", "need access to any custom fonts if we edit the document. options.embed_full_fonts =", "\"rb\") as file: # content = file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED):", "document. # This helps with making documents searchable but may significantly increase the", "builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create a \"PdfSaveOptions\" object that we can pass to", "builder = aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem", "how that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # The", "aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1,", "\"emulate_raster_operations\" property to \"False\" to fall back to bitmap when # # it", "Create a timestamp authority-verified timestamp. 
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The default", "10, 50, 100\") # Create a \"PdfSaveOptions\" object that we can pass to", "# Set the \"text_compression\" property to \"PdfTextCompression.NONE\" to not apply any # compression", "with the \"PDF/A-1a\" standard, # which complies with \"PDF/A-1b\" as well as preserving", "#2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\",", "the EMF+ part of an EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property", "\"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content = file.read()", "\"owner\" of several such \"sub-entries\". # In our document, the outline entries from", "else: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents 6 0", "5th heading level entries are sub-entries of the second 3rd level entry, and", "# properties of the page setup objects of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if", "aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\") builder.insert_image(img) # Create a \"PdfSaveOptions\"", "document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"compliance\" property to \"PdfCompliance.PDF_A1B\"", "document as a PDF normally. 
save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd", "+ \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content =", "+ # \"Page 5 (odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber =", "property. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that we", "convert to PDF so that they open new pages when we click on", "to preblend transparent images # with a background, which may reduce artifacts. #", "from the 5th heading level are sub-entries of the second 4th level outline", "resolution should be lower than that of the device that is displaying the", "Rights Reserved. # # This file is part of Aspose.Words. The source code", "are sub-entries of the second 4th level outline entry, # the 4th and", "action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows", "at # their current values and display them as plain text in the", ",۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱\", text_absorber.text) def", "\"JohnDoe\", \"<PASSWORD>\") # The default lifespan of the timestamp is 100 seconds. self.assertEqual(100.0,", "saving operations # at the cost of increasing the duration of the operation.", "outline. # Set the \"create_outlines_for_headings_in_tables\" property to \"True\" to include all headings within", "# The default lifespan of the timestamp is 100 seconds. self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) #", "compression to text when we save the document to PDF. 
# Set the", "= aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print this document on both sides of the", "= aw.Document() builder = aw.DocumentBuilder(doc) # Insert text with PAGE and NUMPAGES fields.", "io.BytesIO() as stream: image.save(stream) if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue()))", "that method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF", "#ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work with outline levels that do not", "entries since there are no usable headings. # Set the \"create_missing_outline_levels\" property to", "#self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self):", "inbetween itself and the next entry of the same or lower level, #", "# Set the \"resolution_threshold\" property to only apply the downsampling to # images", "#ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend images with transparent backgrounds while saving a document", "+ \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber", "save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set the \"use_emf_embedded_to_wmf\" property to \"True\" to render embedded", "aspose.pdf.text.TextFragmentAbsorber() 
#pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\" if update_fields else \"Page of \", text_fragment_absorber.text_fragments[1].text)", "how to set permissions on a saved PDF document. doc = aw.Document() builder", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Create a digital", "content = file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) #", "new windows/browser tabs. # Set the \"open_hyperlinks_in_new_window\" property to \"False\" to save all", "Extend permissions to allow the editing of annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY", "XYZ 85 806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85 48", "document.\"\"\" # def __init__(self): # self.warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo):", "for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how", "= metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback # doc.save(ARTIFACTS_DIR +", "options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR", "# pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in 
(aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def", "/Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 654", "the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF reader to # apply", "0 (m) 0 (s) 0 (t) 0 (a) -1 (g) 1 (,) 0", "= form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value) #else: #", "doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that we can", "\"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" # to only render the EMF part of an", "4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0", "== aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data) #", "#ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export Odd pages from the document. doc =", "pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image =", "the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to discard # custom document properties as we", "\"text_compression\" property to \"PdfTextCompression.FLATE\" to apply ZIP compression # to text when we", "Otherwise, Aspose.Words will render the EMF part. 
save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set the", "over the lazy dog.\") # Create a \"PdfSaveOptions\" object that we can pass", "# The \"save\" method will apply our signature to the output document at", "render a simplified version of DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to", "the \"compliance\" property to \"PdfCompliance.PDF_A1B\" to comply with the \"PDF/A-1b\" standard, # which", "test_doc_title(self): for display_doc_title in (False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to", "exclude all headings within tables, # such as the one we have created", "structure while saving this document. In that case, # we can open it", "images in a document that we save to PDF to 220 ppi. self.assertTrue(options.downsample_options.downsample_images)", "fallback shapes when saving to PDF. doc = aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\")", "the output PDF. # Set the \"image_compression\" property to \"PdfImageCompression.JPEG\" to use the", "scaled at 1/4 of its actual size. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document", "(g) 1 (,) 0 ( ) 0 (1) 0 (0) 0 (.) 
0", "٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸", "\"False\" to make it so that the reader does not apply any interpolation.", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\" if update_fields", "aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My", "document to .PDF. # Set the \"display_doc_title\" to \"True\" to get some PDF", "#ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to optimize memory consumption when rendering large documents", "file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\",", "it enabled. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as", "from # our document to save in an output PDF document based on", "than 1. 
pdf_save_options.outline_options.headings_outline_levels = 1 # Set the \"create_outlines_for_headings_in_tables\" property to \"False\" to", "def test_embed_core_fonts(self): for use_core_fonts in (False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how", "#ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render fallback shapes when saving to PDF.", "a document that we are converting to PDF. doc = aw.Document() builder =", "11 0 R/Filter /FlateDecode>>\", content) else: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype", "field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value) #else:", "display the document's filename. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\",", "# In the outline, we can click on the arrow of the \"owner\"", "outline entry, # the 4th and 5th heading level entries are sub-entries of", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name)", "the \"display_doc_title\" to \"True\" to get some PDF readers, such as Adobe Acrobat", "0 0 0]/FT /Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field =", "document to .PDF. 
save_options = aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF # The output PDF", "\"rb\") as file: content = file.read() with io.BytesIO() as stream: image.save(stream) if preblend_images:", "= page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations", "to \"NumeralFormat.EUROPEAN\" to use european numerals. # Set the \"numeral_format\" property to \"NumeralFormat.SYSTEM\"", "2 and lower outline entries # and collapse all level and 3 and", ".PDF. # Set the \"color_mode\" property to \"GRAYSCALE\" to render all images from", "# self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( #", "only apply the downsampling to # images with a resolution that is above", "document at this time. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with", "when saving a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert", "+ \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original font sources. aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "plain text in the output PDF. pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options)", "any # compression to text when we save the document to PDF. 
#", "\"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the \"resolution\" property to \"36\" to downsample all images", "provided that they have a heading level that is no larger than the", "we have inserted above will not appear. save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\",", "formatting loss-related warnings that occur upon saving a document.\"\"\" # def __init__(self): #", "pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length)", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2,", "aw.Document(MY_DIR + \"DrawingML shape 3D effects.docx\") # warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback =", "for all images in the saved PDF. # Aspose.Words will also apply Flate", "of levels 1, 2, and then 3. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\")", "pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\")", "click on them. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) #", "def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert a whole", "0>>/Dest[5 0 R /XYZ 85 677 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635", "test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode", "# \"Page 3 (odd)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "how to export custom properties while converting a document to PDF. doc =", "#pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "aw.saving.PdfSaveOptions() # Create a digital signature and assign it to our SaveOptions object", "endnotes function as hyperlinks. doc = aw.Document(MY_DIR + \"Footnotes and endnotes.docx\") # Create", "of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for section in doc.sections: section =", "# not export any bookmarks that are inside headers/footers. # Set the \"header_footer_bookmarks_export_mode\"", "to # only export bookmarks in the first section's header/footers. 
# Set the", "aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) #pdf_document =", "value of 25%. options = aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 #", "expand all heading level 2 and lower outline entries # and collapse all", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\",", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to", "each page in the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OC\" to", "such as Adobe Acrobat, we will see the document scaled at 1/4 of", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL))", "structure. options.export_document_structure = export_document_structure # Suppose we export document structure while saving this", "#bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self):", "Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" # to render the DML shapes themselves.", "builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\") # Create a \"PdfSaveOptions\" object that", "Set the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to only allow nonstandard fonts' embedding in", "fonts in this document. 
original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source])", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" +", "+ \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if", "document. original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources()", "how that method converts the document to .PDF. 
pdf_options = aw.saving.PdfSaveOptions() # Set", "options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content = file.read() if", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\",", "document with a reader such as Adobe Acrobat, we will need to zoom", "aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure Enhanced", "sub-entries. # Set the \"expanded_outline_levels\" property to \"2\" to automatically expand all heading", "#ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change image color with saving options property. doc", "\"color_mode\" property to \"GRAYSCALE\" to render all images from the document in black", "= pdf_document.form # self.assertEqual(1, pdf_document.form.count) # field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) #", "Pro, # to display the value of the document's \"title\" built-in property in", "\"\") # Start by disallowing all permissions. encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions", "aw.Document() builder = aw.DocumentBuilder(doc) # Insert text with PAGE and NUMPAGES fields. 
These", "background logo.png\") builder.insert_image(img) # Create a \"PdfSaveOptions\" object that we can pass to", "R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else: self.assertIn(", "through all the document # fields and update them before we save it", "R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0", "format in a heading-type style, will serve as the column header. builder.start_table() builder.insert_cell()", ".PDF. pdf_options = aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\" property to \"True\" to save", "we are rendering to PDF. doc = aw.Document(MY_DIR + \"Bookmarks in headers and", "#ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to set the PDF standards compliance level of", "datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test", "3, 4, 5, 6, 7, 8, 9, 10, 50, 100\") # Create a", "manually update them using updating methods such as \"Field.Update()\", and \"Document.UpdateFields()\" # each", "to configure Enhanced Windows Metafile-related rendering options when saving to PDF. doc =", "of the \"owner\" entry to collapse/expand all its sub-entries. # Set the \"expanded_outline_levels\"", "value of the \"headings_outline_levels\" property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd", "the location of its respective heading. 
# Set the \"headings_outline_levels\" property to \"4\"", "pdf_doc_image_stream: # if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with", "aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() #", "= section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print this document on both", "with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export custom properties while converting", "in real time. # We will need to manually update them using updating", "document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OC\" to get the PDF reader", "to display a separate panel # with a thumbnail for each page in", "to PDF. doc = aw.Document(MY_DIR + \"DrawingML shape effects.docx\") # Create a \"PdfSaveOptions\"", "and Times New Roman fonts into a PDF document. 
doc = aw.Document() builder", "content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4", "pass to the document's \"save\" method # # to modify how that method", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count)", "the outline, we can click on the arrow of the \"owner\" entry to", "the one we have created above from the outline. # Set the \"create_outlines_for_headings_in_tables\"", "/Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711", "3, 4, 5, 6, 7, 8, 9, 10, 50, 100\", text_absorber.text) #elif numeral_format", "type for all images in a document that we are converting to PDF.", "to preserve custom properties within the output PDF document. # Set the \"custom_properties_export\"", "14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else: self.assertIn( b\"5 0 obj\\r\\n\" +", "def test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows", "document. 
# Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" # to preserve custom properties", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result,", "to \"EMBED_NONSTANDARD\" to only allow nonstandard fonts' embedding in the output PDF. #", "# with a background, which may reduce artifacts. # Set the \"preblend_images\" property", "#self.assertEqual(\"Page 2 (even)\\r\\n\" + # \"Page 4 (even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode", "\"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\"))", "to \"True\" to make the document structure, such tags, available via the #", "not be processed. Possibly unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback): # def __init__(self): #", "# self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) <", "PDF. # Set the \"preserve_form_fields\" property to \"False\" to freeze all form fields", "to make it so that the reader does not apply any interpolation. 
save_options.interpolate_images", "\"memory_optimization\" property to \"True\" to lower the memory footprint of large documents' saving", "+ \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber)", "595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1 10 0", "document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\"", "#7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\",", "already large documents. save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document =", "are inside headers/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to # only", "method. signing_time = datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm", "(odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1", "to set the numeral format used when saving to PDF. 
doc = aw.Document()", "a fruit: \") # Insert a combo box which will allow a user", "\"False\" to apply subsetting to fonts, saving only the glyphs # that the", "# self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions", "= file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5 0", "#ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content =", "info: aw.WarningInfo): # if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \" + info.description)", "[0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2 11 0 R/X3", "how that method converts the document to .PDF. # Set the \"display_doc_title\" to", "the \"emulate_raster_operations\" property to \"False\" to fall back to bitmap when # #", "text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create a \"Span\" tag in", "a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85 677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ", "self.assertTrue(any(font.full_font_name == \"Arial\" for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for font in", "regional settings. 
options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "value of the document's \"title\" built-in property in the tab that belongs to", "# in the output PDF in a way that helps us use it", "that is displaying the document. # Set the \"interpolate_images\" property to \"False\" to", "0 R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR + \"DrawingML shape", "\"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content = file.read()", "builder.insert_image(img) # Create a \"PdfSaveOptions\" object that we can pass to the document's", "R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0", "# self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) <", "True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\" for font in font_sources[0].get_available_fonts()))", "#ExFor:PdfZoomBehavior #ExSummary:Shows how to set the default zooming that a reader applies when", "shapes themselves. 
options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR +", "quick brown fox jumps over the lazy dog.\") # Configure our font sources", "property to \"False\" to not update all the fields in a document right", "= aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum", "= aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create a \"PdfSaveOptions\" object that we", "= pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233 806", "= create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as", "# b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as", "def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) # def __getitem__(self, i) ->", "\"False\", \"export_language_to_span_tag\" is ignored. 
save_options.export_document_structure = True save_options.export_language_to_span_tag = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_language_to_span_tag.pdf\",", "R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to # only export bookmarks in the first", "self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber =", "i in range(100): builder.writeln(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, \" +", "aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to", "(aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent", "to \"2\" to exclude all headings whose levels are above 2 from the", "PdfSaveOptions objects. 
options_copy = options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "#self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert", "# automatically select the color space for images in the document that it", "ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut", "aliqua.\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "PDF normally. save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self): parameters", "aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to set", "= preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image", "saving a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings", "will be considerably smaller, # but we may need access to any custom", "to \"True\" to replace some fonts, # including the two fonts in our", "how to perform interpolation on images while saving a document to PDF. doc", "warnings that occur upon saving a document.\"\"\" # def __init__(self): # self.warnings =", "PDF standards compliance level of saved PDF documents. 
doc = aw.Document(MY_DIR + \"Images.docx\")", "PDF document will contain an outline, which is a table of contents that", "for metafiles that we can render as vector graphics. save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") # Create a \"PdfSaveOptions\"", "(False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work with outline", "+ \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 2", "table with three rows. The first row, # whose text we will format", "= aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38,", "\"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to try to render every metafile using vector graphics.", "shape effects.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "+ # \"Page 5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to", "import datetime, timedelta, timezone import aspose.words as aw import aspose.pydrawing as drawing from", "all headers/footers. save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR", "PDF. 
doc = aw.Document(MY_DIR + \"EMF.docx\") # Create a \"PdfSaveOptions\" object that we", "# aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber()", "a higher level inbetween itself and the next entry of the same or", "how that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Create", "create a \"Span\" tag in the document structure to export the text language.", "pages when we click on them. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\",", "default lifespan of the timestamp is 100 seconds. self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can", "options when saving to PDF. doc = aw.Document(MY_DIR + \"EMF.docx\") # Create a", "#elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣, ٤, ٥, ٦, ٧, ٨,", "how that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set", "Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the EMF+ part of", "\"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to discard all DrawingML effects. # Set the \"dml_effects_rendering_mode\"", "#9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading", "may need access to any custom fonts if we edit the document. options.embed_full_fonts", "the Save method and the PdfSaveOptions class. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "fonts in our document, with their PDF Type 1 equivalents. 
# Set the", "aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create a \"PdfSaveOptions\" object that we can", "content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R", "+ \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation()", "#ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF fonts scaling according to metafile size on", "test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode", "to automatically expand all heading level 2 and lower outline entries # and", "0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else: self.assertIn( b\"5 0", "self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: #", "format in the form of a book fold. doc = aw.Document(MY_DIR + \"Paragraphs.docx\")", "cost of increased file size. # Set the \"export_document_structure\" property to \"False\" to", "how that method converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Below", "using updating methods such as \"Field.Update()\", and \"Document.UpdateFields()\" # each time we need", "\"False\" to fall back to bitmap when # # it encounters a metafile,", "0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode", "to not embed any fonts in the output PDF. options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR", "click on the arrow of the \"owner\" entry to collapse/expand all its sub-entries.", "841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group", "options = aw.saving.PdfSaveOptions() # Below are three \"page_set\" properties that we can use", "doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor", "pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\",", "builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is", "Only the first two images from the document will be downsampled at this", "PDF contents.\") # Create a \"PdfSaveOptions\" object that we can pass to the", "+ \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 3", "color. 
pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document", "= aw.saving.PageSet(1) # This document will contain one page starting from page two,", "#self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout", "\"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\",", "the next paragraph via \"View\" -> \"Show/Hide\" -> \"Navigation panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR", "which takes over the monitor's display and has no controls visible. # Set", "font, and \"Courier New\" is a nonstandard font. builder.font.name = \"Arial\" builder.writeln(\"Hello world!\")", "to date before saving. 
# Set the \"update_fields\" property to \"True\" to iterate", "Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to # automatically select", "a warning with the specified properties has been generated.\"\"\" # return any(warning for", "builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do", "doc = aw.Document(MY_DIR + \"Text positioning operators.docx\") # Create a \"PdfSaveOptions\" object that", "# doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster operation", "(aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure", "output PDF in a way that helps us use it to make a", "datetime, timedelta, timezone import aspose.words as aw import aspose.pydrawing as drawing from api_example_base", "# Set the \"create_note_hyperlinks\" property to \"False\" not to have footnote/endnote symbols link", "+ \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode ==", "# to only render the EMF part of an EMF+ dual metafile. 
#", "R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document", "builder.end_row() builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create a \"PdfSaveOptions\" object that we can pass", "pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window", "the \"use_core_fonts\" property to \"True\" to replace some fonts, # including the two", "#pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())", "require raster operations to render in the output PDF. # metafile_rendering_options.emulate_raster_operations = False", "\"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1,", "\"DmlEffectsRenderingMode.FINE\" to # render DrawingML effects with more accuracy and also with more", "on the parity of their page numbers. # 1 - Save only the", "document's \"save\" method # # to modify how that method converts the document", "render as vector graphics. 
save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document", "= file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3 0 R/Contents 6", "to \"EmfPlusDualRenderingMode.EMF\" # to only render the EMF part of an EMF+ dual", "\"rb\") as file: content = file.read() if interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\" +", "builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\")", "aw.Document(MY_DIR + \"Rendering.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "(odd)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) #pdf_document", "# Clicking on an entry in this outline will take us to the", "to .PDF. options = aw.saving.PdfSaveOptions() # Below are three \"page_set\" properties that we", "once, # and the contents will line up in a way that creates", "document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a \"PdfSaveOptions\"", "outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84 47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\",", "#self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in", "\"create_missing_outline_levels\" property to \"True\" to include all missing levels in the outline, #", "pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400,", "Set the \"memory_optimization\" property to \"True\" to lower the memory footprint of large", "#text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR +", "we export document structure while saving this document. 
In that case, # we", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content = file.read() if export_document_structure: self.assertIn(", "Below are three \"page_set\" properties that we can use to filter out a", "out a set of pages from # our document to save in an", "400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000,", "= aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create a \"PdfSaveOptions\" object that we", "self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "write additional text positioning operators. doc = aw.Document(MY_DIR + \"Text positioning operators.docx\") #", "a percentage-based zoom factor when we open the document with it. 
# Set", "= file.read().decode('utf-8') #if preserve_form_fields: # self.assertEqual(\"Please select a fruit: \", text_fragment_absorber.text) # self.assertIn(\"11", "R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company", "save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback):", "0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF 15", "== source and warning.warning_type == type and warning.description == description) def test_pdf_digital_signature(self): #ExStart", "the \"text_compression\" property to \"PdfTextCompression.NONE\" to not apply any # compression to text", "self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3 0", "file size. # Set the \"export_document_structure\" property to \"False\" to not export the", "ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count)", "PDF. 
# The document's size may become very large, but we will have", "builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row()", "page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode)", "aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode", "make sure that all the fields will display # the most accurate values", "raster operation is partly supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects", "displaying the document. # Set the \"interpolate_images\" property to \"False\" to make it", "all footnote/endnote symbols # in the text act as links that, upon clicking,", "convert a whole document to PDF with three levels in the document outline.", "interpolation on images while saving a document to PDF. doc = aw.Document() builder", "builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") # Create a \"PdfSaveOptions\" object that we", "to set the PDF standards compliance level of saved PDF documents. doc =", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1])", "seconds. 
self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set our timeout period via the constructor.", "generated PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\")", "options.update_fields = update_fields # We can clone PdfSaveOptions objects. options_copy = options.clone() doc.save(ARTIFACTS_DIR", "# self.assertEqual(\"1 XYZ 85 68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202 733 0\",", "self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R>>stream\", file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR", "form of a book fold. doc = aw.Document(MY_DIR + \"Paragraphs.docx\") # Create a", "to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF reader to # apply a percentage-based zoom", "\"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: # content = file.read() #if", "We can clone PdfSaveOptions objects. 
options_copy = options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd", "this file # is only intended as a supplement to the documentation, and", "1, 0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason)", "property to \"NumeralFormat.ARABIC_INDIC\" to # use glyphs from the U+0660 to U+0669 range", "rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how", "#ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign a generated PDF document. doc = aw.Document()", "< text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) #", "such as the heading # and the next paragraph via \"View\" -> \"Show/Hide\"", "to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # The output PDF document will contain an", "# to see the interpolation effect if we saved the document with it", "aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback", "(aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata", "/Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68", "# doc.warning_callback = callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count) #", "#ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert only some of the pages in a", "# If an outline entry has subsequent entries of a higher level inbetween", "# Since our document contains a custom font, embedding in the output document", "text in the output PDF. pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd", "= text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1,", "\"True\" to get the reader that opens this document to interpolate images. 
#", "converts the document to .PDF and applies the configuration # # in our", "iterate through all the document # fields and update them before we save", "that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Configure the", "Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for pdf_compliance in", "\"use_emf_embedded_to_wmf\" property to \"True\" to render embedded EMF data # for metafiles that", "that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the", "0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1])", "default scale of these fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd", "aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber =", "bookmarks that are in all headers/footers. save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options)", "to modify how that method converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions()", "0 R>>>>\", # content) # form = pdf_document.form # self.assertEqual(1, pdf_document.form.count) # field", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name =", "that method converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Since our", "5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + #", "#ExSummary:Shows how to change the resolution of images in the PDF document. doc", "an option from a collection of strings. builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0) #", "property to \"HeaderFooterBookmarksExportMode.NONE\" to # not export any bookmarks that are inside headers/footers.", "to \"DmlEffectsRenderingMode.NONE\" to discard all DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to", "+ # b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to #", "3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data) # outline_item_collection", "outline of a saved PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) #", "save_options = aw.saving.PdfSaveOptions() # The output PDF document will contain an outline, which", "consumption when rendering large documents to PDF. doc = aw.Document(MY_DIR + \"Rendering.docx\") #", "(0) 0 (.) 0 ( ) 0 (N) 0 (o) 0 (v) 0", "[0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0, image_placement_absorber.image_placements.count)", "all images in the saved PDF. # Aspose.Words will also apply Flate compression", "black and white. 
# The size of the output document may be larger", "\"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "+ \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original font sources.", "# \"[0 (S) 0 (a) 0 (m) 0 (s) 0 (t) 0 (a)", "a thumbnail for each page in the document. # Set the \"page_mode\" property", "fields will display # the most accurate values in the PDF. options.update_fields =", "R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content)", "def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit the headings' level", "with image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions() # # Set the \"emulate_raster_operations\" property to", "def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert only some of the", "as \"missing\". # Set the \"create_missing_outline_levels\" property to \"True\" to include all missing", "aw.saving.MetafileRenderingOptions() # # Set the \"emulate_raster_operations\" property to \"False\" to fall back to", "U+06F0 to U+06F9 range as numbers. 
# Set the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\"", "save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback", "pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD),", "699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68 0]>>\", content) self.assertIn(", "options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content", "aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self):", "and the next paragraph via \"View\" -> \"Show/Hide\" -> \"Navigation panes\" -> \"Tags\".", "glyph of every embedded font in the output PDF. options.embed_full_fonts = True #", "will be downsampled at this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document =", "the fonts in this document. 
original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0],", "in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export", "the document's \"save\" method # to modify how that method converts the document", "2 (even)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page 4 (even)\\r\\n\" +", "self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content) elif page_mode", "(False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve document structure elements,", "part of an EMF+ dual metafile if all of the EMF+ records are", "#ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content =", "doc = aw.Document(MY_DIR + \"DrawingML shape effects.docx\") # Create a \"PdfSaveOptions\" object that", "# This is the preferable option if we know that all our fields", "the output PDF. pdf_save_options.image_compression = pdf_image_compression # Set the \"jpeg_quality\" property to \"10\"", "Set the \"headings_outline_levels\" property to \"2\" to exclude all headings whose levels are", "include all headings of levels 5 and below in the outline. save_options.outline_options.headings_outline_levels =", "outline, provided that they have a heading level that is no larger than", "with levels of 2, 3, and 4. 
# The output PDF document will", "that method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # The output", "CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: #", "bitmap when # # it encounters a metafile, which will require raster operations", "subsetting when embedding fonts while rendering a document to PDF. doc = aw.Document()", "in the outline, provided that they have a heading level that is no", "as drawing from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def", "ppi. options.downsample_options.resolution_threshold = 128 # Only the first two images from the document", "/DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11 0 R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): #", "interactive objects in the output PDF. # Set the \"preserve_form_fields\" property to \"False\"", "pdf_document.version) def test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression", "export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents 6 0", "it to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR +", "aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣, ٤, ٥, ٦, ٧, ٨, ٩, ١٠, ٥٠,", "to apply ZIP compression # to text when we save the document to", "# Set the \"preblend_images\" property to \"False\" to render transparent images normally. 
options.preblend_images", "for create_note_hyperlinks in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make", "self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to save a document to the PDF format", "#self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with", "will have. options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression ==", "# Set the \"use_emf_embedded_to_wmf\" property to \"True\" to render embedded EMF data #", "builder.writeln(\"Contents of signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") # Create a", "3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1", "aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure", "the output PDF. # The document's size may become very large, but we", "a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\"", "Set the \"additional_text_positioning\" property to \"False\" to render the document as usual. save_options.additional_text_positioning", "when opening a rendered PDF document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello", "\"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance", "aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12", "84 47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85 806 0\",", "when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is ignored. save_options.export_document_structure = True save_options.export_language_to_span_tag = True", "pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR", "to the left of the entry. This entry is the \"owner\" of several", "\"HeaderFooterBookmarksExportMode.NONE\" to # not export any bookmarks that are inside headers/footers. # Set", "some of the pages in a document to PDF. doc = aw.Document() builder", ".PDF. # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" # to preserve custom properties", "full-screen mode, which takes over the monitor's display and has no controls visible.", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content = file.read() if interpolate_images: self.assertIn(", "when # # it encounters a metafile, which will require raster operations to", "fonts. 
options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR +", "# Set the \"create_missing_outline_levels\" property to \"True\" to include all missing levels in", "we will need to provide the password before accessing its contents. doc.save(ARTIFACTS_DIR +", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Since our document contains a", "(a) 0 (m) 0 (s) 0 (t) 0 (a) -1 (g) 1 (,)", "\"https://www.google.com/search?q=%20aspose\"), ] for uri, result in parameters: with self.subTest(uri=uri, result=result): doc = aw.Document()", "for embed_full_fonts in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to", "0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157 733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212", "aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\",", "#self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback", "to use the # \"image_compression\" property to control the quality of all images", "content = file.read() if export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3", "headings in the document body. 
# Clicking on an entry in this outline", "+ \"sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\") #", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) #", "with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render fallback shapes when saving", "\"PdfCompliance.PDF_A1A\" to comply with the \"PDF/A-1a\" standard, # which complies with \"PDF/A-1b\" as", "to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for section in doc.sections: section = section.as_section() section.page_setup.multiple_pages =", "with the \"PDF/A-1b\" standard, # which aims to preserve the visual appearance of", "document # fields and update them before we save it as a PDF.", "save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image can", "callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster", "document properties as we save the document to .PDF. # Set the \"custom_properties_export\"", "that do not contain any corresponding headings when saving a PDF document. doc", "# metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a \"PdfSaveOptions\" object that we can", "document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"True\"", "10 0 R/Length 11 0 R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): # doc =", "When we open this document with a reader such as Adobe Acrobat, we", "# Insert headings that can serve as TOC entries of levels 1, 2,", "serve as the column header. builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell()", "#self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart", "the \"headings_outline_levels\" property to \"4\" to exclude all headings whose levels are above", "self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10", "uri, result in parameters: with self.subTest(uri=uri, result=result): doc = aw.Document() builder = aw.DocumentBuilder(doc)", "to PDF. # In most cases, the color space will be RGB. #", "+ \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1,", "output PDF. 
# Set the \"preserve_form_fields\" property to \"False\" to freeze all form", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: #", "options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\" method will apply our signature", "0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") #", "the PDF normally. options.use_book_fold_printing_settings = render_text_as_bookfold # If we are rendering the document", "to export Odd pages from the document. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "to .PDF. 
options = aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\" object of the \"SaveOptions\"", "to manually update them using updating methods such as \"Field.Update()\", and \"Document.UpdateFields()\" #", "== aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file:", "intended as a supplement to the documentation, and is provided # \"as is\",", "for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows", "may be larger with this setting. # Set the \"color_mode\" property to \"NORMAL\"", "# self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber()", "aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type", "partly supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects formatting loss-related warnings", "15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__", "to \"25\" to give the zoom factor a value of 25%. options =", "content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R", "a value of 25%. 
options = aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25", "self.assertEqual(\"1 XYZ 85 677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85 79 0\", link_annotations[1].destination.to_string())", "\"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content = file.read() with io.BytesIO() as stream: image.save(stream) if", "(S) 0 (a) 0 (m) 0 (s) 0 (t) 0 (a) -1 (g)", "this document. In that case, # we can open it using Adobe Acrobat", "document structure to export the text language. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "#ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process bookmarks in headers/footers in", "to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to", "True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable or disable subsetting", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\" property to", "# Insert headings of levels 1 to 5. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading", "content = file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length", "aw.DocumentBuilder(doc) builder.write(\"Please select a fruit: \") # Insert a combo box which will", "text_absorber.text) def test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format):", "rendering a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name =", "builder.write(\"<NAME>\") builder.end_table() # Create a \"PdfSaveOptions\" object that we can pass to the", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open)", "in (False, True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend images with", "set instructions for some PDF readers to follow when opening an output document.", "\"rb\") as file: content = file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487", "pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions 
#ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options", "the middle at once, # and the contents will line up in a", "def test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression", "how to convert a whole document to PDF with three levels in the", "outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84 47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2", "1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create a \"PdfSaveOptions\" object", "Once we print this document on both sides of the pages, we can", "property to \"False\" to exclude all headings within tables, # such as the", "\"digital_signature_details\" object of the \"SaveOptions\" object to # digitally sign the document as", "aw.saving.PdfSaveOptions() # By default, Aspose.Words downsample all images in a document that we", "self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR +", "(aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to", "1.\") 
builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\") as", "pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self):", "R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages", "table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure in (False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows", "embedding fonts while rendering a document to PDF. doc = aw.Document() builder =", "the fields in a document right before a save operation. # This is", "that all our fields will be up to date before saving. # Set", "output PDF. 
options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL:", "builder.writeln(\"Hello World!\") # Create a \"PdfSaveOptions\" object that we can pass to the", "ignore missing outline levels, # and treat the outline level 5 headings as", "warning in self.warnings # if warning.source == source and warning.warning_type == type and", "# self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400,", "8, 9, 10, 50, 100\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢,", "world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed", "will be RGB. # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use", "document that we save to PDF to 220 ppi. self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0,", "to only apply the downsampling to # images with a resolution that is", "b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8", "blank outline entries since there are no usable headings. 
# Set the \"create_missing_outline_levels\"", "b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85", "== aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85 677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85", "\"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator", "doc = aw.Document() builder = aw.DocumentBuilder(doc) # \"Arial\" is a standard font, and", "rendering_mode # Set the \"use_emf_embedded_to_wmf\" property to \"True\" to render embedded EMF data", "# the most accurate values in the PDF. options.update_fields = update_fields # We", "missing outline levels, # and treat the outline level 5 headings as level", "in the output PDF. 
save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\" property to", "aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0", "Set the \"numeral_format\" property to \"NumeralFormat.SYSTEM\" to determine the symbol set from regional", "title\" # Create a \"PdfSaveOptions\" object that we can pass to the document's", "[212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 654 0]>>\", content)", "711 0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE,", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for", "PDF. 
# Set the \"compliance\" property to \"PdfCompliance.PDF17\" to comply with the \"1.7\"", "88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711 0]>>\", content) self.assertIn( b\"<</Type", "aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\" is", "in annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8, len([a for a in link_annotations if a.annotation_type", "with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve document structure elements, which can", "= aw.SaveFormat.PDF # The output PDF document will contain an outline, which is", "content = file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3 0 R/Contents", "When we open this document, we will need to provide the password before", "Adobe Acrobat, we will see the document scaled at 1/4 of its actual", "sub-entries of the second 3rd level entry, and so on. # In the", "aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image can not be processed. Possibly unsupported", "numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows", "converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # By default, Aspose.Words downsample", "content) # break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn( #", "the \"embed_full_fonts\" property to \"False\" to apply subsetting to fonts, saving only the", "property to \"True\" to arrange the contents # in the output PDF in", "the page. # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"False\" to # preserve the", "self.warnings # if warning.source == source and warning.warning_type == type and warning.description ==", "metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\",", "we need them to display accurate values. builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\" of", "aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \" + info.description) # self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for", "property to only apply the downsampling to # images with a resolution that", "property to \"HeaderFooterBookmarksExportMode.FIRST\" to # only export bookmarks in the first section's header/footers.", "to include all headings within tables # in the outline, provided that they", "#ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert only some of the pages in a document", ".PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"interpolate_images\" property to \"True\" to get", "\"False\" to # preserve the default scale of these fonts. 
save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts", "image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode):", "object to # digitally sign the document as we render it with the", "0 R /XYZ 202 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272", "can serve as TOC entries of levels 1 and 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1", "self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title in (False, True): with", "Set the \"create_note_hyperlinks\" property to \"False\" not to have footnote/endnote symbols link to", "we can pass to the document's \"save\" method # # to modify how", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if", "in all headers/footers. 
save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc =", "test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows", "#ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to set the PDF standards compliance level of saved", "we click on them. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False)", "<</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "# to preserve custom properties in an XMP packet. options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else:", "metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a \"PdfSaveOptions\" object that we can pass", "aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier =", "images with a resolution that is above 128 ppi. 
options.downsample_options.resolution_threshold = 128 #", "36 # Set the \"resolution_threshold\" property to only apply the downsampling to #", "that can serve as TOC entries of levels 1 and 5. builder.paragraph_format.style_identifier =", "\"WMF with image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions() # # Set the \"emulate_raster_operations\" property", "levels are above 2 from the outline. # The last two headings we", "document may be desirable. # Set the \"embed_full_fonts\" property to \"True\" to embed", "which will require raster operations to render in the output PDF. # metafile_rendering_options.emulate_raster_operations", "operation: \" + info.description) # self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE,", "XYZ 202 733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258 711 0\", link_annotations[5].destination.to_string()) #", "pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if", "= aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\" property to \"True\" to embed every glyph", "self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save hyperlinks in a document we convert", "text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif 
pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) #", "aw.saving.PdfSaveOptions() # The output PDF document will contain an outline, which is a", "aw.saving.PdfSaveOptions() # Set the \"image_compression\" property to \"PdfImageCompression.AUTO\" to use the # \"image_compression\"", "# doc.warning_callback = warning_callback # save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED #", "# self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) <", "720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79 0]>>\", content) self.assertIn(", "aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by disallowing all permissions.", "property to \"PdfImageCompression.AUTO\" to use the # \"image_compression\" property to control the quality", "This document will contain one page starting from page two, which will only", "in self.warnings # if warning.source == source and warning.warning_type == type and warning.description", "render it with the \"save\" method. signing_time = datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test", "such tags, available via the # \"Content\" navigation pane of Adobe Acrobat at", "and so on. # In the outline, we can click on the arrow", "RGB. 
# Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the CMYK", "- Save every page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document", "else 'even'})\") if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\" object that", "bar pdf title\" # Create a \"PdfSaveOptions\" object that we can pass to", "aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\"", "in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85 677 0\", link_annotations[0].destination.to_string())", "\"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6", "#ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to set permissions on a saved PDF document. doc", "level, # an arrow will appear to the left of the entry. 
This", "(s) 0 (t) 0 (a) -1 (g) 1 (,) 0 ( ) 0", "25 # When we open this document using a reader such as Adobe", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with", "options.page_set = aw.saving.PageSet(1) # This document will contain one page starting from page", "headings whose levels are above 2 from the outline. # The last two", "that method converts the document to .PDF. # Set the \"zoom_behavior\" property to", "all permissions. encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to allow the editing of", "options = aw.saving.PdfSaveOptions() # Set the \"text_compression\" property to \"PdfTextCompression.NONE\" to not apply", "# self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84 47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title)", "open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content = file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN:", "#ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change image color with saving options property.", "text when we save the document to PDF. The larger the document, the", "treat outline levels 2, 3, and 4 as \"missing\". 
# Set the \"create_missing_outline_levels\"", "self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart", "#ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign", "#class RenderCallback(aw.IWarningCallback): # def __init__(self): # self.warnings: List[aw.WarningInfo] = [] # def warning(info:", "+ \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks()", "the heading # and the next paragraph via \"View\" -> \"Show/Hide\" -> \"Navigation", "50, 100\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣, ٤, ٥,", "#ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure the rendering quality of DrawingML effects in a", "\"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to # automatically select the color space for images", "PDF document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) # \"Arial\" is a standard", "to \"True\" to embed every glyph of every embedded font in the output", "#signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0,", "# print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False, True): with", "levels 2, 3, and 4 as \"missing\". # Set the \"create_missing_outline_levels\" property to", "do not contain any corresponding headings when saving a PDF document. doc =", "the \"create_missing_outline_levels\" property to \"False\" to ignore missing outline levels, # and treat", "# Set the \"memory_optimization\" property to \"True\" to lower the memory footprint of", "at the cost of increased file size. # Set the \"additional_text_positioning\" property to", "when we click on them. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\",", "images in the saved PDF. # Aspose.Words will also apply Flate compression to", "#ExSummary:Shows an option to optimize memory consumption when rendering large documents to PDF.", "aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85 677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85 79", "self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height)", "an XMP packet. 
options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR", "to \"False\" to apply subsetting to fonts, saving only the glyphs # that", "the arrow of the \"owner\" entry to collapse/expand all its sub-entries. # Set", "options = aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" # to substitute", "#ExSummary:Shows how to preblend images with transparent backgrounds while saving a document to", "\"True\" to save form fields as interactive objects in the output PDF. #", "if we edit the PDF. # Set the \"embed_full_fonts\" property to \"False\" to", "CMYK color space for all images in the saved PDF. # Aspose.Words will", "save_options = aw.saving.PdfSaveOptions() # Enable encryption via the \"encryption_details\" property. save_options.encryption_details = encryption_details", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode", "self.warnings[i] # def clear(self): # \"\"\"Clears warning collection.\"\"\" # self.warnings.clear() # @property #", "# to modify how that method converts the document to .PDF. options =", "object that we can pass to the document's \"save\" method # # to", "from the outline. 
# Set the \"create_outlines_for_headings_in_tables\" property to \"True\" to include all", "save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\" property to \"True\" to lower the", "#self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location)", "12 0 R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def", "# callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options)", "property to \"False\" to render the PDF normally. options.use_book_fold_printing_settings = render_text_as_bookfold # If", "pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to", "three rows. The first row, # whose text we will format in a", "/Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0", "it to PDF. 
# Set the \"compliance\" property to \"PdfCompliance.PDF17\" to comply with", "= display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title,", "so on. # In the outline, we can click on the arrow of", "#else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)", "#ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7", "via the \"encryption_details\" property. save_options.encryption_details = encryption_details # When we open this document,", "+ \"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\", datetime.now()) # Create", "data # for metafiles that we can render as vector graphics. save_options.metafile_rendering_options.use_emf_embedded_to_wmf =", "4 0 R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE,", "set the \"multiple_pages\" # properties of the page setup objects of all sections", "+ \"PdfSaveOptions.one_page.pdf\", \"wb\") as stream: # Create a \"PdfSaveOptions\" object that we can", "size. # Set the \"export_document_structure\" property to \"False\" to not export the document", "save_options = aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\" property to \"True\"", "converting to PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR +", "# Set the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to # use glyphs from the", "= aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT,", "0 (e) 0 (r) -1 ( ) 1 (2) -1 (0) 0 (1)", "of the second 4th level outline entry, # the 4th and 5th heading", "that method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the", "206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79 0]>>\", content) self.assertIn( b\"<</Type", "test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf", "size may become very large, but we will have full use of all", "Insert text with PAGE and NUMPAGES fields. These fields do not display the", "images in a document that we are converting to PDF. doc = aw.Document()", "a portion of the document starting from the second page. options.page_set = aw.saving.PageSet(1)", "how to WMF fonts scaling according to metafile size on the page. doc", "any fonts in the output PDF. 
options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options)", "text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "preserve_form_fields in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to save a", "\"True\" to lower the memory footprint of large documents' saving operations # at", "page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "standard, # which aims to preserve the visual appearance of the document as", "#1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\",", "in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to", "# tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings", "self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + 
\"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0", "#ExSummary:Shows how to limit the headings' level that will appear in the outline", "signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0", "0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0", "exclude all headings whose levels are above 2 from the outline. # The", "Create a \"PdfSaveOptions\" object that we can pass to the document's \"save\" method", "how to make footnotes and endnotes function as hyperlinks. doc = aw.Document(MY_DIR +", "consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna", "pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with", "builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create a \"PdfSaveOptions\" object that we can pass to", "source: aw.WarningSource, type: aw.WarningType, description: str) -> bool: # \"\"\"Returns True if a", "from a collection of strings. 
builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0) # Create a", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)", "PDF document. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that", "save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\" property to \"True\" to attempt to", "#elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines 14", "\"Navigation panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\",", "to modify how that method converts the document to .PDF. pdf_options = aw.saving.PdfSaveOptions()", "property to control the quality of all images that end up in the", "the glyphs # that the document is using. The file will be considerably", "\"Show/Hide\" -> \"Navigation panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR", "#ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to apply text compression when saving a", "access to any custom fonts if we edit the document. options.embed_full_fonts = embed_full_fonts", "the EMF part of an EMF+ dual metafile. 
# Set the \"emf_plus_dual_rendering_mode\" property", "# Set the \"create_missing_outline_levels\" property to \"False\" to ignore missing outline levels, #", "#pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 2 (even)\\r\\n\" + # \"Page 3", "\"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content = file.read()", "# Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" # to preserve custom properties in", "# Set the \"page_index\" to \"1\" to render a portion of the document", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\"))", "0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password", "1 to 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading", "to PDF. doc = aw.Document(MY_DIR + \"Rendering.docx\") # Create a \"PdfSaveOptions\" object that", "#ExSummary:Shows how to set permissions on a saved PDF document. doc = aw.Document()", "will need to zoom in on the image # to see the interpolation", "of the metafile on the page. 
# Set the \"scale_wmf_fonts_to_metafile_size\" property to \"False\"", "# self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for", "< text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL))", "the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to display the outline navigation pane in the", "# self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5 0 obj\\r\\n<</Type", "collects formatting loss-related warnings that occur upon saving a document.\"\"\" # def __init__(self):", "as file: content = file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type", "saving only the glyphs # that the document is using. The file will", "pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL,", "\"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count)", "method # to modify how that method converts the document to .PDF. 
pdf_save_options", "aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode", "\"PdfSaveOptions.one_page.pdf\", \"wb\") as stream: # Create a \"PdfSaveOptions\" object that we can pass", "options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR +", "does not apply any interpolation. save_options.interpolate_images = interpolate_images # When we open this", "# b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"preblend_images\" property", "\"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "+ # b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779", "with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make footnotes and endnotes function as", "of all images that end up in the output PDF. 
pdf_save_options.image_compression = pdf_image_compression", "save all hyperlinks using Javascript code # that forces readers to open these", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1]", "save in an output PDF document based on the parity of their page", "#pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to", "not apply PDF Type 1 fonts. options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options)", "visual appearance of the document as Aspose.Words convert it to PDF. # Set", "us to their respective footnotes/endnotes. # Set the \"create_note_hyperlinks\" property to \"False\" not", "\"resolution\" property to \"36\" to downsample all images to 36 ppi. options.downsample_options.resolution =", "reader applies when opening a rendered PDF document. doc = aw.Document() builder =", "ZIP compression # to text when we save the document to PDF. 
The", "\"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open)", "\"create_outlines_for_headings_in_tables\" property to \"False\" to exclude all headings within tables, # such as", "0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258 711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157", "\"DrawingML shape fallbacks.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "#if create_note_hyperlinks: # self.assertEqual(8, len([a for a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK]))", "table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows", "data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata", "aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 - Save only the odd-numbered pages:", "the saved # document in full-screen mode, which takes over the monitor's display", "# Set the \"preserve_form_fields\" property to \"True\" to save form fields as interactive", "the document to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # The output PDF document will", "#ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign a saved PDF document", "in the PDF. options.update_fields = update_fields # We can clone PdfSaveOptions objects. options_copy", "only render the EMF part of an EMF+ dual metafile. # Set the", "/Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content) elif page_mode == aw.saving.PdfPageMode.USE_OC:", "the operation. # Set the \"memory_optimization\" property to \"False\" to save the document", "text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "\"My Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc),", "class. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please select a fruit: \") #", "to PDF. 
doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") # Create a \"PdfSaveOptions\" object", "# Set the \"headings_outline_levels\" property to \"4\" to exclude all headings whose levels", "0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85 48 0\", outline_item_collection[4].destination.to_string()) #def", "258 711 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W", "0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def", "scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts in (False, True): with", "79 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5", "is a standard font, and \"Courier New\" is a nonstandard font. 
builder.font.name =", "aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\" property to \"True\" to arrange the contents #", "[258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68 0]>>\", content)", "PDF reader to display a separate panel # with a thumbnail for each", "pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning in", "#text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for", "method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Enable encryption via", "def test_embed_full_fonts(self): for embed_full_fonts in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows", "builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell()", "space will be RGB. 
# Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to", "as Adobe Acrobat Pro, # to display the value of the document's \"title\"", "= aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to discard # custom", "#ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber)", "document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\") certificate_holder =", "#ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure Enhanced Windows Metafile-related rendering options", "0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0", "options.use_book_fold_printing_settings = render_text_as_bookfold # If we are rendering the document as a booklet,", "font sources to ensure that we have access to both the fonts in", "apply a percentage-based zoom factor when we open the document with it. #", "# 2 - Save only the odd-numbered pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR +", "to # use glyphs from the U+0660 to U+0669 range as numbers. 
#", "considerably smaller, # but we may need access to any custom fonts if", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot =", "with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable or disable subsetting when", "text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading", "pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\"))", "documents. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that we", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Below are three \"page_set\" properties", "aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") #", "##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback to bitmap rendering and changing type of warnings", "Their resolution should be lower than that of the device that is displaying", "document to .PDF. save_options = aw.saving.PdfSaveOptions() # The output PDF document will contain", "the \"save\" method. 
signing_time = datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\",", "set permissions on a saved PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "= aw.saving.PdfSaveOptions() # Set the \"image_compression\" property to \"PdfImageCompression.AUTO\" to use the #", "no controls visible. # Set the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to get the", "we save to PDF to 220 ppi. self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR", "self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0 (S) 0 (a) 0 (m)", "in our MetafileRenderingOptions object to the saving operation. # save_options = aw.saving.PdfSaveOptions() #", "to have footnote/endnote symbols link to anything. options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\",", "R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0", "document as we export it to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "monitor's display and has no controls visible. # Set the \"page_mode\" property to", "#self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode", "belongs to this document. 
# Set the \"display_doc_title\" to \"False\" to get such", "#ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign a generated PDF document. doc =", "standard. # Set the \"compliance\" property to \"PdfCompliance.PDF_A1A\" to comply with the \"PDF/A-1a\"", "to this document. # Set the \"display_doc_title\" to \"False\" to get such readers", "test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format", "PDF. # In most cases, the color space will be RGB. # Set", "11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for preblend_images in (False, True): with", "the outline. save_options.outline_options.headings_outline_levels = 5 # This document contains headings of levels 1", "above from the outline. # Set the \"create_outlines_for_headings_in_tables\" property to \"True\" to include", "7, 8, 9, 10, 50, 100\") # Create a \"PdfSaveOptions\" object that we", "<</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "#ExSummary:Shows how to convert a whole document to PDF with three levels in", "\"numeral_format\" property to \"NumeralFormat.SYSTEM\" to determine the symbol set from regional settings. 
options.numeral_format", "builder.write(\"Please select a fruit: \") # Insert a combo box which will allow", "image.save(stream) if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0", "all hyperlinks using Javascript code # that forces readers to open these links", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\",", "/Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document", "set from regional settings. options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document", "11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber()", "def test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart", "\"False\" to save the document as a PDF normally. 
save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR", "pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\"", "the \"update_fields\" property to \"True\" to iterate through all the document # fields", "table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert a", "a save operation. # This is the preferable option if we know that", "b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn(", "to # automatically select the color space for images in the document that", "file: content = file.read() if interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype", "R /XYZ 85 677 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172", "if open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode 
##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options", "font. builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick brown", "Pty Ltd. All Rights Reserved. # # This file is part of Aspose.Words.", "/XML/Length 8 0 R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\",", "# Set the \"use_core_fonts\" property to \"True\" to replace some fonts, # including", "two, which will only contain the second page. doc.save(stream, options) #ExEnd #pdf_document =", "#ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign a generated PDF document. doc = aw.Document() builder", "property to \"False\" to freeze all form fields in the document at #", "property to \"False\" to render the document as usual. 
save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR", "encounters a metafile, which will require raster operations to render in the output", "self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) #", "68 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5", "Since our document contains a custom font, embedding in the output document may", "aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001) def", "signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode in", "import aspose.pydrawing as drawing from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class", "# self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please select", "Insert headings that can serve as TOC entries of levels 1 and 5.", "pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233 806 0\",", "aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") # 
Create a \"PdfSaveOptions\" object that", "\"Document.UpdateFields()\" # each time we need them to display accurate values. builder.write(\"Page \")", "comply with the \"1.7\" standard. # Set the \"compliance\" property to \"PdfCompliance.PDF_A1A\" to", "# in the text act as links that, upon clicking, take us to", "for each page in the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OC\"", "85 68 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W", "else: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type", "rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" +", "\"numeral_format\" property to \"NumeralFormat.CONTEXT\" to # look up the locale to determine what", "8 0 R/Filter /FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\"))", "numbers. # 1 - Save only the even-numbered pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR", "that case, # we can open it using Adobe Acrobat and find tags", "+ \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with", "any interpolation. 
save_options.interpolate_images = interpolate_images # When we open this document with a", "(aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts in (False, True): with self.subTest(use_core_fonts=use_core_fonts):", "aw.DocumentBuilder(doc) # Insert headings that can serve as TOC entries of levels 1", "a \"PdfSaveOptions\" object that we can pass to the document's \"save\" method #", "to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"interpolate_images\" property to \"True\" to", "metafile if all of the EMF+ records are supported. # Otherwise, Aspose.Words will", "PDF. The larger the document, the bigger the impact that this will have.", "font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\" for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\"", "date before saving. # Set the \"update_fields\" property to \"True\" to iterate through", "file: content = file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3 0", "the size of already large documents. save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options)", "when we open the document with it. 
# Set the \"zoom_factor\" property to", "+ # \"Page 3 (odd)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) #pdf_document =", "(odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create a \"Span\" tag", "== aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400,", "for preserve_form_fields in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to save", "obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue()))", "open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type", "control the quality of all images that end up in the output PDF.", "tabs. # Set the \"open_hyperlinks_in_new_window\" property to \"False\" to save all hyperlinks normally.", "to \"EMBED_NONE\" to not embed any fonts in the output PDF. options.font_embedding_mode =", "collapse all level and 3 and higher entries when we open the document.", "2 from the outline. # The last two headings we have inserted above", "R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions", "with more processing cost. 
options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options)", "custom document properties as we save the document to .PDF. # Set the", "certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") # Create a \"PdfSaveOptions\" object that we", "output PDF document will contain an outline, which is a table of contents", "output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONE\" to not embed any", "These fields do not display the correct value in real time. # We", "from the U+0660 to U+0669 range as numbers. # Set the \"numeral_format\" property", "interpolate_images # When we open this document with a reader such as Adobe", "ipsum dolor sit amet, consectetur adipiscing elit, \" + \"sed do eiusmod tempor", "a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK)", "0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11", "we know that all our fields will be up to date before saving.", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ", "up to date before saving. # Set the \"update_fields\" property to \"True\" to", "up in the output PDF. # Set the \"image_compression\" property to \"PdfImageCompression.JPEG\" to", "get such readers to display the document's filename. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title =", "converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"numeral_format\" property", "(a) -1 (g) 1 (,) 0 ( ) 0 (1) 0 (0) 0", "#ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure Enhanced Windows Metafile-related rendering options when saving", "to PDF. # Set the \"compliance\" property to \"PdfCompliance.PDF17\" to comply with the", "doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in range(100): builder.writeln(\"Lorem ipsum dolor", "on an entry in this outline will take us to the location of", "= aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings of levels 1 to 5.", "aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\" for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name", "#text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 2 (even)\\r\\n\" +", "= aw.saving.PdfSaveOptions() # Set the \"page_index\" to \"1\" to render a portion of", "+ \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self):", "Set the \"headings_outline_levels\" property to \"5\" to include all headings of levels 5", "itself and the next entry of the same or lower level, # an", "operation is partly supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects formatting", "\"NumeralFormat.SYSTEM\" to determine the symbol set from regional settings. 
options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR", "#ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set a different color space for images in", "[0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2", "options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD:", "options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file:", "aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") # Create a \"PdfSaveOptions\" object that we can pass", "#pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode ==", "0 (N) 0 (o) 0 (v) 0 (e) 0 (m) 0 (b) 0", "builder = aw.DocumentBuilder(doc) builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4, 5,", "that forces readers to open these links in new windows/browser tabs. 
# Set", "= create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\")", "\"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\", datetime.now()) # Create a", "\"page_index\" to \"1\" to render a portion of the document starting from the", "the \"memory_optimization\" property to \"False\" to save the document as a PDF normally.", "substitution. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name", "according to metafile size on the page. doc = aw.Document(MY_DIR + \"WMF with", "content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R", "save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) #", "R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for preblend_images in (False, True):", "#self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how", "# the 4th and 5th heading level entries are sub-entries of the second", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + 
\"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: #", "separate panel # with a thumbnail for each page in the document. #", "len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images", "the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the CMYK color space for", "# images with a resolution that is above 128 ppi. options.downsample_options.resolution_threshold = 128", "if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289", "١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹", "Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a simplified version of", "self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data)", "# Set the \"numeral_format\" property to \"NumeralFormat.CONTEXT\" to # look up the locale", "to PDF. doc = aw.Document(MY_DIR + \"EMF.docx\") # Create a \"PdfSaveOptions\" object that", "fields will be up to date before saving. 
# Set the \"update_fields\" property", "process bookmarks in headers/footers in a document that we are rendering to PDF.", "+ \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1,", "parity of their page numbers. # 1 - Save only the even-numbered pages:", "doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description, #", "save it as a PDF. This will make sure that all the fields", "heading level entries are sub-entries of the second 3rd level entry, and so", "# self.assertEqual( # \"[0 (S) 0 (a) 0 (m) 0 (s) 0 (t)", "as file: content = file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318", "R>>>>\", # content) # form = pdf_document.form # self.assertEqual(1, pdf_document.form.count) # field =", "file: content = file.read() if export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent", "is above 128 ppi. options.downsample_options.resolution_threshold = 128 # Only the first two images", "with the \"1.7\" standard. 
# Set the \"compliance\" property to \"PdfCompliance.PDF_A1A\" to comply", "def test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode", "##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback to bitmap rendering", "document to .PDF. options = aw.saving.PdfSaveOptions() # Since our document contains a custom", "properties while converting a document to PDF. doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\")", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\" property to \"True\" to embed", "2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100\") # Create", "about unsupported metafile records. 
#def test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR + \"WMF with", "f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode in", "+ # \"Page 4 (even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber =", "builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick brown fox", "(aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to set the", "type and warning.description == description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason", "( ) 0 (1) 0 (0) 0 (.) 0 ( ) 0 (N)", "to save all hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd", "transparent images normally. options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document =", "page numbers. # 1 - Save only the even-numbered pages: options.page_set = aw.saving.PageSet.even", "to \"NORMAL\" to render all images in color. 
pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode =", "endnotes.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "+ \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content = file.read() self.assertIn(", "the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\" property to", ".PDF. options = aw.saving.PdfSaveOptions() # Since our document contains a custom font, embedding", "first level of the outline in the output PDF. save_options.outline_options.default_bookmarks_outline_level = 1 #", "numeral format used when saving to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "to .PDF. save_options = aw.saving.PdfSaveOptions() # The output PDF document will contain an", "whose text we will format in a heading-type style, will serve as the", "when saving to PDF. doc = aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\") # Create", "img = drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\") builder.insert_image(img) # Create a \"PdfSaveOptions\" object", "for update_fields in (False, True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to", "to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid", "for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for font in font_sources[1].get_available_fonts())) # Create", "we will format in a heading-type style, will serve as the column header.", "level 5 headings as level 2. 
save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options)", "aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 - Save every page: options.page_set =", "aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions() # Note,", "elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content)", "quick brown fox jumps over the lazy dog.\") # Create a \"PdfSaveOptions\" object", "to \"False\" to save all hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\",", "be up to date before saving. # Set the \"update_fields\" property to \"True\"", "aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"True\" to scale fonts # that", "# in our MetafileRenderingOptions object to the saving operation. 
# save_options = aw.saving.PdfSaveOptions()", "# self.assertIn(\"11 0 obj\\r\\n\" + # \"<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F", "all heading level 2 and lower outline entries # and collapse all level", "(False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save hyperlinks in a", "# Set the \"scale_wmf_fonts_to_metafile_size\" property to \"True\" to scale fonts # that format", "at once, # and the contents will line up in a way that", "the default zooming that a reader applies when opening a rendered PDF document.", "io import os from datetime import datetime, timedelta, timezone import aspose.words as aw", "will need to provide the password before accessing its contents. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\",", "the document structure to export the text language. doc = aw.Document() builder =", "# self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) <", "doc = aw.Document(MY_DIR + \"EMF.docx\") # Create a \"PdfSaveOptions\" object that we can", "test_memory_optimization(self): for memory_optimization in (False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an", "# callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects formatting loss-related warnings that occur", "1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = 
aw.StyleIdentifier.HEADING5", "pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how", "content = file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode", "serve as TOC entries of levels 1, 2, and then 3. builder.paragraph_format.style_identifier =", "(False, True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to update all the", "the pages, we can fold all the pages down the middle at once,", "aw.saving.PdfSaveOptions() # Set the \"page_index\" to \"1\" to render a portion of the", "\"compliance\" property to \"PdfCompliance.PDF_A1A\" to comply with the \"PDF/A-1a\" standard, # which complies", "strengthen compression at the cost of image quality. pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR +", "= aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\" property to \"True\" to turn all footnote/endnote", ".PDF. save_options = aw.saving.PdfSaveOptions() # Enable encryption via the \"encryption_details\" property. save_options.encryption_details =", "#ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert only some of the pages in", "a document immediately before saving it to PDF. doc = aw.Document() builder =", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB,", "page. 
options.page_set = aw.saving.PageSet(1) # This document will contain one page starting from", "converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # The output PDF document", "aw.WarningInfo): # if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self):", "the outline, # leaving blank outline entries since there are no usable headings.", "property to \"PdfCompliance.PDF_A1B\" to comply with the \"PDF/A-1b\" standard, # which aims to", "<</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for preblend_images in (False, True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images", "# data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3 0", "10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else: self.assertIn( b\"5", "\", content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue())) def", "R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break #elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, pdf_document.pages[1].resources.images.count)", "# self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name)", "b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157", "end up in the output PDF. 
pdf_save_options.image_compression = pdf_image_compression # Set the \"jpeg_quality\"", "builder = aw.DocumentBuilder(doc) # Insert text with PAGE and NUMPAGES fields. These fields", "metafile size on the page. doc = aw.Document(MY_DIR + \"WMF with text.docx\") #", "set the PDF standards compliance level of saved PDF documents. doc = aw.Document(MY_DIR", "reader to display just the document itself. options.page_mode = page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\",", "= aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by disallowing all permissions. encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL #", "٧, ٨, ٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰", "content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber()", "0 else 'even'})\") if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\" object", "aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 # When we open this document using a reader", "XYZ 85 48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc = aw.Document(MY_DIR + \"Corrupted", "# at the cost of increasing the duration of the operation. 
# Set", "#ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure the rendering quality of DrawingML effects in", "aw.Document() builder = aw.DocumentBuilder(doc) for i in range(5): builder.writeln(f\"Page {i + 1} ({'odd'", "/URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation()", "apply PDF Type 1 fonts. options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if", "with io.BytesIO() as stream: image.save(stream) if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \", content) self.assertEqual(17898,", "Set the \"use_emf_embedded_to_wmf\" property to \"True\" to render embedded EMF data # for", "readers, such as Adobe Acrobat Pro, # to display the value of the", "U+0660 to U+0669 range as numbers. # Set the \"numeral_format\" property to \"NumeralFormat.CONTEXT\"", "self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: # if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: #", "self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "the locale to determine what number of glyphs to use. # Set the", "to \"True\" to arrange the contents # in the output PDF in a", "our document, with their PDF Type 1 equivalents. 
# Set the \"use_core_fonts\" property", "save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count)", "#self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows", "#elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵", "to \"False\" to get such readers to display the document's filename. pdf_save_options =", "\"Arial\" for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for font in font_sources[1].get_available_fonts())) #", "preserve the visual appearance of the document as Aspose.Words convert it to PDF.", "def test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance", "converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\" property", "#ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to set the default zooming that a reader", "class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert only some", "the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to # automatically select the", "using Adobe Acrobat and find tags for elements such as the heading #", "equivalents. # Set the \"use_core_fonts\" property to \"False\" to not apply PDF Type", "pdf_image_compression # Set the \"jpeg_quality\" property to \"10\" to strengthen compression at the", "signing_time = datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm =", "x in annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8, len([a for a in link_annotations if", "0 R>>\", content) elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content", "if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): #", "New\" builder.writeln(\"The quick brown fox jumps over the lazy dog.\") # Create a", "aw.saving.PdfSaveOptions() # Set the \"text_compression\" property to \"PdfTextCompression.NONE\" to not apply any #", "\\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) 
def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name", "R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent 3", "R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", #", "of increased file size. # Set the \"additional_text_positioning\" property to \"False\" to render", "digitally and timestamp it. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\")", "85 654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85 68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1", "or disable subsetting when embedding fonts while rendering a document to PDF. doc", "headings with levels of 2, 3, and 4. 
# The output PDF document", "methods such as \"Field.Update()\", and \"Document.UpdateFields()\" # each time we need them to", "self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0", "4 (even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page", "# return len(self.warnings) # def contains(self, source: aw.WarningSource, type: aw.WarningType, description: str) ->", "property to \"False\" to save all hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR +", "as we save the document to .PDF. # Set the \"custom_properties_export\" property to", "tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows", "saved PDF document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details =", "self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def", "# Insert text with PAGE and NUMPAGES fields. These fields do not display", "they open new pages when we click on them. doc = aw.Document() builder", "if render_text_as_bookfold: for section in doc.sections: section = section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING #", "R/Filter /FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode", "format.\") #class SaveWarningCallback(aw.IWarningCallback): # def __init__(self): # self.save_warnings = aw.WarningInfoCollection() # def warning(self,", "self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the \"resolution\"", "4, 5, 6, 7, 8, 9, 10, 50, 100\", text_absorber.text) #elif numeral_format ==", "# Set the \"page_mode\" property to \"PdfPageMode.USE_NONE\" to get the PDF reader to", "fields as interactive objects in the output PDF. 
# Set the \"preserve_form_fields\" property", "\"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to discard # custom document properties as we save", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name)", "use glyphs from the U+0660 to U+0669 range as numbers. # Set the", "below in the outline. save_options.outline_options.headings_outline_levels = 5 # This document contains headings of", "# self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor", "builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier =", "\"Bookmarks in headers and footers.docx\") # Create a \"PdfSaveOptions\" object that we can", "#ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to set instructions for some PDF readers to", "\"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream:", "= aw.DocumentBuilder(doc) for i in range(5): builder.writeln(f\"Page {i + 1} ({'odd' if i", "0 (.) 0 ( ) 0 (N) 0 (o) 0 (v) 0 (e)", "of images in the PDF document. 
doc = aw.Document(MY_DIR + \"Images.docx\") # Create", "to \"PdfTextCompression.NONE\" to not apply any # compression to text when we save", "88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733 0]>>\", content) self.assertIn( b\"<</Type", "= aw.saving.PdfSaveOptions() # Enable encryption via the \"encryption_details\" property. save_options.encryption_details = encryption_details #", "with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to set the PDF standards compliance", "pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode):", "/Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content)", ",۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱\", text_absorber.text) def test_export_page_set(self): #ExStart", "can render as vector graphics. 
save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd", "= 5 # This document contains headings of levels 1 and 5, and", "#ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure Enhanced Windows Metafile-related rendering options when", "Acrobat Pro, # to display the value of the document's \"title\" built-in property", "doc.warning_callback = callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN'", "just the document itself. options.page_mode = page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name", "created above from the outline. # Set the \"create_outlines_for_headings_in_tables\" property to \"True\" to", "document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello", "#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self):", "Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location)", "images with transparent backgrounds while saving a document to PDF. 
doc = aw.Document()", "# and the next paragraph via \"View\" -> \"Show/Hide\" -> \"Navigation panes\" ->", "to save a document to the PDF format using the Save method and", "# \"<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff", "#bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count) #endif", "for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat", "contain one page starting from page two, which will only contain the second", "inside tables. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Create a table with", "rows. The first row, # whose text we will format in a heading-type", "file # is only intended as a supplement to the documentation, and is", "been generated.\"\"\" # return any(warning for warning in self.warnings # if warning.source ==", "this document with a reader such as Adobe Acrobat, we will need to", "of its respective heading. # Set the \"headings_outline_levels\" property to \"2\" to exclude", "we print this document on both sides of the pages, we can fold", "headings. 
# Set the \"create_missing_outline_levels\" property to \"False\" to ignore missing outline levels,", "# form = pdf_document.form # self.assertEqual(1, pdf_document.form.count) # field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\",", "content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R", "we may need access to any custom fonts if we edit the document.", "property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render the EMF+ part of an EMF+", "128 # Only the first two images from the document will be downsampled", "display and has no controls visible. # Set the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\"", "\"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks that are in all headers/footers. save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode", "use the # \"image_compression\" property to control the quality of the Jpeg images", "open this document, we will need to provide the password before accessing its", "the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks that are in all", "for scale_wmf_fonts in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF", "content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\",", "the \"digital_signature_details\" object of the \"SaveOptions\" object to # digitally sign the document", "options = aw.saving.PdfSaveOptions() # By default, Aspose.Words downsample all images in a document", 
"dog.\") # Configure our font sources to ensure that we have access to", "preserve custom properties within the output PDF document. # Set the \"custom_properties_export\" property", "converts the document to .PDF. save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\" property", "how that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # By", "clear(self): # \"\"\"Clears warning collection.\"\"\" # self.warnings.clear() # @property # def count(self): #", "b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85", "# Insert a combo box which will allow a user to choose an", "memory consumption when rendering large documents to PDF. doc = aw.Document(MY_DIR + \"Rendering.docx\")", "document to PDF. The larger the document, the bigger the impact that this", "render_text_as_bookfold: for section in doc.sections: section = section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once", "Set the \"jpeg_quality\" property to \"10\" to strengthen compression at the cost of", "size. # Set the \"additional_text_positioning\" property to \"False\" to render the document as", "aw.Document(MY_DIR + \"DrawingML shape effects.docx\") # Create a \"PdfSaveOptions\" object that we can", "\"Paragraphs.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0", "metafile records. 
#def test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR + \"WMF with image.docx\") #", "file.read().decode('utf-8') #if preserve_form_fields: # self.assertEqual(\"Please select a fruit: \", text_fragment_absorber.text) # self.assertIn(\"11 0", "header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name =", "#else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta) -1 (g) 1", "of the \"headings_outline_levels\" property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc", "def clear(self): # \"\"\"Clears warning collection.\"\"\" # self.warnings.clear() # @property # def count(self):", "choose an option from a collection of strings. builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0)", "the \"use_emf_embedded_to_wmf\" property to \"True\" to render embedded EMF data # for metafiles", "Set the \"memory_optimization\" property to \"False\" to save the document as a PDF", "the editing of annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\"", "pdf_document.form.count) # field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\",", "that of the device that is displaying the document. # Set the \"interpolate_images\"", "to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\" property to \"True\" to", "text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "images. # Their resolution should be lower than that of the device that", "reader does not apply any interpolation. save_options.interpolate_images = interpolate_images # When we open", "3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD", "# Note, when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is ignored. save_options.export_document_structure = True save_options.export_language_to_span_tag", "usable headings. # Set the \"create_missing_outline_levels\" property to \"False\" to ignore missing outline", "# to modify how that method converts the document to .PDF. save_options =", "#link_annotations = [x.as_link_annotation() for x in annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8, len([a for", "12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content)", "builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create a \"PdfSaveOptions\" object that we can", "\"<PASSWORD>\") # The default lifespan of the timestamp is 100 seconds. self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds())", "# 1 - Save only the even-numbered pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR +", "we open the document. 
options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document", "location of its respective heading. # Set the \"headings_outline_levels\" property to \"2\" to", "+ \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17:", "as a PDF. This will make sure that all the fields will display", "# return any(warning for warning in self.warnings # if warning.source == source and", "signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows", "0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1 10", "the document with it enabled. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR +", "= aw.saving.PdfSaveOptions() # By default, Aspose.Words downsample all images in a document that", "#ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render fallback shapes when saving to PDF. doc", "TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart", "the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\" property to", "\"True\" to include all headings within tables # in the outline, provided that", "self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233 806 0\", outline_item_collection[1].destination.to_string()) #", "= bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count) #endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables", "the \"jpeg_quality\" property to \"10\" to strengthen compression at the cost of image", "#ExSummary:Shows how to specify a compression type for all images in a document", "0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length", "Set the \"use_book_fold_printing_settings\" property to \"True\" to arrange the contents # in the", "sign the document when we save it to PDF. certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR +", "= aw.DocumentBuilder(doc) # Insert headings that can serve as TOC entries of levels", "0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "document outline entries for headings inside tables. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "self.assertEqual(\"'R2_XORPEN' binary raster operation is partly supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints", "(False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF Type 1 font", "converting a document to PDF. 
doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") # Create", "2001-2022 Aspose Pty Ltd. All Rights Reserved. # # This file is part", "\"EMBED_ALL\" to embed all fonts in the output PDF. # Set the \"font_embedding_mode\"", "all fonts in the output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\"", "#9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart", "pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: #", "#ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign a", "create_note_hyperlinks in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make footnotes", "#else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self):", "/Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 
654 0]>>\",", "8 0 R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\",", "0 R/X3 12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21,", "are no usable headings. # Set the \"create_missing_outline_levels\" property to \"False\" to ignore", "= interpolate_images # When we open this document with a reader such as", "\"missing\". # Set the \"create_missing_outline_levels\" property to \"True\" to include all missing levels", "\"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to # not export any bookmarks that are inside", "aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\" builder.writeln(\"The quick brown fox", "with a thumbnail for each page in the document. # Set the \"page_mode\"", "page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS,", "without warranty of any kind, either expressed or implied. import io import os", "how that method converts the document to .PDF. 
save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set", "test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level", "R /XYZ 85 68 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092", "/XYZ 85 654 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS", "is partly supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects formatting loss-related", "\"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR", "self.warnings: List[aw.WarningInfo] = [] # def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info)", "with text.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "to \"PdfImageCompression.JPEG\" to use the # \"image_compression\" property to control the quality of", "test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to", "open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: # content = 
file.read() #if rendering_mode in", "= aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\") builder.insert_image(img) # Create a", "٣, ٤, ٥, ٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format", "in the output PDF. options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode", "pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"])", "[168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g", "# content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows", "builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\" builder.writeln(\"The quick brown fox jumps over the lazy", "#self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields in", "with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set a different color space", "# 
Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks that are", "images and ignore the \"image_compression\" property's value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\",", "property to \"False\" to not export the document structure. options.export_document_structure = export_document_structure #", "654 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5", "\"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\") # Create a \"PdfSaveOptions\"", "\"True\" to attempt to fix incorrect # element positioning in the output PDF,", "aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier", "733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5", "100\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style", "to turn all footnote/endnote symbols # in the text act as links that,", "signature to the output document at this time. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length", "heading. # Set the \"headings_outline_levels\" property to \"2\" to exclude all headings whose", "# self.warnings.clear() # @property # def count(self): # return len(self.warnings) # def contains(self,", "= pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0, 0,", "Set the \"use_core_fonts\" property to \"False\" to not apply PDF Type 1 fonts.", "aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to set the PDF standards", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"text_compression\"", "\"export_document_structure\" property to \"False\" to not export the document structure. options.export_document_structure = export_document_structure", "# Set the \"use_book_fold_printing_settings\" property to \"False\" to render the PDF normally. 
options.use_book_fold_printing_settings", "with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend images with transparent backgrounds while", "= aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001)", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber()", "0 R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): #", "# self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode ==", "\"5\" to include all headings of levels 5 and below in the outline.", "reader to display a separate panel # with a thumbnail for each page", "how that method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set", "of these fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document =", "records are supported. # Otherwise, Aspose.Words will render the EMF part. 
save_options.metafile_rendering_options.emf_plus_dual_rendering_mode =", "(aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set Aspose.Words", "# The last two headings we have inserted above will not appear. save_options.outline_options.headings_outline_levels", "(r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri, result in parameters: with self.subTest(uri=uri,", "save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self): parameters = [", "create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks", "\"EMBED_NONSTANDARD\" to only allow nonstandard fonts' embedding in the output PDF. # Set", "outline, if possible. # Set the \"page_mode\" property to \"PdfPageMode.USE_NONE\" to get the", "aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12", "= aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") #", "collapse/expand all its sub-entries. 
# Set the \"expanded_outline_levels\" property to \"2\" to automatically", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\",", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL))", "options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content = file.read() if", "to render all images from the document in black and white. # The", "pages from the document. doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in", "to comply with the \"PDF/A-1b\" standard, # which aims to preserve the visual", "\"update_fields\" property to \"False\" to not update all the fields in a document", "page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode", "to embed every glyph of every embedded font in the output PDF. options.embed_full_fonts", "= file.read() if export_document_structure: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0", "0 obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11 0 obj\\r\\n19289 \", content) self.assertEqual(19216,", "may reduce artifacts. # Set the \"preblend_images\" property to \"False\" to render transparent", "5. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading", "# self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with", "aw.saving.PdfSaveOptions() # Set the \"preblend_images\" property to \"True\" to preblend transparent images #", "pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks in", "PDF so that they open new pages when we click on them. doc", "see the interpolation effect if we saved the document with it enabled. doc.save(ARTIFACTS_DIR", "display the title of the document as the title bar. doc = aw.Document()", "pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\",", "possible. # Set the \"page_mode\" property to \"PdfPageMode.USE_NONE\" to get the PDF reader", "#table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: # content", "second 3rd level entry, and so on. 
# In the outline, we can", "= aw.Document(MY_DIR + \"EMF.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\") #image_placement_absorber =", "export_document_structure in (False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve document", "== aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode", "(False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save a document to", "converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # The output PDF document", "pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode", "False # # Set the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to try to render", "#ExSummary:Shows how to configure the rendering quality of DrawingML effects in a document", "\"1\" to get the outline # to only register headings with heading levels", "612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28,", "the CMYK color space for all images in the saved PDF. # Aspose.Words", "test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to", "with PAGE and NUMPAGES fields. 
These fields do not display the correct value", "792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count)", "The source code in this file # is only intended as a supplement", "self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") # Create a \"PdfSaveOptions\" object", "text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\" + # \"<</Type /Annot/Subtype /Widget/P 5 0 R/FT", "self.assertIn(\"11 0 obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images in (False,", "its respective heading. # Set the \"headings_outline_levels\" property to \"1\" to get the", "reader such as Adobe Acrobat, we will need to zoom in on the", "\"Test Signing\", \"Aspose Office\", datetime.now()) # Create a timestamp authority-verified timestamp. options.digital_signature_details.timestamp_settings =", "can open it using Adobe Acrobat and find tags for elements such as", "PDF Type 1 font substitution. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name =", "metafile. # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render the", "of strings. builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0) # Create a \"PdfSaveOptions\" object that", "table of contents that lists headings in the document body. 
# Clicking on", "info.description) # self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with", "rendered PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create", "\"Transparent background logo.png\") builder.insert_image(img) # Create a \"PdfSaveOptions\" object that we can pass", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior", "and higher entries when we open the document. options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR +", "pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning in (False, True): with", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\",", "+ \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as", "outline, which is a table of contents that lists headings in the document", "need to provide the password before accessing its contents. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options)", "as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R>>stream\", file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE:", "Type 1 fonts. options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000,", "in programmatically interpreting our document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style =", "# self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode):", "heading-type style, will serve as the column header. builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1", "= pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as", "font in font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\" object that we can pass to", "This file is part of Aspose.Words. 
The source code in this file #", "= \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick brown fox jumps", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if", "#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: #", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to", "the symbol set from regional settings. options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options)", "\" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12", "# Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to # automatically", "convert only some of the pages in a document to PDF. doc =", "Flate compression to all images and ignore the \"image_compression\" property's value. 
pdf_save_options.image_color_space_export_mode =", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL: #", "699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 654 0]>>\", content) self.assertIn(", "aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to set instructions", "\"encryption_details\" property. save_options.encryption_details = encryption_details # When we open this document, we will", "hyperlinks using Javascript code # that forces readers to open these links in", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" +", "Set the \"open_hyperlinks_in_new_window\" property to \"True\" to save all hyperlinks using Javascript code", "1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit amet, consectetur adipiscing", "in the output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to only", "compliance level of saved PDF documents. doc = aw.Document(MY_DIR + \"Images.docx\") # Create", "entries of levels 1 and 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier", "using vector graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a \"PdfSaveOptions\" object", "can set our timeout period via the constructor. 
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\",", "#ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to set permissions on a", "]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12 0 R>>>>\", # content) # form", ".PDF. save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\" property to \"True\" to lower", "of its respective heading. # Set the \"headings_outline_levels\" property to \"1\" to get", "# and the contents will line up in a way that creates a", "method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF #", "< text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) #", "the outline entries from the 5th heading level are sub-entries of the second", "font in the output PDF. options.embed_full_fonts = True # Set the \"font_embedding_mode\" property", "# self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length)", "these links in new windows/browser tabs. # Set the \"open_hyperlinks_in_new_window\" property to \"False\"", "4 0 R>>\", data) # outline_item_collection = pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\",", "# # in our MetafileRenderingOptions object to the saving operation. 
# save_options =", "export bookmarks in the first section's header/footers. # Set the \"header_footer_bookmarks_export_mode\" property to", "for images in the document that it converts to PDF. # In most", "aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title in (False,", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance ==", "aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to set", "0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\" if", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() builder.write(\"<NAME>\")", "the rendering quality of DrawingML effects in a document as we save it", "# Set the \"use_book_fold_printing_settings\" property to \"True\" to arrange the contents # in", "in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality 
#ExFor:PdfImageCompression #ExSummary:Shows how to specify", "file: content = file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype", "\"headings_outline_levels\" property to \"2\" to exclude all headings whose levels are above 2", "PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1,", "# self.assertEqual(\"1 XYZ 157 733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212 711 0\",", "pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode", "= aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to get the PDF", "to \"DmlEffectsRenderingMode.FINE\" to # render DrawingML effects with more accuracy and also with", "+ \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content =", "description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows", "builder.writeln(\"The quick brown fox jumps over the lazy dog.\") # Configure our font", "increasing the duration of the operation. 
# Set the \"memory_optimization\" property to \"False\"", "the outline in the output PDF. save_options.outline_options.default_bookmarks_outline_level = 1 # Set the \"header_footer_bookmarks_export_mode\"", "(b) 0 (e) 0 (r) -1 ( ) 1 (2) -1 (0) 0", "\"25\" to give the zoom factor a value of 25%. options = aw.saving.PdfSaveOptions()", "is displaying the document. # Set the \"interpolate_images\" property to \"False\" to make", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: # content", "upon saving a document.\"\"\" # def __init__(self): # self.warnings = aw.WarningInfoCollection() # def", "in a document as we save it to PDF. doc = aw.Document(MY_DIR +", "0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST,", "4 (even)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows", "\"PdfPageMode.FULL_SCREEN\" to get the PDF reader to open the saved # document in", "#ExFor:NumeralFormat #ExSummary:Shows how to set the numeral format used when saving to PDF.", "#page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for x in annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8, len([a", "aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to apply text compression", "in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart 
#ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows", "when saving to PDF. doc = aw.Document(MY_DIR + \"EMF.docx\") # Create a \"PdfSaveOptions\"", "pages from # our document to save in an output PDF document based", "When we open this document using a reader such as Adobe Acrobat, we", "documents to PDF. doc = aw.Document(MY_DIR + \"Rendering.docx\") # Create a \"PdfSaveOptions\" object", "any bookmarks that are inside headers/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\"", "in the document structure to export the text language. doc = aw.Document() builder", "of saved PDF documents. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\"", "# metafile_rendering_options = aw.saving.MetafileRenderingOptions() # # Set the \"emulate_raster_operations\" property to \"False\" to", "Aspose.Words. The source code in this file # is only intended as a", "the downsampling to # images with a resolution that is above 128 ppi.", "signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location)", "to change the resolution of images in the PDF document. 
doc = aw.Document(MY_DIR", "# Set the \"resolution\" property to \"36\" to downsample all images to 36", "open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\",", "folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\" for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name ==", "self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export custom properties while converting a", "save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator =", "get the PDF reader to display a separate panel # that allows us", "True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to optimize memory consumption", "R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page", "document in black and white. # The size of the output document may", "document to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # The output PDF document will contain", "110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type", "= [] # def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) # def", "to control the quality of all images that end up in the output", "# pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts in (False,", "zooming that a reader applies when opening a rendered PDF document. doc =", "for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how", "b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0", "to update all the fields in a document immediately before saving it to", "portion of the document starting from the second page. options.page_set = aw.saving.PageSet(1) #", "into a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # \"Arial\" is", "that method converts the document to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # The output", "# Set the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to get the PDF reader to", "\\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter", "the text act as links that, upon clicking, take us to their respective", "\"PdfCustomPropertiesExport.NONE\" to discard # custom document properties as we save the document to", "def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions", "options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber =", "the \"create_note_hyperlinks\" property to \"True\" to turn all footnote/endnote symbols # in the", "render the PDF normally. 
options.use_book_fold_printing_settings = render_text_as_bookfold # If we are rendering the", "aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create a \"PdfSaveOptions\" object that", "(False, True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend images with transparent", "to use the # \"image_compression\" property to control the quality of the Jpeg", "Create a digital signature and assign it to our SaveOptions object to sign", "R /XYZ 85 654 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415", "self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create a \"Span\" tag in the document", "save_options = aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF # The output PDF document will contain", "#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\" if update_fields else \"Page of", "0 R /XYZ 85 654 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376", "#def test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR + \"WMF with image.docx\") # metafile_rendering_options =", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = 
aspose.pdf.text.TextFragmentAbsorber()", "R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content)", "saving. # Set the \"update_fields\" property to \"True\" to iterate through all the", "size on the page. doc = aw.Document(MY_DIR + \"WMF with text.docx\") # Create", "tab that belongs to this document. # Set the \"display_doc_title\" to \"False\" to", "page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\",", "the document to .PDF. # Set the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get", "PDF. # Aspose.Words will also apply Flate compression to all images and ignore", "structure to export the text language. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello", "save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\")", "# including the two fonts in our document, with their PDF Type 1", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 - Save every page: options.page_set = aw.saving.PageSet.all", "need to zoom in on the image # to see the interpolation effect", "85 654 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W", "0>>/Dest[5 0 R /XYZ 258 711 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305", "of contents that lists headings in the document body. 
# Clicking on an", "0>>/Dest[5 0 R /XYZ 212 711 0]>>\", content) else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\",", "# Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a simplified version", "encryption via the \"encryption_details\" property. save_options.encryption_details = encryption_details # When we open this", "1 (, 10. November) -1 ( ) 1 (2) -1 (018)] TJ\", #", "Set the \"export_document_structure\" property to \"True\" to make the document structure, such tags,", "fruit: \") # Insert a combo box which will allow a user to", "٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰", "of the output document may be larger with this setting. # Set the", "builder = aw.DocumentBuilder(doc) # Create a table with three rows. The first row,", "# Configure the \"digital_signature_details\" object of the \"SaveOptions\" object to # digitally sign", "aspose.pydrawing as drawing from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase):", "# save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count) #class", "annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\" object that we", "test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows", "the cost of image quality. 
pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd", "Set the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to try to render every metafile using", "down the middle at once, # and the contents will line up in", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\" property to \"True\"", "print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) # def __getitem__(self, i) -> aw.WarningInfo: # return self.warnings[i]", "the \"create_note_hyperlinks\" property to \"False\" not to have footnote/endnote symbols link to anything.", "2 == 0 else 'even'})\") if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a", "#self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\",", "/Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733 0]>>\",", "Set the \"embed_full_fonts\" property to \"False\" to apply subsetting to fonts, saving only", "doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image can not be", "to filter out a set of pages from # our document to save", "with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save hyperlinks in a document we", 
"in (False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform interpolation on", "#ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure Enhanced Windows Metafile-related rendering options when saving to", "test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to set the default zooming that", "0 (v) 0 (e) 0 (m) 0 (b) 0 (e) 0 (r) -1", "self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning in (False, True):", "0 R/Filter /FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if", "bookmarks at the first level of the outline in the output PDF. save_options.outline_options.default_bookmarks_outline_level", "in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write additional text", "\"36\" to downsample all images to 36 ppi. options.downsample_options.resolution = 36 # Set", "List[aw.WarningInfo] = [] # def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) #", "PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph()", "self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def __init__(self): # self.warnings: List[aw.WarningInfo] = [] #", "#ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve document structure elements, which can assist in programmatically", "to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\")", "aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4,", "def test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart", "allow the editing of annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a", "# self.assertEqual(1, pdf_document.form.count) # field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count)", "larger the document, the bigger the impact that this will have. options.text_compression =", "# Set the \"interpolate_images\" property to \"False\" to make it so that the", "we save it as a PDF. This will make sure that all the", "#if preserve_form_fields: # self.assertEqual(\"Please select a fruit: \", text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\"", "only contain the second page. 
doc.save(stream, options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\")", "options = aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to discard all", "via the # \"Content\" navigation pane of Adobe Acrobat at the cost of", "= page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False,", "self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the \"resolution\" property to \"36\"", "+ \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) <", "\"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF reader to # apply a", "file: # data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3", "0 (1) 0 (8)] TJ\", # tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\"))", "via the constructor. 
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url)", "# self.assertIn( # b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox", "that they have a heading level that is no larger than the value", "themselves. options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\",", "Note, when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is ignored. save_options.export_document_structure = True save_options.export_language_to_span_tag =", "open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content = file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\" property to \"True\" to", "aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE,", "brown fox jumps over the lazy dog.\") # Configure our font sources to", "else: self.assertIn(\"11 0 obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images in", "to optimize memory consumption when rendering large documents to PDF. 
doc = aw.Document(MY_DIR", "\"page_mode\" property to \"PdfPageMode.USE_NONE\" to get the PDF reader to display just the", "#with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream: #", "embedding in the output document may be desirable. # Set the \"embed_full_fonts\" property", "from the second page. options.page_set = aw.saving.PageSet(1) # This document will contain one", "aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by disallowing all permissions. encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Below are three \"page_set\"", "R /XYZ 258 711 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033", "68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733 0]>>\", content) self.assertIn(", "fields. These fields do not display the correct value in real time. #", "value\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "bool: # \"\"\"Returns True if a warning with the specified properties has been", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\"", "aw.saving.PdfSaveOptions() # Set the \"compliance\" property to \"PdfCompliance.PDF_A1B\" to comply with the \"PDF/A-1b\"", "column header. builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL", "the output PDF. 
pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document =", "#ExSummary:Shows how to set the default zooming that a reader applies when opening", "self.assertEqual(\"1 XYZ 212 711 0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for", "test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows", "how to create a \"Span\" tag in the document structure to export the", "Set the \"update_fields\" property to \"True\" to iterate through all the document #", "Set the \"preblend_images\" property to \"True\" to preblend transparent images # with a", "rendering to PDF. doc = aw.Document(MY_DIR + \"Bookmarks in headers and footers.docx\") #", "\"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 2 (even)\\r\\n\"", "+ b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11 0", "# Set the \"export_document_structure\" property to \"True\" to make the document structure, such", "of a book fold. doc = aw.Document(MY_DIR + \"Paragraphs.docx\") # Create a \"PdfSaveOptions\"", "11 0 R/X3 12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) #", "{i + 1} ({'odd' if i % 2 == 0 else 'even'})\") if", "how to create PDF document outline entries for headings inside tables. doc =", "property to \"10\" to strengthen compression at the cost of image quality. 
pdf_save_options.jpeg_quality", "self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\",", "open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows", "R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions", "builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading", "= aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): #", "transparent backgrounds while saving a document to PDF. doc = aw.Document() builder =", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()", "standard font, and \"Courier New\" is a nonstandard font. 
builder.font.name = \"Arial\" builder.writeln(\"Hello", "builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create a \"PdfSaveOptions\" object that we can", "aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else", "# and collapse all level and 3 and higher entries when we open", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\" property to", "(False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF fonts scaling according", "self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make footnotes and endnotes function as hyperlinks.", "aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings of levels 1 to 5. 
builder.paragraph_format.style_identifier", "#pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3, 4, 5, 6, 7,", "1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create a \"PdfSaveOptions\" object", "footers.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content)", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create a \"PdfSaveOptions\" object that", "aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK:", "= aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 # When we open this", "aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\"))", "= aw.DocumentBuilder(doc) # Insert text with PAGE and NUMPAGES fields. 
These fields do", "signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK):", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"preblend_images\"", "+ \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if", "method converts the document to .PDF. pdf_options = aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\"", "pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_compression\" property to \"PdfImageCompression.AUTO\" to use the", "= aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions)", "our timeout period via the constructor. 
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0,", "4 0 R>>\", content) elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3 0", "#ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to set permissions on a saved PDF", "R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF 15 0", "# self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17,", "# We can set our timeout period via the constructor. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\",", "property to \"True\" to make the document structure, such tags, available via the", "\"My value\") # Create a \"PdfSaveOptions\" object that we can pass to the", "2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks", "the document as a PDF normally. save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options)", "= aw.saving.PdfSaveOptions() # Set the \"preblend_images\" property to \"True\" to preblend transparent images", "\"PdfCustomPropertiesExport.METADATA\" # to preserve custom properties in an XMP packet. 
options.custom_properties_export = pdf_custom_properties_export_mode", "pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3,", "field.options.count) # self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please select a fruit: Apple\", text_fragment_absorber.text) #", "\"\"\"Clears warning collection.\"\"\" # self.warnings.clear() # @property # def count(self): # return len(self.warnings)", "text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows", "\"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" # to preserve custom properties within the output PDF", "self.assertEqual(\"2 XYZ 85 48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc = aw.Document(MY_DIR +", "#ExSummary:Shows how to WMF fonts scaling according to metafile size on the page.", "options.export_document_structure = export_document_structure # Suppose we export document structure while saving this document.", "the document to PDF. The larger the document, the bigger the impact that", "levels in the outline, # leaving blank outline entries since there are no", "pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0, 0, 0),", "operation. 
# This is the preferable option if we know that all our", "the outline of a saved PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "1 - Save only the even-numbered pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\",", "ut labore et dolore magna aliqua.\") # Create a \"PdfSaveOptions\" object that we", "R>>\\r\\n\", content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata", "type: aw.WarningType, description: str) -> bool: # \"\"\"Returns True if a warning with", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ", "#pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN,", "self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please select a", "#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif", "47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85 806 0\", outline_item_collection[3].destination.to_string())", "with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to set the numeral 
format used", "digitally sign the document as we render it with the \"save\" method. signing_time", "in parameters: with self.subTest(uri=uri, result=result): doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri,", "the document is using. The file will be considerably smaller, # but we", "#ExSummary:Shows how to set the numeral format used when saving to PDF. doc", "<</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "PDF document based on the parity of their page numbers. # 1 -", "self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED,", "line up in a way that creates a booklet. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options)", "supported. # Otherwise, Aspose.Words will render the EMF part. save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode #", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations =", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # The output PDF", "\"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "is\", without warranty of any kind, either expressed or implied. 
import io import", "= aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: # content =", "document. options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "Set the \"interpolate_images\" property to \"False\" to make it so that the reader", "simplified version of DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format", "normally. save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self): parameters =", "to \"1\" to display all # bookmarks at the first level of the", "0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" +", "options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\")", "= aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() 
builder.write(\"<NAME>\") builder.end_table()", "\"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: # content = file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, #", "tables. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Create a table with three", "world!\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\",", "\"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the EMF+ part of an EMF+", "#ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make footnotes and endnotes function as hyperlinks. doc =", "R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type", "that a reader applies when opening a rendered PDF document. doc = aw.Document()", "positioning in the output PDF, should there be any, at the cost of", "will format in a heading-type style, will serve as the column header. builder.start_table()", "Set the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to use european numerals. # Set the", "the size of the metafile on the page. # Set the \"scale_wmf_fonts_to_metafile_size\" property", "the pages down the middle at once, # and the contents will line", "Save method and the PdfSaveOptions class. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please", "how that method converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set", "\"Page 3 (odd)\\r\\n\" + # \"Page 4 (even)\\r\\n\" + # \"Page 5 (odd)\",", "from the outline. # The last two headings we have inserted above will", "\"Banana\", \"Cherry\"], 0) # Create a \"PdfSaveOptions\" object that we can pass to", "#elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())", "which may reduce artifacts. # Set the \"preblend_images\" property to \"False\" to render", "pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\",", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # By default, Aspose.Words", "# def __init__(self): # self.warnings: List[aw.WarningInfo] = [] # def warning(info: aw.WarningInfo): #", "1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create a \"PdfSaveOptions\" object that we can pass to", "method # to modify how that method converts the document to .PDF. save_options", "before saving. # Set the \"update_fields\" property to \"True\" to iterate through all", "8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn(", "0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else:", "display # the most accurate values in the PDF. options.update_fields = update_fields #", "document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"use_book_fold_printing_settings\" property to \"True\"", "# Set the \"display_doc_title\" to \"True\" to get some PDF readers, such as", "aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "immediately before saving it to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) #", "builder.writeln(\"Heading 1.2.2.2.2\") # Create a \"PdfSaveOptions\" object that we can pass to the", "\"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create a \"PdfSaveOptions\"", "#action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False, True): with", "# aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4", "to comply with the \"1.7\" standard. # Set the \"compliance\" property to \"PdfCompliance.PDF_A1A\"", "= aw.saving.PdfSaveOptions() # Below are three \"page_set\" properties that we can use to", "to render fallback shapes when saving to PDF. doc = aw.Document(MY_DIR + \"DrawingML", "+ \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content = file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'),", "export the text language. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola", "== aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \" + info.description) # self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self):", "# self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster operation is partly supported.\", # callback.warnings[0].description)", "bookmarks in the first section's header/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\"", "\"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def", "there be any, at the cost of increased file size. 
# Set the", "tables, # such as the one we have created above from the outline.", "if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \" + info.description) # self.warnings.warning(info) ##ExEnd", "= CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content = file.read().decode('utf-8') if", "fall back to bitmap when # # it encounters a metafile, which will", "self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #", "#9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "headers/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to # only export bookmarks", "\"SaveOptions\" object to # digitally sign the document as we render it with", "api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set", "#ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process bookmarks in headers/footers in a", "aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\" object that we can pass to", "outline entries since there are no usable headings. # Set the \"create_missing_outline_levels\" property", "converts the document to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\" property", "any custom fonts if we edit the document. options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR +", "aw.Document(MY_DIR + \"EMF.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "# Set the \"create_note_hyperlinks\" property to \"True\" to turn all footnote/endnote symbols #", "aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to set instructions for some", "of the \"SaveOptions\" object to # digitally sign the document as we render", "appear. save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR", "in the PDF document. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\"", "#self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields in (False, True): with self.subTest(update_fields=update_fields): #ExStart", "#5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\",", "modify how that method converts the document to .PDF. # Set the \"zoom_behavior\"", "aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width)", "PdfSaveOptions class. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please select a fruit: \")", "a document to the PDF format in the form of a book fold.", "# render DrawingML effects with more accuracy and also with more processing cost.", "to .PDF. options = aw.saving.PdfSaveOptions() # Since our document contains a custom font,", "enable/disable PDF Type 1 font substitution. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name", "setting. # Set the \"color_mode\" property to \"NORMAL\" to render all images in", "property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the CMYK color space for all images", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to", "property to \"PdfCompliance.PDF17\" to comply with the \"1.7\" standard. # Set the \"compliance\"", "b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\") as stream: # Create a \"PdfSaveOptions\" object that", "= pdf_image_compression # Set the \"jpeg_quality\" property to \"10\" to strengthen compression at", "the first section's header/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to #", "test_embed_core_fonts(self): for use_core_fonts in (False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable", "PDF. 
# Set the \"embed_full_fonts\" property to \"False\" to apply subsetting to fonts,", "== aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file:", "embedding in the output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONE\" to", "apply Flate compression to all images and ignore the \"image_compression\" property's value. pdf_save_options.image_color_space_export_mode", "fonts' embedding in the output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONE\"", "numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴", "4th level outline entry, # the 4th and 5th heading level entries are", "contains a custom font, embedding in the output document may be desirable. #", "1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\")", "self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count) #elif", "/Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS", "+ b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature)", "i in range(5): builder.writeln(f\"Page {i + 1} ({'odd' if i % 2 ==", "# The output PDF document will treat outline levels 2, 3, and 4", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + 
\"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for", "100 seconds. self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set our timeout period via the", "the \"image_compression\" property's value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document", "# to use the CMYK color space for all images in the saved", "+ \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset)", "MetafileRenderingOptions object to the saving operation. # save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options =", "PDF document will treat outline levels 2, 3, and 4 as \"missing\". #", "of every embedded font in the output PDF. # The document's size may", "in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make footnotes and", "aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\" property to \"True\" to embed every glyph of", "to process bookmarks in headers/footers in a document that we are rendering to", "forces readers to open these links in new windows/browser tabs. 
# Set the", "data) # self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages", "# 3 - Save every page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options)", "8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif", "PDF document. # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" # to preserve custom", "aw.WarningInfo: # return self.warnings[i] # def clear(self): # \"\"\"Clears warning collection.\"\"\" # self.warnings.clear()", "+ \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self): for", "which will only contain the second page. 
doc.save(stream, options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original font", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document", "the \"display_doc_title\" to \"False\" to get such readers to display the document's filename.", "# self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type())", "# with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO,", "#self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window", "= aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" # to substitute DML", "79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype", "/Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68 0]>>\",", "\"display_doc_title\" to \"False\" to get such readers to display the document's filename. 
pdf_save_options", "#8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\",", "# and treat the outline level 5 headings as level 2. save_options.outline_options.create_missing_outline_levels =", "aw.WarningSource, type: aw.WarningType, description: str) -> bool: # \"\"\"Returns True if a warning", "#self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold", "PDF reader # also to display the outline, if possible. # Set the", "it to PDF. certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test", "\" + \"sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\")", "a simplified version of DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\"", "permissions on a saved PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello", "to use the CMYK color space for all images in the saved PDF.", "text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__", "converts the document to .PDF. 
pdf_options = aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\" property", "for export_document_structure in (False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve", "use of all fonts if we edit the PDF. # Set the \"embed_full_fonts\"", "\") builder.insert_field(\"PAGE\", \"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create", "\"embed_full_fonts\" property to \"True\" to embed every glyph of every embedded font in", "saving to PDF. doc = aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\") # Create a", "707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR", "<</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 654 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "the PDF document. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object", "panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\")", "that we can use to filter out a set of pages from #", "# self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE,", ") 0 (N) 0 (o) 0 (v) 0 (e) 0 (m) 0 (b)", "options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 # When we open this document using", "has no controls visible. 
# Set the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to get", "# Set the \"emulate_raster_operations\" property to \"False\" to fall back to bitmap when", "pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB,", "document structure while saving this document. In that case, # we can open", "update_fields # We can clone PdfSaveOptions objects. options_copy = options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\",", "self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please select a fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content)", "= aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to display the outline", "the EMF part. save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set the \"use_emf_embedded_to_wmf\" property to \"True\"", "value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart", "saving options property. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object", "# Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to # only export bookmarks in", "will line up in a way that creates a booklet. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\",", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) # elif pdf_image_compression", "case, # we can open it using Adobe Acrobat and find tags for", "PDF document outline entries for headings inside tables. doc = aw.Document() builder =", "= aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create a \"PdfSaveOptions\" object that we", "True # Set the \"font_embedding_mode\" property to \"EMBED_ALL\" to embed all fonts in", "self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5", "signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason)", "numbers. # Set the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to use european numerals. 
#", "effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode", "to exclude all headings whose levels are above 4 from the outline. options.outline_options.headings_outline_levels", "document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"interpolate_images\" property to \"True\"", "\"\"\"Prints and collects formatting loss-related warnings that occur upon saving a document.\"\"\" #", "400, pdf_doc_image_stream) # elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) #", "== aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False,", "[] # def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) # def __getitem__(self,", "automatically expand all heading level 2 and lower outline entries # and collapse", "signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url", "+ 
\"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR +", "else 3, bookmarks.count) #endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables):", "document to .PDF. options = aw.saving.PdfSaveOptions() # By default, Aspose.Words downsample all images", "footprint of large documents' saving operations # at the cost of increasing the", "converts the document to .PDF. # Set the \"display_doc_title\" to \"True\" to get", "\"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7", "builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create", "builder.write(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt", "fonts # that format text within WMF images according to the size of", "open the document. options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document =", "as file: # data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages", "\"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type", "vector graphics. 
save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "0>>/Dest[5 0 R /XYZ 202 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305", "saving a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) img =", "12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21, image_placement_absorber.image_placements.count) #", "property to \"1\" to display all # bookmarks at the first level of", "outline, # leaving blank outline entries since there are no usable headings. #", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: #", "for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how", "\"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "the # \"Content\" navigation pane of Adobe Acrobat at the cost of increased", "self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure", "discard # custom document 
properties as we save the document to .PDF. #", "aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart", "method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\"", "signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0", "on. # In the outline, we can click on the arrow of the", "footnotes and endnotes function as hyperlinks. doc = aw.Document(MY_DIR + \"Footnotes and endnotes.docx\")", "in a document as we export it to PDF. doc = aw.Document() builder", "custom fonts if we edit the document. 
options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\",", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream) #with", "in (False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to optimize", "for elements such as the heading # and the next paragraph via \"View\"", "attempt to fix incorrect # element positioning in the output PDF, should there", "we will need to zoom in on the image # to see the", "obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11", "some fonts, # including the two fonts in our document, with their PDF", "# b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3 0 R/Contents 6 0", "subsetting to fonts, saving only the glyphs # that the document is using.", "if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore", "# \"\"\"Clears warning collection.\"\"\" # self.warnings.clear() # @property # def count(self): # return", "sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". 
if render_text_as_bookfold: for section in doc.sections: section = section.as_section() section.page_setup.multiple_pages", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ", "pdf_document.form # self.assertEqual(1, pdf_document.form.count) # field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3,", "elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "#ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content = file.read() if open_hyperlinks_in_new_window:", "of several such \"sub-entries\". # In our document, the outline entries from the", "/XYZ 258 711 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS", "806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85 48 0\", outline_item_collection[4].destination.to_string())", "aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2]", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)", "font sources. 
aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\",", "we can fold all the pages down the middle at once, # and", "sign a generated PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of", "page. # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"False\" to # preserve the default", "hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR +", "numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣, ٤, ٥, ٦, ٧, ٨, ٩,", "the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to # render DrawingML effects with more accuracy", "drawing from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self):", "property to control the quality of the Jpeg images that end up in", "= file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata", "content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field()", "PDF, should there be any, at the cost of increased file size. #", "that, upon clicking, take us to their respective footnotes/endnotes. 
# Set the \"create_note_hyperlinks\"", "5th heading level are sub-entries of the second 4th level outline entry, #", "[\"Apple\", \"Banana\", \"Cherry\"], 0) # Create a \"PdfSaveOptions\" object that we can pass", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content", "#text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" +", "right before a save operation. # This is the preferable option if we", "how to display the title of the document as the title bar. doc", "images that end up in the output PDF. # Set the \"image_compression\" property", "save the document to PDF. # Set the \"text_compression\" property to \"PdfTextCompression.FLATE\" to", "pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields in (False,", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading", "to not apply PDF Type 1 fonts. 
options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\",", "aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC, aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to set the", "# Create a \"PdfSaveOptions\" object that we can pass to the document's \"save\"", "Set the \"create_missing_outline_levels\" property to \"True\" to include all missing levels in the", "\"image_compression\" property's value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document =", "+ \"Bookmarks in headers and footers.docx\") # Create a \"PdfSaveOptions\" object that we", "\"image_compression\" property to \"PdfImageCompression.AUTO\" to use the # \"image_compression\" property to control the", "\"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode ==", "level that is no larger than the value of the \"headings_outline_levels\" property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables", "aw.Document(MY_DIR + \"Bookmarks in headers and footers.docx\") # Create a \"PdfSaveOptions\" object that", "numerals. # Set the \"numeral_format\" property to \"NumeralFormat.SYSTEM\" to determine the symbol set", "to \"GRAYSCALE\" to render all images from the document in black and white.", "= aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback =", "document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\"", "ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to", "property to \"False\" not to have footnote/endnote symbols link to anything. options.create_note_hyperlinks =", "self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior", "save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()", "text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to get the PDF reader # also to", "(False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display the title of", "TOC entries of levels 1, 2, and then 3. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading)", "in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows how to set", "method # to modify how that method converts the document to .PDF. #", "\"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content = file.read()", "option from a collection of strings. builder.insert_combo_box(\"MyComboBox\", [\"Apple\", \"Banana\", \"Cherry\"], 0) # Create", "\"EMF.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts =", "positioning operators.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "(aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" +", "that the reader does not apply any interpolation. save_options.interpolate_images = interpolate_images # When", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts", "are supported. # Otherwise, Aspose.Words will render the EMF part. 
save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Below are three", "# self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3 0 R/Contents", "٨, ٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰", "= aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\" builder.writeln(\"The quick brown", "the document at # their current values and display them as plain text", "with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how to specify a compression type", "Arial and Times New Roman fonts into a PDF document. doc = aw.Document()", "of levels 1 to 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier =", "1, 2, and then 3. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier =", "to the output document at this time. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR", "\"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3,", "signature and assign it to our SaveOptions object to sign the document when", "pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB,", "larger with this setting. # Set the \"color_mode\" property to \"NORMAL\" to render", "accurate values. builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello", "for x in annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8, len([a for a in link_annotations", "+ \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) #", "header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions 
#ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode", "0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\",", "headings of levels 1 to 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier", "for interpolate_images in (False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform", "\"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB,", "EMF data # for metafiles that we can render as vector graphics. save_options.metafile_rendering_options.use_emf_embedded_to_wmf", "b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11 0", "warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback # save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode", "effects with more accuracy and also with more processing cost. 
options.dml_effects_rendering_mode = effects_rendering_mode", "0 R /XYZ 212 711 0]>>\", content) else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content)", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style =", "aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page", "apply subsetting to fonts, saving only the glyphs # that the document is", "with more accuracy and also with more processing cost. options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML,", "of large documents' saving operations # at the cost of increasing the duration", "(False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write additional text positioning", "\"text_compression\" property to \"PdfTextCompression.NONE\" to not apply any # compression to text when", "that are inside headers/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to #", "\"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3,", "the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"preblend_images\" property to", "options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\" method will apply", "pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: #", "# @property # def count(self): # return len(self.warnings) # def contains(self, source: aw.WarningSource,", "supplement to the documentation, and is provided # \"as is\", without warranty of", "# Set the \"update_fields\" property to \"False\" to not update all the fields", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" # to", "file: content = file.read() with io.BytesIO() as stream: image.save(stream) if preblend_images: self.assertIn(\"11 0", "arrange the contents # in the output PDF in a way that helps", "inside headers/footers. # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to # only export", "level of the outline in the output PDF. 
save_options.outline_options.default_bookmarks_outline_level = 1 # Set", "# self.save_warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.WarningType ==", "+ \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as", "save the document to .PDF. # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" #", "timedelta, timezone import aspose.words as aw import aspose.pydrawing as drawing from api_example_base import", "option to optimize memory consumption when rendering large documents to PDF. doc =", "= aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" + # \"Page 4 (even)\", text_absorber.text) #pdf_document", "[0 0 0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "= doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod", "and white. # The size of the output document may be larger with", "#ExEnd def test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for", "output document at this time. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature)", "document. 
In that case, # we can open it using Adobe Acrobat and", "self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please select a fruit: Apple\", text_fragment_absorber.text)", "#bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels else", "to \"HeaderFooterBookmarksExportMode.NONE\" to # not export any bookmarks that are inside headers/footers. #", "# self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) def", "# save_options.metafile_rendering_options = metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback #", "link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback to", "#ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to set permissions on a saved PDF document.", "#ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content = file.read() if pdf_custom_properties_export_mode", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\" property to \"True\" to", "upon clicking, take us to their respective footnotes/endnotes. # Set the \"create_note_hyperlinks\" property", "document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings that can serve", "as file: content = file.read() if interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type", "# CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50,", "with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to save a document to the PDF", "if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\")", "Set the \"interpolate_images\" property to \"True\" to get the reader that opens this", "Insert a combo box which will allow a user to choose an option", "# it encounters a metafile, which will require raster operations to render in", "options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\")", "content) else: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents 6", "to render embedded EMF data # for metafiles that we can render as", "0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data) # outline_item_collection =", "self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster operation is partly supported.\", # callback.warnings[0].description) #class", "+ \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content =", "# that forces readers to open these links in new windows/browser tabs. 
#", "# Set the \"interpolate_images\" property to \"True\" to get the reader that opens", "as well as preserving the document structure of the original document. # This", "The document's size may become very large, but we will have full use", "aw.saving.NumeralFormat.EUROPEAN, aw.saving.NumeralFormat.SYSTEM): with self.subTest(numeral_forma=numeral_format): #ExStart #ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to set the numeral", "property to \"EMBED_NONE\" to not embed any fonts in the output PDF. options.font_embedding_mode", "how that method converts the document to .PDF. # Set the \"zoom_behavior\" property", "= 4 # If an outline entry has subsequent entries of a higher", "+ \"Rendering.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "(v) 0 (e) 0 (m) 0 (b) 0 (e) 0 (r) -1 (", "\"memory_optimization\" property to \"False\" to save the document as a PDF normally. save_options.memory_optimization", "embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the", "outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ 85 48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self):", "outline # to only register headings with heading levels that are no larger", "outline. 
options.outline_options.headings_outline_levels = 4 # If an outline entry has subsequent entries of", "#ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign a saved PDF", "/UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data) # outline_item_collection = pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) #", "make it so that the reader does not apply any interpolation. save_options.interpolate_images =", "< text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to", "+ b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI", "options = aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\" property to \"True\" to save all", "bar. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows bar", "additional text positioning operators. doc = aw.Document(MY_DIR + \"Text positioning operators.docx\") # Create", "converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"page_index\" to", "self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def", "#ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels #ExSummary:Shows how to convert a whole document to", "will see the document scaled at 1/4 of its actual size. doc.save(ARTIFACTS_DIR +", "the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to discard all DrawingML effects. # Set the", "that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Create a", "Restore the original font sources. aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts", "of annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\" object that", "will take us to the location of its respective heading. # Set the", "In most cases, the color space will be RGB. # Set the \"image_color_space_export_mode\"", "at the cost of increasing the duration of the operation. 
# Set the", "\"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" # to preserve custom properties in an XMP packet.", "doc.warning_callback = warning_callback # save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR", "to \"2\" to automatically expand all heading level 2 and lower outline entries", "pdf_save_options = aw.saving.PdfSaveOptions() # The output PDF document will contain an outline, which", "a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings that", "data) # outline_item_collection = pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\")", "the color space will be RGB. # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\"", "properties as we save the document to .PDF. # Set the \"custom_properties_export\" property", "1, 1, 0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\",", "#ExSummary:Shows how to create a \"Span\" tag in the document structure to export", "= update_fields # We can clone PdfSaveOptions objects. 
options_copy = options.clone() doc.save(ARTIFACTS_DIR +", "# self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count)", "+ \"Transparent background logo.png\") builder.insert_image(img) # Create a \"PdfSaveOptions\" object that we can", "# Set the \"expanded_outline_levels\" property to \"2\" to automatically expand all heading level", "of levels 5 and below in the outline. save_options.outline_options.headings_outline_levels = 5 # This", "incorrect # element positioning in the output PDF, should there be any, at", "consectetur adipiscing elit, \" + \"sed do eiusmod tempor incididunt ut labore et", "== aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self):", "# to substitute DML shapes with their fallback shapes. # Set the \"dml_rendering_mode\"", "way that creates a booklet. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "document with it enabled. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\",", "+ \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\") # Create a", "(aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render fallback shapes", "R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0,", "pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution", "print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts):", "#ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign a generated PDF", "outline levels that do not contain any corresponding headings when saving a PDF", "the text language. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\")", "= True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with", "separate panel # that allows us to work with any layers present in", "we convert to PDF so that they open new pages when we click", "#ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work with outline levels that do not contain", "get Aspose.Words to # automatically select the color space for images in the", "every glyph of every embedded font in the output PDF. options.embed_full_fonts = True", "aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\" property to \"True\" to attempt", "# but we may need access to any custom fonts if we edit", "def test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change the", "such readers to display the document's filename. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title", "the U+06F0 to U+06F9 range as numbers. # Set the \"numeral_format\" property to", "compression to all images and ignore the \"image_compression\" property's value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode", "embed all fonts in the output PDF. # Set the \"font_embedding_mode\" property to", "to .PDF. options = aw.saving.PdfSaveOptions() # By default, Aspose.Words downsample all images in", "the output PDF. 
save_options.outline_options.default_bookmarks_outline_level = 1 # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\"", "# self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count)", "\"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create a \"PdfSaveOptions\" object that we can pass", "document contains headings of levels 1 and 5, and no headings with levels", "subsequent entries of a higher level inbetween itself and the next entry of", "#ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(6 if create_missing_outline_levels", "the document's filename. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options)", "aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file:", "get the PDF reader # also to display the outline, if possible. #", "\"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber", "either expressed or implied. import io import os from datetime import datetime, timedelta,", "or implied. 
import io import os from datetime import datetime, timedelta, timezone import", "the document that it converts to PDF. # In most cases, the color", "timestamp. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The default lifespan of the timestamp", "/Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>\", content)", "# content = file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count)", "memory footprint of large documents' saving operations # at the cost of increasing", "self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3", "self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "the document to .PDF. # Set the \"display_doc_title\" to \"True\" to get some", "\"sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\") # Create", "\") # Insert a combo box which will allow a user to choose", "second page. 
doc.save(stream, options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber", "٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: #", "R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content) elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages", "value in real time. # We will need to manually update them using", "# \"Image can not be processed. Possibly unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback): #", "how that method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # The", "the document with it. # Set the \"zoom_factor\" property to \"25\" to give", "with this setting. # Set the \"color_mode\" property to \"NORMAL\" to render all", "#ExSummary:Shows how to sign a generated PDF document. doc = aw.Document() builder =", "+ \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb')", "233 806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84 47 0\",", "612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for", "we saved the document with it enabled. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with", "The default lifespan of the timestamp is 100 seconds. self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We", "a booklet. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber", "builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit,", "the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render the EMF+ part of", "\"False\" to not update all the fields in a document right before a", "# We can clone PdfSaveOptions objects. options_copy = options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options)", "to the PDF format in the form of a book fold. doc =", "ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows how to convert only some of", "builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create a \"PdfSaveOptions\" object that we can pass to", "the output PDF. options.embed_full_fonts = True # Set the \"font_embedding_mode\" property to \"EMBED_ALL\"", "method converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"page_mode\"", "#ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to optimize memory consumption when rendering large documents to", "# Set the \"memory_optimization\" property to \"False\" to save the document as a", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title) def test_memory_optimization(self):", "with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change image color with", "+ \"DrawingML shape effects.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "for use_core_fonts in (False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF", "in headers/footers in a document that we are rendering to PDF. doc =", "self.subTest(uri=uri, result=result): doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR +", "# self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title in (False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart", "\"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to # only export bookmarks in the first section's", "that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # The output", "sub-entries of the second 4th level outline entry, # the 4th and 5th", "to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_compression\" property to \"PdfImageCompression.AUTO\" to", "#with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\") as file: # content = file.read() #if effects_rendering_mode", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\" property", "# self.assertEqual(\"1 XYZ 84 47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ", "comply with the \"PDF/A-1a\" standard, # which complies with \"PDF/A-1b\" as well as", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create a \"PdfSaveOptions\"", "aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for x in annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8,", "every page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "create_missing_outline_levels else 3, bookmarks.count) #endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False, True): with", "R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0", "self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "set Aspose.Words to skip embedding Arial and Times New Roman fonts into a", "the \"resolution_threshold\" property to only apply the downsampling to # images with a", "be processed. 
Possibly unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback): # def __init__(self): # self.save_warnings", "#ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign a generated PDF document. doc", "0 R /XYZ 85 68 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327", "#self.assertEqual(\"Page 1 of 2\" if update_fields else \"Page of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self):", "we edit the PDF. # Set the \"embed_full_fonts\" property to \"False\" to apply", "save_options.interpolate_images = interpolate_images # When we open this document with a reader such", "= pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for", "nonstandard fonts' embedding in the output PDF. # Set the \"font_embedding_mode\" property to", "text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to set", "= file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8", "(c) 2001-2022 Aspose Pty Ltd. All Rights Reserved. # # This file is", "the PDF reader to display just the document itself. 
options.page_mode = page_mode doc.save(ARTIFACTS_DIR", "tags for elements such as the heading # and the next paragraph via", "to \"False\" to fall back to bitmap when # # it encounters a", "aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream)", "#table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode", "aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\" object that we can pass to the document's", "builder.writeln(\"Signed PDF contents.\") # Create a \"PdfSaveOptions\" object that we can pass to", "property to \"NumeralFormat.SYSTEM\" to determine the symbol set from regional settings. options.numeral_format =", "Create a table with three rows. The first row, # whose text we", "1 # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to # not export any", "values in the PDF. options.update_fields = update_fields # We can clone PdfSaveOptions objects.", "permissions to allow the editing of annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY #", ".PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to get", "know that all our fields will be up to date before saving. 
#", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create a \"PdfSaveOptions\" object that", "dolore magna aliqua.\") # Create a \"PdfSaveOptions\" object that we can pass to", "\"Courier New\" builder.writeln(\"The quick brown fox jumps over the lazy dog.\") # Create", "\"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the CMYK color space for all", "the outline navigation pane in the output PDF. save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set", "size of the output document may be larger with this setting. # Set", "- Save only the even-numbered pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options)", "get a PDF reader to # apply a percentage-based zoom factor when we", "higher level inbetween itself and the next entry of the same or lower", "the \"page_mode\" property to \"PdfPageMode.USE_OC\" to get the PDF reader to display a", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector)", "in the output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONE\" to not", "to \"True\" to attempt to fix incorrect # element positioning in the output", "at the first level of the outline in the output PDF. save_options.outline_options.default_bookmarks_outline_level =", "for create_outlines_for_headings_in_tables in (False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create", "document to .PDF. 
# Set the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to get a", "in the form of a book fold. doc = aw.Document(MY_DIR + \"Paragraphs.docx\") #", "make footnotes and endnotes function as hyperlinks. doc = aw.Document(MY_DIR + \"Footnotes and", "\"title\" built-in property in the tab that belongs to this document. # Set", ".PDF and applies the configuration # # in our MetafileRenderingOptions object to the", "may significantly increase the size of already large documents. save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR", "(even)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page 4 (even)\\r\\n\" + #", "of 2\" if update_fields else \"Page of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields", "options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\")", "/Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>\",", "PDF. options.embed_full_fonts = True # Set the \"font_embedding_mode\" property to \"EMBED_ALL\" to embed", "to include all missing levels in the outline, # leaving blank outline entries", "\"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3,", "to display just the document itself. 
options.page_mode = page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options)", "self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "#ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create a \"Span\" tag in the document structure to", "as stream: # Create a \"PdfSaveOptions\" object that we can pass to the", "jumps over the lazy dog.\") # Configure our font sources to ensure that", "save_options.save_format = aw.SaveFormat.PDF # The output PDF document will contain an outline, which", "\"PdfPageMode.USE_OUTLINES\" to get the PDF reader # also to display the outline, if", "property to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to # automatically select the color space", "from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart", "to # use glyphs from the U+06F0 to U+06F9 range as numbers. #", "0 (o) 0 (v) 0 (e) 0 (m) 0 (b) 0 (e) 0", "Set the \"text_compression\" property to \"PdfTextCompression.FLATE\" to apply ZIP compression # to text", "#self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title) def test_memory_optimization(self): for memory_optimization in (False, True): with", "output document may be larger with this setting. 
# Set the \"color_mode\" property", "as file: # content = file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): #", "\"update_fields\" property to \"True\" to iterate through all the document # fields and", "True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save hyperlinks in a document", "before accessing its contents. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR", "False) # Create a \"PdfSaveOptions\" object that we can pass to the document's", "#self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(datetime(1, 1, 1,", "0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2 11", "the \"open_hyperlinks_in_new_window\" property to \"False\" to save all hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window", "ppi. options.downsample_options.resolution = 36 # Set the \"resolution_threshold\" property to only apply the", "# self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233 806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title)", "PDF readers to follow when opening an output document. 
doc = aw.Document() builder", "the outline # to only register headings with heading levels that are no", "method will apply our signature to the output document at this time. doc.save(ARTIFACTS_DIR", "fields do not display the correct value in real time. # We will", "callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) #", "Set the \"text_compression\" property to \"PdfTextCompression.NONE\" to not apply any # compression to", "13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def", "thumbnail for each page in the document. # Set the \"page_mode\" property to", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream)", "the \"compliance\" property to \"PdfCompliance.PDF_A1A\" to comply with the \"PDF/A-1a\" standard, # which", "all the fields will display # the most accurate values in the PDF.", "on the arrow of the \"owner\" entry to collapse/expand all its sub-entries. 
#", "aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self): for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC, aw.saving.NumeralFormat.CONTEXT, aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC,", "fix incorrect # element positioning in the output PDF, should there be any,", "return any(warning for warning in self.warnings # if warning.source == source and warning.warning_type", "generated.\"\"\" # return any(warning for warning in self.warnings # if warning.source == source", "= aw.Document() builder = aw.DocumentBuilder(doc) # Create a table with three rows. The", "pdf_save_options.outline_options.headings_outline_levels = 1 # Set the \"create_outlines_for_headings_in_tables\" property to \"False\" to exclude all", "file: # content = file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0,", "effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to # render DrawingML effects", "document to .PDF and applies the configuration # # in our MetafileRenderingOptions object", "image color with saving options property. doc = aw.Document(MY_DIR + \"Images.docx\") # Create", "to their respective footnotes/endnotes. # Set the \"create_note_hyperlinks\" property to \"False\" not to", "True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve document structure elements, which", "to attempt to fix incorrect # element positioning in the output PDF, should", "a \"Span\" tag in the document structure to export the text language. doc", "the first two images from the document will be downsampled at this stage.", "nonstandard font. 
builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick", "outline. # The last two headings we have inserted above will not appear.", "# self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart", "only allow nonstandard fonts' embedding in the output PDF. # Set the \"font_embedding_mode\"", "such as \"Field.Update()\", and \"Document.UpdateFields()\" # each time we need them to display", "info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \" + info.description) # self.warnings.warning(info) ##ExEnd def", "configure the rendering quality of DrawingML effects in a document as we save", "self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self): for pdf_image_compression in", "PDF format in the form of a book fold. doc = aw.Document(MY_DIR +", "the document structure, such tags, available via the # \"Content\" navigation pane of", "document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # \"Arial\" is a standard font,", "to \"True\" to preblend transparent images # with a background, which may reduce", "use it to make a booklet. # Set the \"use_book_fold_printing_settings\" property to \"False\"", "the location of its respective heading. 
# Set the \"headings_outline_levels\" property to \"2\"", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self):", "== aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content)", "have footnote/endnote symbols link to anything. options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options)", "in a way that helps us use it to make a booklet. #", "large documents' saving operations # at the cost of increasing the duration of", "#ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create PDF document outline entries for headings inside tables.", "-1 (g) 1 (,) 0 ( ) 0 (1) 0 (0) 0 (.)", "len(self.warnings) # def contains(self, source: aw.WarningSource, type: aw.WarningType, description: str) -> bool: #", "self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self):", "# self.assertEqual(\"Please select a fruit: \", text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\" + #", "# self.assertLess(100000, pdf_doc_image.to_stream().length) 
#self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if", "def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how", "as file: content = file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN: #", "in new windows/browser tabs. # Set the \"open_hyperlinks_in_new_window\" property to \"False\" to save", "color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE:", "and treat the outline level 5 headings as level 2. save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels", "clicking, take us to their respective footnotes/endnotes. # Set the \"create_note_hyperlinks\" property to", "0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type", "will treat outline levels 2, 3, and 4 as \"missing\". # Set the", "saving a document.\"\"\" # def __init__(self): # self.warnings = aw.WarningInfoCollection() # def warning(self,", "operators. doc = aw.Document(MY_DIR + \"Text positioning operators.docx\") # Create a \"PdfSaveOptions\" object", "Acrobat, we will need to zoom in on the image # to see", "the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to", "properties within the output PDF document. # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\"", "2\" if update_fields else \"Page of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields in", "to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the CMYK color space for all images in", "aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" # to substitute DML shapes", "the \"export_document_structure\" property to \"True\" to make the document structure, such tags, available", "file will be considerably smaller, # but we may need access to any", "level and 3 and higher entries when we open the document. options.outline_options.expanded_outline_levels =", "open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R>>stream\", file.read())", "#ExSummary:Show how to write additional text positioning operators. doc = aw.Document(MY_DIR + \"Text", "large documents to PDF. doc = aw.Document(MY_DIR + \"Rendering.docx\") # Create a \"PdfSaveOptions\"", "embedded font in the output PDF. options.embed_full_fonts = True # Set the \"font_embedding_mode\"", "# self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count)", "Reserved. # # This file is part of Aspose.Words. 
The source code in", "binary raster operation is partly supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and", "pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document =", "\"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 - Save only the odd-numbered pages: options.page_set = aw.saving.PageSet.odd", "\"DmlRenderingMode.DRAWING_ML\" # to render the DML shapes themselves. options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR +", "# In our document, the outline entries from the 5th heading level are", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\" property to \"True\" to replace", "#ExSummary:Shows how to save hyperlinks in a document we convert to PDF so", "range(5): builder.writeln(f\"Page {i + 1} ({'odd' if i % 2 == 0 else", "pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content = file.read() with io.BytesIO()", "work with any layers present in the document. 
# Set the \"page_mode\" property", "b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector", "= aw.fonts.FontSettings.default_instance.get_fonts_sources() self.assertTrue(any(font.full_font_name == \"Arial\" for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def", "and update them before we save it as a PDF. This will make", "pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart", "self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded)", "#ExSummary:Shows how to preserve document structure elements, which can assist in programmatically interpreting", "\"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" + # \"Page 4 (even)\",", "when saving a document to PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) for", "# if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception):", "world!\") builder.font.name = \"Arvo\" builder.writeln(\"The quick brown fox jumps over the lazy dog.\")", "timestamp authority-verified timestamp. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The default lifespan of", "0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202 733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258", "with making documents searchable but may significantly increase the size of already large", "using the Save method and the PdfSaveOptions class. doc = aw.Document() builder =", "self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3 0 R/Contents 6", "self.assertEqual(\"1 XYZ 157 733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212 711 0\", link_annotations[7].destination.to_string())", "options.numeral_format = numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\")", "in the outline, # leaving blank outline entries since there are no usable", "elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\")", "to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\")", "sources. 
aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name)", "== type and warning.description == description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location", "[0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def", "automatically select the color space for images in the document that it converts", "the \"zoom_factor\" property to \"25\" to give the zoom factor a value of", "in a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\")", "#ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to set the default zooming that a reader applies", "builder = aw.DocumentBuilder(doc) for i in range(5): builder.writeln(f\"Page {i + 1} ({'odd' if", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the \"resolution\" property to \"36\" to downsample", "Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" # to only render the EMF part", "+ \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content = file.read() with io.BytesIO() as stream: image.save(stream)", "level are sub-entries of the second 4th level outline entry, # the 4th", "the \"scale_wmf_fonts_to_metafile_size\" property to \"True\" to scale fonts # that format text within", "property in the tab that belongs to this document. 
# Set the \"display_doc_title\"", "= file.read() with io.BytesIO() as stream: image.save(stream) if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \",", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content", "an output PDF document based on the parity of their page numbers. #", "loss-related warnings that occur upon saving a document.\"\"\" # def __init__(self): # self.warnings", "method converts the document to .PDF. # Set the \"color_mode\" property to \"GRAYSCALE\"", "Start by disallowing all permissions. encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to allow", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 - Save only the odd-numbered pages: options.page_set", "the \"additional_text_positioning\" property to \"True\" to attempt to fix incorrect # element positioning", "the output PDF. save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\" property to \"1\"", "#ExFor:FixedPageSaveOptions.numeral_format #ExFor:NumeralFormat #ExSummary:Shows how to set the numeral format used when saving to", "25%. options = aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 # When we", "leaving blank outline entries since there are no usable headings. 
# Set the", "\"\"\"Returns True if a warning with the specified properties has been generated.\"\"\" #", "(False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option to optimize memory", "levels that do not contain any corresponding headings when saving a PDF document.", "to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to # automatically select the color space for", "file: content = file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3 0", "the document to .PDF. options = aw.saving.PdfSaveOptions() # By default, Aspose.Words downsample all", "include all missing levels in the outline, # leaving blank outline entries since", "missing levels in the outline, # leaving blank outline entries since there are", "= aw.saving.PdfSaveOptions() # Set the \"interpolate_images\" property to \"True\" to get the reader", "options = aw.saving.PdfSaveOptions() # Since our document contains a custom font, embedding in", "converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_compression\" property", "images that end up in the output PDF. pdf_save_options.image_compression = pdf_image_compression # Set", "to 5. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\")", "content) # self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK,", "#ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\",", "<</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711 0]>>\", content) else: self.assertNotIn( b\"<</Type /Annot/Subtype", "link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85 654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85 68", "test_create_missing_outline_levels(self): for create_missing_outline_levels in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how", "if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0", "respective heading. # Set the \"headings_outline_levels\" property to \"1\" to get the outline", "the PDF. # Set the \"embed_full_fonts\" property to \"False\" to apply subsetting to", "document to the PDF format in the form of a book fold. doc", "them to display accurate values. builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\",", "export document structure while saving this document. 
In that case, # we can", "The output PDF document will contain an outline, which is a table of", "link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85 68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202 733", "#ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set a different color space for images in a", "to get a PDF reader to # apply a percentage-based zoom factor when", "PDF. doc = aw.Document(MY_DIR + \"Bookmarks in headers and footers.docx\") # Create a", "callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects formatting loss-related warnings that occur upon", "# that format text within WMF images according to the size of the", "fields in a document right before a save operation. # This is the", "68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202 733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ", "+ \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: # content = file.read().decode('utf-8') #if preserve_form_fields: # self.assertEqual(\"Please", "ensure that we have access to both the fonts in this document. original_fonts_sources", "outline. save_options.outline_options.headings_outline_levels = 5 # This document contains headings of levels 1 and", "aw.DocumentBuilder(doc) # Insert text with PAGE and NUMPAGES fields. These fields do not", "to \"1\" to render a portion of the document starting from the second", "Set the \"expanded_outline_levels\" property to \"2\" to automatically expand all heading level 2", "as a booklet, we must set the \"multiple_pages\" # properties of the page", "in the document at # their current values and display them as plain", "to give the zoom factor a value of 25%. 
options = aw.saving.PdfSaveOptions() options.zoom_behavior", "pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400, 400, pdf_doc_image_stream) #with pdf_document.pages[1].resources.images[2].to_stream()", "aw.Document(MY_DIR + \"Paragraphs.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create a \"PdfSaveOptions\" object", "the parity of their page numbers. # 1 - Save only the even-numbered", "file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4", "and 4 as \"missing\". # Set the \"create_missing_outline_levels\" property to \"True\" to include", "to PDF with three levels in the document outline. doc = aw.Document() builder", "that creates a booklet. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "the document as the title bar. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello", "# content) # form = pdf_document.form # self.assertEqual(1, pdf_document.form.count) # field = form.fields[0].as_combo_box_field()", "header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data)", "\"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: #", "pages in a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page", "incididunt ut labore et dolore magna aliqua.\") # Create a \"PdfSaveOptions\" object that", "11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break #elif rendering_mode", "(False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable or disable", "\"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to # automatically select the color", "# Configure our font sources to ensure that we have access to both", "= pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if", "all our fields will be up to date before saving. # Set the", "the \"color_mode\" property to \"NORMAL\" to render all images in color. pdf_save_options =", "4 from the outline. 
options.outline_options.headings_outline_levels = 4 # If an outline entry has", "/Widget/Rect [0 0 0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "the \"owner\" of several such \"sub-entries\". # In our document, the outline entries", "self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "__init__(self): # self.warnings: List[aw.WarningInfo] = [] # def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\")", "need them to display accurate values. builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\" of \")", "in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save hyperlinks in", "discard all DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to", "method converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\"", "image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5 0", "#text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\",", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length)", "glyphs # that the document is using. The file will be considerably smaller,", "the document as usual. save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document", "render the DML shapes themselves. options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd", "#ExSummary:Shows how to export custom properties while converting a document to PDF. doc", "perform interpolation on images while saving a document to PDF. doc = aw.Document()", "heading level that is no larger than the value of the \"headings_outline_levels\" property.", "if we know that all our fields will be up to date before", "windows/browser tabs. # Set the \"open_hyperlinks_in_new_window\" property to \"False\" to save all hyperlinks", "to get the PDF reader to display a separate panel # that allows", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"update_fields\"", "as the title bar. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title", "test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows", "part. save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set the \"use_emf_embedded_to_wmf\" property to \"True\" to render", "self.assertEqual(\"1 XYZ 85 68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202 733 0\", link_annotations[4].destination.to_string())", "export bookmarks that are in all headers/footers. save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\",", "Aspose.Words convert it to PDF. # Set the \"compliance\" property to \"PdfCompliance.PDF17\" to", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"update_fields\" property to \"False\"", "792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for preblend_images", "to work with any layers present in the document. 
# Set the \"page_mode\"", "True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to update all the fields", "5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create a \"Span\"", "# Set the \"image_compression\" property to \"PdfImageCompression.AUTO\" to use the # \"image_compression\" property", "the tab that belongs to this document. # Set the \"display_doc_title\" to \"False\"", "output PDF, should there be any, at the cost of increased file size.", "#ExSummary:Shows how to display the title of the document as the title bar.", "Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to # not export any bookmarks that", "\"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\", file.read()) def", "entry, and so on. # In the outline, we can click on the", "test_downsample_options(self): #ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change the resolution", "the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to # not export any bookmarks that are", "# Set the \"image_compression\" property to \"PdfImageCompression.JPEG\" to use the # \"image_compression\" property", "open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content = file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML:", "\"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image can not be processed. 
Possibly", "== aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: #", "#ExFor:SaveOptions.update_fields #ExSummary:Shows how to update all the fields in a document immediately before", "link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202 733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258 711", "# def contains(self, source: aw.WarningSource, type: aw.WarningType, description: str) -> bool: # \"\"\"Returns", "apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber =", "to substitute DML shapes with their fallback shapes. # Set the \"dml_rendering_mode\" property", "aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2,", "not export the document structure. options.export_document_structure = export_document_structure # Suppose we export document", "# Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved. 
# # This", "filter out a set of pages from # our document to save in", "signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings", "#ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign a saved PDF document digitally and timestamp it.", "a timestamp authority-verified timestamp. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The default lifespan", "#self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title) def test_memory_optimization(self): for memory_optimization in (False,", "= drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\") builder.insert_image(img) # Create a \"PdfSaveOptions\" object that", "PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONE\" to not embed any fonts", "bigger the impact that this will have. options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\",", "with heading levels that are no larger than 1. 
pdf_save_options.outline_options.headings_outline_levels = 1 #", "== 0 else 'even'})\") if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\"", "= aw.saving.PdfSaveOptions() # Create a digital signature and assign it to our SaveOptions", "to scale fonts # that format text within WMF images according to the", "#ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to set permissions on", "images from the document will be downsampled at this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\",", "document in full-screen mode, which takes over the monitor's display and has no", "their fallback shapes. # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" # to render", "XYZ 85 654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85 68 0\", link_annotations[3].destination.to_string()) #", "lower than that of the device that is displaying the document. # Set", "the \"update_fields\" property to \"False\" to not update all the fields in a", "PAGE and NUMPAGES fields. These fields do not display the correct value in", "#ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save hyperlinks in a document we convert to", "do not display the correct value in real time. # We will need", "property to \"NumeralFormat.CONTEXT\" to # look up the locale to determine what number", "PDF. # Set the \"text_compression\" property to \"PdfTextCompression.FLATE\" to apply ZIP compression #", "= export_document_structure # Suppose we export document structure while saving this document. 
In", "text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) def test_zoom_behaviour(self):", "the title of the document as the title bar. doc = aw.Document() builder", "\"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom)", "headings whose levels are above 4 from the outline. options.outline_options.headings_outline_levels = 4 #", "#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format", "##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback to bitmap rendering and changing", "+ \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello", "Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render the EMF+ part", "select a fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count) def test_compliance(self):", ".PDF. 
save_options = aw.saving.PdfSaveOptions() # Set the \"compliance\" property to \"PdfCompliance.PDF_A1B\" to comply", "output PDF. # metafile_rendering_options.emulate_raster_operations = False # # Set the \"rendering_mode\" property to", "save all hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with", "#pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with", "modify how that method converts the document to .PDF. # Set the \"color_mode\"", "self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts,", "= datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256", "property to \"VECTOR_WITH_FALLBACK\" to try to render every metafile using vector graphics. #", "aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) # def __getitem__(self, i) -> aw.WarningInfo: #", "provided # \"as is\", without warranty of any kind, either expressed or implied.", "headings with heading levels that are no larger than 1. 
pdf_save_options.outline_options.headings_outline_levels = 1", "Set the \"headings_outline_levels\" property to \"1\" to get the outline # to only", "for memory_optimization in (False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization #ExSummary:Shows an option", "(018)] TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold):", "(even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1", "#self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page 5 (odd)\",", ",۳ ,۲ ,۱\", text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export Odd", "#ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF Type 1 font substitution. doc = aw.Document()", ".PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"True\" to scale", "and then 3. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading", "options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content = file.read() self.assertIn(", "we can use to filter out a set of pages from # our", "= aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\")", "structure of the original document. # This helps with making documents searchable but", "\"rb\") as file: content = file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages", "XYZ 85 677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85 79 0\", link_annotations[1].destination.to_string()) #", "# self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode ==", "= aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\" to get Aspose.Words to", "they have a heading level that is no larger than the value of", "if update_fields else \"Page of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields in (False,", "the output PDF document. 
# Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" # to", "\"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12 0 R>>>>\", #", "as pdf_doc_image_stream: # if pdf_image_compression == aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) #", "Set the \"update_fields\" property to \"False\" to not update all the fields in", "aw.SaveFormat.PDF # The output PDF document will contain an outline, which is a", ",۱\", text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export Odd pages from", "according to the size of the metafile on the page. # Set the", "which will allow a user to choose an option from a collection of", "if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85 677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1", "#self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self): for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS, aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with", "EMF part of an EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to", "0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode", "the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to", "(\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12 0 R>>>>\", # content)", "0>>/Dest[5 0 R /XYZ 85 68 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305", "save_options.metafile_rendering_options = metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings() # doc.warning_callback = callback # doc.save(ARTIFACTS_DIR", "@property # def count(self): # return len(self.warnings) # def contains(self, source: aw.WarningSource, type:", "707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD", "images while saving a document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "downsample all images in a document that we save to PDF to 220", "to 220 ppi. 
self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) #", "#elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance ==", "the document's \"save\" method # # to modify how that method converts the", "test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to", "707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn(", "# apply a percentage-based zoom factor when we open the document with it.", "most accurate values in the PDF. options.update_fields = update_fields # We can clone", "[85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711 0]>>\", content)", "method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\"", "document itself. 
options.page_mode = page_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name", "# When we open this document using a reader such as Adobe Acrobat,", "else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page =", "pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD):", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode", "2 (even)\\r\\n\" + # \"Page 4 (even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\")", "pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif", "733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype", "text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣, ٤, ٥, ٦, ٧,", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL))", 
"modify how that method converts the document to .PDF and applies the configuration", "open_hyperlinks_in_new_window in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save hyperlinks", "+ \"Paragraphs.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "self.assertEqual(\"Please select a fruit: \", text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\" + # \"<</Type", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\"", "callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster operation is partly supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback):", "Set the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs from the U+06F0", "aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\" property to \"True\" to save all hyperlinks using", "any kind, either expressed or implied. import io import os from datetime import", "\"open_hyperlinks_in_new_window\" property to \"False\" to save all hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR", "a way that helps us use it to make a booklet. # Set", "all form fields in the document at # their current values and display", "aspose.words as aw import aspose.pydrawing as drawing from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR,", "with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF Type 1 font substitution. 
doc", "to PDF so that they open new pages when we click on them.", "builder = aw.DocumentBuilder(doc) for i in range(100): builder.writeln(\"Lorem ipsum dolor sit amet, consectetur", "# self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) <", "of an EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\" to", "-1 ( ) 1 (2) -1 (018)] TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for", "#ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to configure the rendering quality of DrawingML effects", "an EMF+ dual metafile if all of the EMF+ records are supported. #", "above 2 from the outline. # The last two headings we have inserted", "the interpolation effect if we saved the document with it enabled. 
doc.save(ARTIFACTS_DIR +", "display_doc_title in (False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display the", "hyperlinks in a document we convert to PDF so that they open new", "aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "fox jumps over the lazy dog.\") # Configure our font sources to ensure", "#ExSummary:Shows how to configure Enhanced Windows Metafile-related rendering options when saving to PDF.", "\"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode", "location of its respective heading. # Set the \"headings_outline_levels\" property to \"4\" to", "arrow will appear to the left of the entry. This entry is the", "pdf title\", pdf_document.info.title) def test_memory_optimization(self): for memory_optimization in (False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart", "warning.warning_type == type and warning.description == description) def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm", "content) #pdf_document = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction)", "\"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a simplified version of DrawingML effects. # Set the", "document outline. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings of levels", "# self.assertEqual(\"2 XYZ 85 48 0\", outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc = aw.Document(MY_DIR", "text when we save the document to PDF. # Set the \"text_compression\" property", "pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4,", "8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\",", "to .PDF. options = aw.saving.PdfSaveOptions() # Create a digital signature and assign it", "Set the \"resolution_threshold\" property to only apply the downsampling to # images with", "page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\",", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000,", "#pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: # content = file.read().decode('utf-8') #if", "implied. 
import io import os from datetime import datetime, timedelta, timezone import aspose.words", "lower outline entries # and collapse all level and 3 and higher entries", "\"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs from the U+06F0 to U+06F9", "#self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields in (False, True): with", "+ \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode", "to see the interpolation effect if we saved the document with it enabled.", "+ b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11", "+ b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data) # outline_item_collection = pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count)", "property to \"True\" to get the reader that opens this document to interpolate", "+ \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000,", "\"headings_outline_levels\" property to \"4\" to exclude all headings whose levels are above 4", "= aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() 
self.assertTrue(any(font.full_font_name == \"Arial\" for font", "self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process bookmarks in", "this document, we will need to provide the password before accessing its contents.", "PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR + \"Transparent background", "#table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels", "for font in font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\" object that we can pass", "file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0", "== aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts", "+ \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") 
#self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name)", "and collapse all level and 3 and higher entries when we open the", "metafile using vector graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a \"PdfSaveOptions\"", "#ExSummary:Shows how to set the PDF standards compliance level of saved PDF documents.", "it as a PDF. This will make sure that all the fields will", "the document when we save it to PDF. certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\",", "of the document as the title bar. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "the \"interpolate_images\" property to \"True\" to get the reader that opens this document", "save_options = aw.saving.PdfSaveOptions() # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"True\" to scale fonts", "an output document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create", "pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\",", "world!\") builder.font.name = \"Courier New\" builder.writeln(\"The quick brown fox jumps over the lazy", "the next entry of the same or lower level, # an arrow will", "\"Page 5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create a", "PDF reader to open the saved # document in full-screen mode, which takes", "2. 
save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR", "XYZ 85 68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202 733 0\", link_annotations[4].destination.to_string()) #", "##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a fallback to bitmap rendering and changing type of", "٥, ٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC:", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2", "increase the size of already large documents. save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\",", "# self.assertEqual(\"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100\",", "R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for", "logo.png\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type())", "level 2. 
save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor()", "output PDF. save_options.outline_options.default_bookmarks_outline_level = 1 # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to", "== \"Arial\" for font in font_sources[0].get_available_fonts())) self.assertTrue(any(font.full_font_name == \"Arvo\" for font in font_sources[1].get_available_fonts()))", "# an arrow will appear to the left of the entry. This entry", "self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1]", "of increasing the duration of the operation. # Set the \"memory_optimization\" property to", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta) -1 (g) 1 (, 10. November)", "pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to", "f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content) elif page_mode ==", "in our document, with their PDF Type 1 equivalents. 
# Set the \"use_core_fonts\"", "aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 # When we open this document", "PDF readers, such as Adobe Acrobat Pro, # to display the value of", "aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export custom properties", "Javascript code # that forces readers to open these links in new windows/browser", "= file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W", "\"scale_wmf_fonts_to_metafile_size\" property to \"False\" to # preserve the default scale of these fonts.", "up in a way that creates a booklet. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd", "page in the document. 
# Set the \"page_mode\" property to \"PdfPageMode.USE_OC\" to get", "the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs from the U+06F0 to", "b\"5 0 obj\\r\\n\" + # b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox", "def test_pdf_digital_signature(self): #ExStart #ExFor:PdfDigitalSignatureDetails #ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how", "True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend images with transparent backgrounds", "aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type", "import aspose.words as aw import aspose.pydrawing as drawing from api_example_base import ApiExampleBase, MY_DIR,", "0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist)", "+ \"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN,", "0 ( ) 0 (1) 0 (0) 0 (.) 0 ( ) 0", "will appear in the outline of a saved PDF document. 
doc = aw.Document()", "memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q= aspose\",", "a saved PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings", "\"create_missing_outline_levels\" property to \"False\" to ignore missing outline levels, # and treat the", "render fallback shapes when saving to PDF. doc = aw.Document(MY_DIR + \"DrawingML shape", "True/Length 11 0 R/Filter /FlateDecode>>\", content) else: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type", "preserve the default scale of these fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\",", "have inserted above will not appear. save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options)", "# Set the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to try to render every metafile", "in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change", "+ \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R>>stream\", file.read()) elif", "via \"View\" -> \"Show/Hide\" -> \"Navigation panes\" -> \"Tags\". 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options)", "test_export_document_structure(self): for export_document_structure in (False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to", "a document to the PDF format using the Save method and the PdfSaveOptions", "method converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # The output PDF", "how to sign a generated PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "as a supplement to the documentation, and is provided # \"as is\", without", "property to \"PdfPageMode.USE_THUMBS\" to get the PDF reader to display a separate panel", "the saved PDF. # Aspose.Words will also apply Flate compression to all images", "heading levels that are no larger than 1. pdf_save_options.outline_options.headings_outline_levels = 1 # Set", "aw.Document(MY_DIR + \"Footnotes and endnotes.docx\") # Create a \"PdfSaveOptions\" object that we can", "adipiscing elit, \" + \"sed do eiusmod tempor incididunt ut labore et dolore", "with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK):", "saving to PDF. doc = aw.Document(MY_DIR + \"EMF.docx\") # Create a \"PdfSaveOptions\" object", "the document to .PDF. save_options = aw.saving.PdfSaveOptions() # Enable encryption via the \"encryption_details\"", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text)", "in the document that it converts to PDF. 
# In most cases, the", "to export custom properties while converting a document to PDF. doc = aw.Document()", "self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\" method will", "0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10", "the outline. # Set the \"create_outlines_for_headings_in_tables\" property to \"True\" to include all headings", "to render a simplified version of DrawingML effects. # Set the \"dml_effects_rendering_mode\" property", "aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create a \"PdfSaveOptions\" object that we can", "save_options) #ExEnd def test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ]", "of levels 1 and 5, and no headings with levels of 2, 3,", "test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export Odd pages from the document. 
doc", "our SaveOptions object to sign the document when we save it to PDF.", ",۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱\", text_absorber.text) def test_export_page_set(self):", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Arvo\"", "builder.insert_field(\"PAGE\", \"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") # Create a", "# doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image can not", "= 25 # When we open this document using a reader such as", "heading # and the next paragraph via \"View\" -> \"Show/Hide\" -> \"Navigation panes\"", "PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a", "how that method converts the document to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.save_format =", "Set the \"create_outlines_for_headings_in_tables\" property to \"True\" to include all headings within tables #", "the outline. # The last two headings we have inserted above will not", "options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\")", "\"Windows bar pdf title\" # Create a \"PdfSaveOptions\" object that we can pass", "#ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend images with transparent backgrounds while saving a", "PDF normally. options.use_book_fold_printing_settings = render_text_as_bookfold # If we are rendering the document as", "in a heading-type style, will serve as the column header. 
builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier", "quality of the Jpeg images that end up in the output PDF. #", "aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif", "+ \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(),", "World!\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "within WMF images according to the size of the metafile on the page.", "before we save it as a PDF. This will make sure that all", "== aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: #", "options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\")", "save_options = aw.saving.PdfSaveOptions() # Set the \"compliance\" property to \"PdfCompliance.PDF_A1B\" to comply with", "when we save the document to PDF. 
The larger the document, the bigger", "with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set Aspose.Words to skip embedding", "certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\",", "+ \"PdfSaveOptions.table_heading_outlines.pdf\") #if create_outlines_for_headings_in_tables: # self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0,", "options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The \"save\" method will apply our signature to the", "# self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts,", "\"Content\" navigation pane of Adobe Acrobat at the cost of increased file size.", "in a document immediately before saving it to PDF. doc = aw.Document() builder", "no headings with levels of 2, 3, and 4. 
# The output PDF", "open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: # data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode ==", "__getitem__(self, i) -> aw.WarningInfo: # return self.warnings[i] # def clear(self): # \"\"\"Clears warning", "+ \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks()", "#self.assertEqual( # pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), # pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts in", "R/X2 11 0 R/X3 12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content)", "pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title in (False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows", "contain any corresponding headings when saving a PDF document. 
doc = aw.Document() builder", "we will have full use of all fonts if we edit the PDF.", "\"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 - Save every page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR +", "Adobe Acrobat, we will need to zoom in on the image # to", "save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR +", "aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\" property to \"1\" to display all # bookmarks", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks =", "85 79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85 654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1", "+ \"Transparent background logo.png\") # Create a \"PdfSaveOptions\" object that we can pass", "builder = aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\") builder.insert_image(img) # Create", "\"True\" to embed every glyph of every embedded font in the output PDF.", "to make a booklet. # Set the \"use_book_fold_printing_settings\" property to \"False\" to render", "3 (odd)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\")", "document with it. # Set the \"zoom_factor\" property to \"25\" to give the", "(aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) #pdf_document", "the output PDF. 
# metafile_rendering_options.emulate_raster_operations = False # # Set the \"rendering_mode\" property", "to render the document as usual. save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options)", "if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000,", "aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR +", "two fonts in our document, with their PDF Type 1 equivalents. # Set", "display accurate values. builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK)", "self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0", "brown fox jumps over the lazy dog.\") # Create a \"PdfSaveOptions\" object that", "is a nonstandard font. builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier New\"", "\"zoom_factor\" property to \"25\" to give the zoom factor a value of 25%.", "+ # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12 0", "disable subsetting when embedding fonts while rendering a document to PDF. doc =", "with three levels in the document outline. 
doc = aw.Document() builder = aw.DocumentBuilder(doc)", "#if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0 (S) 0", "aw.DocumentBuilder(doc) # Create a table with three rows. The first row, # whose", "self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode):", "link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157 733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212 711", "the output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to only allow", "applies when opening a rendered PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc)", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"preblend_images\" property to \"True\" to preblend", "0 (r) -1 ( ) 1 (2) -1 (0) 0 (1) 0 (8)]", "and below in the outline. 
save_options.outline_options.headings_outline_levels = 5 # This document contains headings", "content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype", "13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) #pdf_document =", "def test_note_hyperlinks(self): for create_note_hyperlinks in (False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how", "#ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export custom properties while converting a document to PDF.", "of the pages, we can fold all the pages down the middle at", "are above 4 from the outline. options.outline_options.headings_outline_levels = 4 # If an outline", "options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to get the", "document that it converts to PDF. # In most cases, the color space", "to \"PdfCustomPropertiesExport.NONE\" to discard # custom document properties as we save the document", "sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et", "0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode", "0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13", "cases, the color space will be RGB. 
# Set the \"image_color_space_export_mode\" property to", "to \"False\" to ignore missing outline levels, # and treat the outline level", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) #", "footnotes/endnotes. # Set the \"create_note_hyperlinks\" property to \"False\" not to have footnote/endnote symbols", "property to \"PdfCompliance.PDF_A1A\" to comply with the \"PDF/A-1a\" standard, # which complies with", "aw.Document(MY_DIR + \"WMF with text.docx\") # Create a \"PdfSaveOptions\" object that we can", "/Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733", "aw.saving.PdfSaveOptions() # Set the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to # use glyphs from", "be downsampled at this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "to save form fields as interactive objects in the output PDF. # Set", "options.digital_signature_details.timestamp_settings.password) # The \"save\" method will apply our signature to the output document", "property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR +", "def test_additional_text_positioning(self): for apply_additional_text_positioning in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how", "over the monitor's display and has no controls visible. 
# Set the \"page_mode\"", "/Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\")", "only some of the pages in a document to PDF. doc = aw.Document()", "helps with making documents searchable but may significantly increase the size of already", "Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\",", "aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content) elif", "(False, True): with self.subTest(create_note_hyperlinks=create_note_hyperlinks): #ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make footnotes and endnotes", "elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\") #", "self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how to specify a compression type for", "aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000,", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action = pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def", "metafile. 
# Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the EMF+", "to arrange the contents # in the output PDF in a way that", "#ExSummary:Shows how to change image color with saving options property. doc = aw.Document(MY_DIR", "to .PDF. save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\" property to \"True\" to", "258 711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157 733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1", "default, Aspose.Words downsample all images in a document that we save to PDF", "== aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( # pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD), #", "to # render DrawingML effects with more accuracy and also with more processing", "with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF fonts scaling according to metafile", "= file.read().decode('utf-8') #if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE: # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4", "self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif", "\"save\" method # to modify how that method converts the document to .PDF.", "sit amet, consectetur adipiscing elit, \" + \"sed do eiusmod tempor incididunt ut", "the monitor's display and has no controls visible. 
# Set the \"page_mode\" property", "modify how that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() #", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\"", "accessing its contents. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR +", "headings within tables # in the outline, provided that they have a heading", "٢, ٣, ٤, ٥, ٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif", "format used when saving to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id", "text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading", "to exclude all headings within tables, # such as the one we have", "what number of glyphs to use. # Set the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\"", "self.assertEqual(\"1 XYZ 85 79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85 654 0\", link_annotations[2].destination.to_string())", "# def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) # def __getitem__(self, i)", "-> bool: # \"\"\"Returns True if a warning with the specified properties has", "0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content)", "+ \"PdfSaveOptions.preblend_images.pdf\") image = pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content", "the document as we render it with the \"save\" method. 
signing_time = datetime.now()", "= aw.saving.PdfSaveOptions() # The output PDF document will contain an outline, which is", "zoom in on the image # to see the interpolation effect if we", "headings that can serve as TOC entries of levels 1 and 5. builder.paragraph_format.style_identifier", "the default scale of these fonts. save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options)", "# Set the \"update_fields\" property to \"True\" to iterate through all the document", "whose levels are above 4 from the outline. options.outline_options.headings_outline_levels = 4 # If", "standard, # which complies with \"PDF/A-1b\" as well as preserving the document structure", "EMF+ part of an EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to", "# Set the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to use european numerals. # Set", "lower the memory footprint of large documents' saving operations # at the cost", "= preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1,", "# to modify how that method converts the document to .PDF and applies", "whole document to PDF with three levels in the document outline. 
doc =", "#signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority)", "aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create a \"PdfSaveOptions\" object that we can", "options = aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\" object of the \"SaveOptions\" object to", "# Set the \"font_embedding_mode\" property to \"EMBED_NONE\" to not embed any fonts in", "#self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels", "1 # Set the \"create_outlines_for_headings_in_tables\" property to \"False\" to exclude all headings within", "#self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file:", "dual metafile if all of the EMF+ records are supported. 
# Otherwise, Aspose.Words", "aw import aspose.pydrawing as drawing from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR", "fruit: \", text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\" + # \"<</Type /Annot/Subtype /Widget/P 5", "# self.assertEqual(\"2 XYZ 85 806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2 XYZ", "the two fonts in our document, with their PDF Type 1 equivalents. #", "def test_create_missing_outline_levels(self): for create_missing_outline_levels in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows", "of the device that is displaying the document. # Set the \"interpolate_images\" property", "#if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode ==", "#if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5 0 obj\\r\\n\" +", "shapes with their fallback shapes. 
# Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" #", "pdf_doc_fonts[1].is_embedded) def test_embed_core_fonts(self): for use_core_fonts in (False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows", "lower level, # an arrow will appear to the left of the entry.", "85 677 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W", "0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) #", "#ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to apply text compression when saving a document", "before saving it to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert", "objects in the output PDF. # Set the \"preserve_form_fields\" property to \"False\" to", "options = aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to discard #", "saved # document in full-screen mode, which takes over the monitor's display and", "within the output PDF document. # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" #", "editing of annotations. encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\" object", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( #", "# Create a table with three rows. 
The first row, # whose text", "file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5 0 obj\\r\\n\"", "will only contain the second page. doc.save(stream, options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "supported.\", # callback.warnings[0].description) #class HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects formatting loss-related warnings that", "[x.as_link_annotation() for x in annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8, len([a for a in", "+ \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13", "preferable option if we know that all our fields will be up to", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" + #", ",۲ ,۱\", text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export Odd pages", "to # look up the locale to determine what number of glyphs to", "to display the document's filename. 
pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR +", "text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U,", "(m) 0 (b) 0 (e) 0 (r) -1 ( ) 1 (2) -1", "content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\") #self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE:", "3rd level entry, and so on. # In the outline, we can click", "to \"PdfPageMode.USE_OUTLINES\" to get the PDF reader # also to display the outline,", "logo.png\") builder.insert_image(img) # Create a \"PdfSaveOptions\" object that we can pass to the", "#ExStart #ExFor:PdfSaveOptions.create_note_hyperlinks #ExSummary:Shows how to make footnotes and endnotes function as hyperlinks. doc", "when rendering large documents to PDF. doc = aw.Document(MY_DIR + \"Rendering.docx\") # Create", "self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure in (False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure", "pdf_compliance == aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U:", "links that, upon clicking, take us to their respective footnotes/endnotes. 
# Set the", "pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset) def test_embed_windows_fonts(self): for pdf_font_embedding_mode", "of the pages in a document to PDF. doc = aw.Document() builder =", "\"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\", text_absorber.text) def test_set_numeral_format(self): for numeral_format", "the PDF reader to display a separate panel # that allows us to", "doc = aw.Document(MY_DIR + \"Paragraphs.docx\") # Create a \"PdfSaveOptions\" object that we can", "at 1/4 of its actual size. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document =", "content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn( b\"<</Creator(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s)/Producer(\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0.\\0W\\0o\\0r\\0d\\0s\\0 \\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\",", "b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\")", "# Otherwise, Aspose.Words will render the EMF part. save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set", "used when saving to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id =", "builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\") # Create a \"PdfSaveOptions\" object that we can", "property to \"NumeralFormat.EUROPEAN\" to use european numerals. 
# Set the \"numeral_format\" property to", "value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "for preblend_images in (False, True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend", "all headings of levels 5 and below in the outline. save_options.outline_options.headings_outline_levels = 5", "\"use_book_fold_printing_settings\" property to \"False\" to render the PDF normally. options.use_book_fold_printing_settings = render_text_as_bookfold #", "#ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF Type 1 font substitution. doc = aw.Document() builder", "b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: self.assertIn(", "/XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11 0 R/Filter", "image:\") builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\") # Create a \"PdfSaveOptions\" object that we", "= pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif", "\"preserve_form_fields\" property to \"True\" to save form fields as interactive objects in the", "# Set the \"additional_text_positioning\" property to \"True\" to attempt to fix incorrect #", "#pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode 
==", "document at # their current values and display them as plain text in", "#ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export #ExSummary:Shows how to export custom properties while converting a document", "#ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change the resolution of images in the", "== aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3, 4, 5, 6, 7, 8, 9, 10,", "document. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that we", "select a fruit: \") # Insert a combo box which will allow a", "property to \"True\" to include all headings within tables # in the outline,", "document as Aspose.Words convert it to PDF. # Set the \"compliance\" property to", "# # to modify how that method converts the document to .PDF and", "find tags for elements such as the heading # and the next paragraph", "\"multiple_pages\" # properties of the page setup objects of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\".", "# Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to discard # custom document properties", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: #", "0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0", "elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6", "save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = 
aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR +", "= aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\" property to \"True\" to", "def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how", "corresponding headings when saving a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "= pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES,", "output PDF document will treat outline levels 2, 3, and 4 as \"missing\".", "document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in range(100):", "location of its respective heading. # Set the \"headings_outline_levels\" property to \"5\" to", "Insert headings of levels 1 to 5. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\")", "def test_preblend_images(self): for preblend_images in (False, True): with self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how", "R/Length 11 0 R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR +", "a table with three rows. The first row, # whose text we will", "= aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart #ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF fonts scaling", "+ \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content =", "doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in range(5): builder.writeln(f\"Page {i +", "will render the EMF part. save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set the \"use_emf_embedded_to_wmf\" property", "= aw.saving.PdfSaveOptions() # Since our document contains a custom font, embedding in the", "custom font, embedding in the output document may be desirable. # Set the", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ", "Aspose.Words will render the EMF part. 
save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode # Set the \"use_emf_embedded_to_wmf\"", "\"False\" to ignore missing outline levels, # and treat the outline level 5", "method converts the document to .PDF. # Set the \"display_doc_title\" to \"True\" to", "ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions) #ExSummary:Shows", "the value of the \"headings_outline_levels\" property. pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options)", "#with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber", "images # with a background, which may reduce artifacts. # Set the \"preblend_images\"", "levels, # and treat the outline level 5 headings as level 2. save_options.outline_options.create_missing_outline_levels", "EMF+ part of an EMF+ dual metafile if all of the EMF+ records", "0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Create a digital signature", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1]", "levels 5 and below in the outline. save_options.outline_options.headings_outline_levels = 5 # This document", "to .PDF. 
save_options = aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\" property", "self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone #ExFor:SaveOptions.update_fields #ExSummary:Shows how to update all the fields in a", "also apply Flate compression to all images and ignore the \"image_compression\" property's value.", "contents. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document", "0 obj\\r\\n\" + # \"<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829", "text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading", "lazy dog.\") # Create a \"PdfSaveOptions\" object that we can pass to the", "take us to their respective footnotes/endnotes. # Set the \"create_note_hyperlinks\" property to \"False\"", "property to \"25\" to give the zoom factor a value of 25%. 
options", "Set the \"image_compression\" property to \"PdfImageCompression.JPEG\" to use the # \"image_compression\" property to", "\"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a simplified version of DrawingML effects.", "/Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\")", "the \"use_book_fold_printing_settings\" property to \"True\" to arrange the contents # in the output", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL))", "= aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "#ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process bookmarks in headers/footers", "file: # content = file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn(", "PDF Type 1 equivalents. # Set the \"use_core_fonts\" property to \"False\" to not", "property to \"False\" to make it so that the reader does not apply", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with", "bookmarks that are inside headers/footers. 
# Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.FIRST\" to", "{info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in (False, True): with self.subTest(scale_wmf_fonts=scale_wmf_fonts): #ExStart", "EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render", "aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50,", "< text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) #", "property to \"4\" to exclude all headings whose levels are above 4 from", "to get the PDF reader to display a separate panel # with a", "increased file size. # Set the \"export_document_structure\" property to \"False\" to not export", "#7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\",", "٤, ٥, ٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠\", text_absorber.text) #elif numeral_format ==", "options.downsample_options.resolution_threshold = 128 # Only the first two images from the document will", "the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to try to render every metafile using vector", "render DrawingML effects with more accuracy and also with more processing cost. options.dml_effects_rendering_mode", "that allows us to work with any layers present in the document. 
#", "= aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\")", "an arrow will appear to the left of the entry. This entry is", "# def __getitem__(self, i) -> aw.WarningInfo: # return self.warnings[i] # def clear(self): #", "as file: # content = file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF, # aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): #", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"page_index\" to \"1\" to", "a saved PDF document digitally and timestamp it. doc = aw.Document() builder =", "style, will serve as the column header. builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\")", "in the output PDF. # Set the \"image_compression\" property to \"PdfImageCompression.JPEG\" to use", "encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to allow the editing of annotations. encryption_details.permissions", "This helps with making documents searchable but may significantly increase the size of", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading", "options) # Set the \"resolution\" property to \"36\" to downsample all images to", "+ # \"<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017", "both sides of the pages, we can fold all the pages down the", "\"use_core_fonts\" property to \"False\" to not apply PDF Type 1 fonts. 
options.use_core_fonts =", "XYZ 85 79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ 85 654 0\", link_annotations[2].destination.to_string()) #", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\",", "a resolution that is above 128 ppi. options.downsample_options.resolution_threshold = 128 # Only the", "0, 0, 0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings)", "def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows", "if a warning with the specified properties has been generated.\"\"\" # return any(warning", "4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4, 5, 6, 7, 8, 9, 10,", "to limit the headings' level that will appear in the outline of a", "#link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions", "== aw.saving.EmfPlusDualRenderingMode.EMF_PLUS: # self.assertEqual(1, 
pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + #", "to .PDF. # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" # to preserve custom", "#text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3, 4,", "\"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R>>stream\", file.read()) elif pdf_text_compression", "== aw.saving.PdfImageCompression.AUTO: # self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400,", "positioning operators. doc = aw.Document(MY_DIR + \"Text positioning operators.docx\") # Create a \"PdfSaveOptions\"", "# Below are three \"page_set\" properties that we can use to filter out", "saved PDF documents. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object", "document to .PDF. 
options = aw.saving.PdfSaveOptions() # The output PDF document will contain", "#ExSummary:Shows how to set Aspose.Words to skip embedding Arial and Times New Roman", "56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711 0]>>\", content) self.assertIn(", "+ \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression == aw.saving.PdfTextCompression.NONE: self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with", "save_options) # self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster operation is partly supported.\", #", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1])", "resolution of images in the PDF document. doc = aw.Document(MY_DIR + \"Images.docx\") #", "page. doc.save(stream, options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber =", "3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn(", "builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") # Create a \"PdfSaveOptions\"", "as file: # content = file.read().decode('utf-8') #if preserve_form_fields: # self.assertEqual(\"Please select a fruit:", "aw.DocumentBuilder(doc) # \"Arial\" is a standard font, and \"Courier New\" is a nonstandard", "level entry, and so on. 
# In the outline, we can click on", "0>>/Dest[5 0 R /XYZ 157 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\" property to", "is only intended as a supplement to the documentation, and is provided #", "to .PDF. # Set the \"display_doc_title\" to \"True\" to get some PDF readers,", "= aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a \"PdfSaveOptions\" object that we can pass", "we can open it using Adobe Acrobat and find tags for elements such", "R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count)", "#tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( #", "desirable. # Set the \"embed_full_fonts\" property to \"True\" to embed every glyph of", "Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the CMYK color space", "# Set the \"embed_full_fonts\" property to \"True\" to embed every glyph of every", "the numeral format used when saving to PDF. doc = aw.Document() builder =", "in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5 0 obj\\r\\n\" + # b\"<</Type", "it using Adobe Acrobat and find tags for elements such as the heading", "#with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: # data = file.read().decode('utf-8') #if header_footer_bookmarks_export_mode", "2, and then 3. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2", "to \"NumeralFormat.ARABIC_INDIC\" to # use glyphs from the U+0660 to U+0669 range as", "builder.write(\"Page \") builder.insert_field(\"PAGE\", \"\") builder.write(\" of \") builder.insert_field(\"NUMPAGES\", \"\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Hello World!\") #", "+ # \"Page 3 (odd)\\r\\n\" + # \"Page 4 (even)\\r\\n\" + # \"Page", "= aw.DocumentBuilder(doc) builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3, 4, 5, 6,", "self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write additional text positioning operators. doc =", "1.1.1.1.2\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "full use of all fonts if we edit the PDF. # Set the", "the \"memory_optimization\" property to \"True\" to lower the memory footprint of large documents'", "pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\")", "= aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\" if update_fields else \"Page of \",", "self.assertEqual(\"1 XYZ 233 806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84", "the document body. # Clicking on an entry in this outline will take", "to strengthen compression at the cost of image quality. 
pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR", "doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content = file.read().decode('utf-8')", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR +", "in the document outline. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings", "\"False\" not to have footnote/endnote symbols link to anything. options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR", "0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85 68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ 202", "the specified properties has been generated.\"\"\" # return any(warning for warning in self.warnings", "# b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI", "source code in this file # is only intended as a supplement to", "open these links in new windows/browser tabs. # Set the \"open_hyperlinks_in_new_window\" property to", "within tables # in the outline, provided that they have a heading level", "saved PDF document digitally and timestamp it. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "saved the document with it enabled. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR", "will apply our signature to the output document at this time. doc.save(ARTIFACTS_DIR +", "bookmarks in headers/footers in a document that we are rendering to PDF. 
doc", "Save every page: options.page_set = aw.saving.PageSet.all doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document =", "table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure in", "#ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def", "# \"Page 5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create", "with transparent backgrounds while saving a document to PDF. doc = aw.Document() builder", "\"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content = file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content)", "method converts the document to .PDF and applies the configuration # # in", "will contain an outline, which is a table of contents that lists headings", "654 0\", link_annotations[2].destination.to_string()) # self.assertEqual(\"1 XYZ 85 68 0\", link_annotations[3].destination.to_string()) # self.assertEqual(\"1 XYZ", "such as Adobe Acrobat, we will need to zoom in on the image", "\"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\") #input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name #text_fragment_absorber =", "options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content = file.read() if", "with 
open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content = file.read() if create_note_hyperlinks: self.assertIn(", "will also apply Flate compression to all images and ignore the \"image_compression\" property's", "aw.Document() builder = aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\") builder.insert_image(img) #", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2,", "we have access to both the fonts in this document. original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources()", "== aw.saving.ColorMode.NORMAL: # self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE: #", "aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print this document on both sides of the pages,", "PDF. options.update_fields = update_fields # We can clone PdfSaveOptions objects. options_copy = options.clone()", "freeze all form fields in the document at # their current values and", "\"PdfSaveOptions.export_page_set.all.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page", "if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document", "custom properties while converting a document to PDF. 
doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My", "# save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options)", "document's filename. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd", "setup objects of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for section in doc.sections:", "that this will have. options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if", "entry of the same or lower level, # an arrow will appear to", "builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit", "from the document in black and white. # The size of the output", "pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode ==", "self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display the title of the document as", "#ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to save a document to the PDF format using", "images in a document as we export it to PDF. doc = aw.Document()", "that lists headings in the document body. # Clicking on an entry in", "parameters: with self.subTest(uri=uri, result=result): doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False)", "document structure of the original document. 
# This helps with making documents searchable", "obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG):", "in the text act as links that, upon clicking, take us to their", "All Rights Reserved. # # This file is part of Aspose.Words. The source", "builder.end_table() # Create a \"PdfSaveOptions\" object that we can pass to the document's", "embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000,", "SaveWarningCallback(aw.IWarningCallback): # def __init__(self): # self.save_warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo):", "792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2 11 0 R/X3 12 0 R/X4", "options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\"))", "headers/footers in a document that we are rendering to PDF. doc = aw.Document(MY_DIR", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name)", "over the lazy dog.\") # Configure our font sources to ensure that we", "it. 
# Set the \"zoom_factor\" property to \"25\" to give the zoom factor", "else 5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts):", "aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\" property to \"True\" to replace some fonts, #", "content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents", "hyperlinks. doc = aw.Document(MY_DIR + \"Footnotes and endnotes.docx\") # Create a \"PdfSaveOptions\" object", "= aw.Document(MY_DIR + \"DrawingML shape 3D effects.docx\") # warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback", "= color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") #pdf_doc_image", "#self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual( #", "# self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\",", "usual. 
save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height)", "obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR +", "with self.subTest(pdf_text_compression=pdf_text_compression): #ExStart #ExFor:PdfSaveOptions #ExFor:PdfSaveOptions.text_compression #ExFor:PdfTextCompression #ExSummary:Shows how to apply text compression when", "for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to", "= aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\" property to \"True\" to save form fields", "a metafile, which will require raster operations to render in the output PDF.", "4 0 R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL):", "contents # in the output PDF in a way that helps us use", "Acrobat, we will see the document scaled at 1/4 of its actual size.", ".PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_compression\" property to \"PdfImageCompression.AUTO\" to use", "# \"image_compression\" property to control the quality of the Jpeg images that end", "preserving the document structure of the original document. # This helps with making", "PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in range(100): builder.writeln(\"Lorem ipsum", "= file.read() self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0", "#ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content = file.read() if export_document_structure:", "# The output PDF document will contain an outline, which is a table", "pdf_document.open_action.as_go_to_action() #self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE):", "# self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML):", ".PDF. 
save_options = aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF # The output PDF document will", "#if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): # self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: #", "aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): with self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure Enhanced Windows", "5 (odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page", "1 (,) 0 ( ) 0 (1) 0 (0) 0 (.) 0 (", "-1 (0) 0 (1) 0 (8)] TJ\", # tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR", "builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\") # Create", "PDF Type 1 fonts. options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts:", "and collects formatting loss-related warnings that occur upon saving a document.\"\"\" # def", "as \"Field.Update()\", and \"Document.UpdateFields()\" # each time we need them to display accurate", "impact that this will have. 
options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd", "+ \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content =", "in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how", "to the document's \"save\" method # # to modify how that method converts", "#class SaveWarningCallback(aw.IWarningCallback): # def __init__(self): # self.save_warnings = aw.WarningInfoCollection() # def warning(self, info:", "86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733 0]>>\", content) self.assertIn( b\"<</Type", "saving operation. # save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options # callback =", "builder = aw.DocumentBuilder(doc) builder.write(\"Please select a fruit: \") # Insert a combo box", "document as a booklet, we must set the \"multiple_pages\" # properties of the", "\"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original font sources. aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "a user to choose an option from a collection of strings. builder.insert_combo_box(\"MyComboBox\", [\"Apple\",", "to render the EMF+ part of an EMF+ dual metafile. # Set the", "WMF images according to the size of the metafile on the page. 
#", "+ \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR +", "a fruit: \", text_fragment_absorber.text) # self.assertIn(\"11 0 obj\\r\\n\" + # \"<</Type /Annot/Subtype /Widget/P", ".PDF. options = aw.saving.PdfSaveOptions() # Set the \"update_fields\" property to \"False\" to not", "to both the fonts in this document. original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR,", "to save in an output PDF document based on the parity of their", "# self.warnings.add(info) # def __getitem__(self, i) -> aw.WarningInfo: # return self.warnings[i] # def", "+ \"Images.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "#ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how to specify a compression type for all images", "to get the PDF reader to open the saved # document in full-screen", "its respective heading. # Set the \"headings_outline_levels\" property to \"2\" to exclude all", "#ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to set permissions on a saved", "self.assertEqual(1, pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1])", "the \"1.7\" standard. # Set the \"compliance\" property to \"PdfCompliance.PDF_A1A\" to comply with", "how that method converts the document to .PDF. 
save_options = aw.saving.PdfSaveOptions() # Enable", "pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\")", "size of already large documents. save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd", "Set the \"font_embedding_mode\" property to \"EMBED_NONE\" to not embed any fonts in the", "i % 2 == 0 else 'even'})\") if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) #", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file: content", "content = file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( # b\"5", "document. 
# Set the \"display_doc_title\" to \"False\" to get such readers to display", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR", "save_options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to display the", "\"Page 5 (odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber)", "get some PDF readers, such as Adobe Acrobat Pro, # to display the", "obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length", "== aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def", "the \"export_document_structure\" property to \"False\" to not export the document structure. options.export_document_structure =", "how to specify a compression type for all images in a document that", "method converts the document to .PDF. save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\"", "0 obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images in (False, True):", "Save only the odd-numbered pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) #", "# to render the EMF+ part of an EMF+ dual metafile. 
# Set", "dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render", "pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file:", "to \"True\" to include all headings within tables # in the outline, provided", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows bar pdf title\"", "document starting from the second page. options.page_set = aw.saving.PageSet(1) # This document will", "content) else: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent", "and display them as plain text in the output PDF. 
pdf_options.preserve_form_fields = preserve_form_fields", "to preserve document structure elements, which can assist in programmatically interpreting our document.", "annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8, len([a for a in link_annotations if a.annotation_type ==", "self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart", "element positioning in the output PDF, should there be any, at the cost", "6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2", "= 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1,", "-> \"Show/Hide\" -> \"Navigation panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with", "time. # We will need to manually update them using updating methods such", "85 677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85 79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1", "aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0", "to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\" property to \"True\" to", "self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "\"1\" to render a portion of the document starting from the second page.", "2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit the", "doc = aw.Document(MY_DIR + \"Bookmarks in headers and footers.docx\") # Create a \"PdfSaveOptions\"", "document as we render it with the \"save\" method. signing_time = datetime.now() options.digital_signature_details", "6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1", "to \"False\" to not export the document structure. options.export_document_structure = export_document_structure # Suppose", "5. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading", "the even-numbered pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 -", "#def test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR + \"DrawingML shape 3D effects.docx\") # warning_callback", "\"scale_wmf_fonts_to_metafile_size\" property to \"True\" to scale fonts # that format text within WMF", "\"save\" method will apply our signature to the output document at this time.", "rendering the document as a booklet, we must set the \"multiple_pages\" # properties", "in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart #ExFor:OutlineOptions.create_missing_outline_levels #ExFor:PdfSaveOptions.outline_options #ExSummary:Shows how to work with", "(odd)\\r\\n\" + # \"Page 2 (even)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + #", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\"", "2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100\", text_absorber.text) #elif", "the documentation, and is provided # \"as is\", without warranty of any kind,", "#elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string())", "\"NumeralFormat.EUROPEAN\" to use european numerals. 
# Set the \"numeral_format\" property to \"NumeralFormat.SYSTEM\" to", "f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length", "# to text when we save the document to PDF. The larger the", "\"PdfSaveOptions.escaped_uri.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def", "\"PdfZoomBehavior.ZOOM_FACTOR\" to get a PDF reader to # apply a percentage-based zoom factor", ") 1 (2) -1 (0) 0 (1) 0 (8)] TJ\", # tj_operator.to_string()) #else:", "document contains a custom font, embedding in the output document may be desirable.", "self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change image color with saving", "to render the DML shapes themselves. options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options)", "== aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for create_note_hyperlinks in (False, True): with", "how to export Odd pages from the document. 
doc = aw.Document() builder =", "= effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "\\0f\\0o\\0r\\0\", content) self.assertIn( b\"/Company (\\xFE\\xFF\\0M\\0y\\0 \\0v\\0a\\0l\\0u\\0e)>>\", content) elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type", "# \"Page 5 (odd)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.all.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber()", "preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\") image =", "builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent background logo.png\")", "= aw.Document() builder = aw.DocumentBuilder(doc) # \"Arial\" is a standard font, and \"Courier", "/Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711 0]>>\",", "#page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #action = link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self):", "text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶", "searchable but may significantly increase the size of already large documents. 
save_options.compliance =", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please select a fruit: \") # Insert a", "update_fields else \"Page of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields in (False, True):", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\")", "that are in all headers/footers. save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd", "we open the document with it. # Set the \"zoom_factor\" property to \"25\"", "Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to get the PDF reader # also", "9, 10, 50, 100\") # Create a \"PdfSaveOptions\" object that we can pass", "== aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for pdf_text_compression in", "U+0669 range as numbers. # Set the \"numeral_format\" property to \"NumeralFormat.CONTEXT\" to #", "use_core_fonts in (False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF Type", "the \"page_mode\" property to \"PdfPageMode.USE_NONE\" to get the PDF reader to display just", "Set the \"use_core_fonts\" property to \"True\" to replace some fonts, # including the", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start", "property to \"False\" to not apply PDF Type 1 fonts. options.use_core_fonts = use_core_fonts", "display the correct value in real time. 
# We will need to manually", "self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S", "result in parameters: with self.subTest(uri=uri, result=result): doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\",", "on them. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create", "on a saved PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\")", "timeout period via the constructor. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds())", "to \"True\" to turn all footnote/endnote symbols # in the text act as", "normally. options.use_book_fold_printing_settings = render_text_as_bookfold # If we are rendering the document as a", "pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options", "\"default_bookmarks_outline_level\" property to \"1\" to display all # bookmarks at the first level", "#elif color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for", "most cases, the color space will be RGB. 
# Set the \"image_color_space_export_mode\" property", "#elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())", "3D effects.docx\") # warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback # save_options =", "\"Page of \", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields):", "the correct value in real time. # We will need to manually update", "bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels in (False, True): with self.subTest(create_missing_outline_levels=create_missing_outline_levels): #ExStart", "= aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"My Office\", signing_time) options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason)", "#ExFor:PdfEncryptionDetails #ExSummary:Shows how to set permissions on a saved PDF document. doc =", "(r) -1 ( ) 1 (2) -1 (0) 0 (1) 0 (8)] TJ\",", "1. pdf_save_options.outline_options.headings_outline_levels = 1 # Set the \"create_outlines_for_headings_in_tables\" property to \"False\" to exclude", "the PDF format using the Save method and the PdfSaveOptions class. doc =", "to \"PdfPageMode.FULL_SCREEN\" to get the PDF reader to open the saved # document", "0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0", "of the EMF+ records are supported. 
# Otherwise, Aspose.Words will render the EMF", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create a \"PdfSaveOptions\" object", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions() #", "\"embed_full_fonts\" property to \"False\" to apply subsetting to fonts, saving only the glyphs", "in the saved PDF. # Aspose.Words will also apply Flate compression to all", "\"Tags\". doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as file:", "\"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content = file.read() if interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\"", "# only export bookmarks in the first section's header/footers. # Set the \"header_footer_bookmarks_export_mode\"", "3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if", "0) # Create a \"PdfSaveOptions\" object that we can pass to the document's", "creates a booklet. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\")", "the title bar. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title =", "use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000,", "such as the one we have created above from the outline. 
# Set", "to \"True\" to include all missing levels in the outline, # leaving blank", "= aspose.pdf.document(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window", "\"View\" -> \"Show/Hide\" -> \"Navigation panes\" -> \"Tags\". doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd", "+ \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\" +", "#ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display the title of the document as the", "= aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + #", "to \"EMBED_ALL\" to embed all fonts in the output PDF. # Set the", "DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to # render DrawingML", "all images in a document that we are converting to PDF. doc =", "display a separate panel # that allows us to work with any layers", "aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256 self.assertEqual(\"Test Signing\", options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options)", "# compression to text when we save the document to PDF. # Set", "= ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual(", "the left of the entry. 
This entry is the \"owner\" of several such", "Set the \"additional_text_positioning\" property to \"True\" to attempt to fix incorrect # element", "the bigger the impact that this will have. options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR +", "ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF) # self.assertEqual( #", "open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\"", "# elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception):", "217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" + # \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) (\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12", "(aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to", "accuracy and also with more processing cost. options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR", "to render all images in color. 
pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR", "# Set the \"use_core_fonts\" property to \"False\" to not apply PDF Type 1", "import io import os from datetime import datetime, timedelta, timezone import aspose.words as", "to save hyperlinks in a document we convert to PDF so that they", "will require raster operations to render in the output PDF. # metafile_rendering_options.emulate_raster_operations =", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber()", "mundo!\") save_options = aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is ignored.", "\"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()", "= scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber", "document, the outline entries from the 5th heading level are sub-entries of the", "= encryption_details # When we open this document, we will need to provide", "builder.writeln(\"Heading 1.1.1.1.2\") # Create a \"PdfSaveOptions\" object that we can pass to the", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows", ".PDF. 
# Set the \"display_doc_title\" to \"True\" to get some PDF readers, such", "self.warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS:", "render embedded EMF data # for metafiles that we can render as vector", "3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content) elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn(", "images normally. options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for section in doc.sections: section = section.as_section()", "the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to only allow nonstandard fonts' embedding in the", "obj\\r\\n19289 \", content) self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images in (False, True): with", "added a fallback to bitmap rendering and changing type of warnings about unsupported", "self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode", "to \"False\" to render transparent images normally. options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\",", "aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading 1.2.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.2.2.2.1\") builder.writeln(\"Heading 1.2.2.2.2\") #", "doc = aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") # Create a \"PdfSaveOptions\" object that we", "update all the fields in a document right before a save operation. 
#", "self.assertEqual(\"1 XYZ 84 47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85", "in (False, True): with self.subTest(render_text_as_bookfold=render_text_as_bookfold): #ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save a document", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"page_mode\" property", "up in the output PDF. pdf_save_options.image_compression = pdf_image_compression # Set the \"jpeg_quality\" property", ",۴ ,۳ ,۲ ,۱\", text_absorber.text) def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export", "self.assertEqual(19216, len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images in (False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images", "= aw.Document() builder = aw.DocumentBuilder(doc) for i in range(5): builder.writeln(f\"Page {i + 1}", "PDF. pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "#elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn( # b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0", "a heading-type style, will serve as the column header. 
builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier =", "< text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL)) #", "\"PdfImageCompression.AUTO\" to use the # \"image_compression\" property to control the quality of the", "pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image", "entries are sub-entries of the second 3rd level entry, and so on. #", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"text_compression\" property to \"PdfTextCompression.NONE\" to", "for images in a document as we export it to PDF. doc =", "# Suppose we export document structure while saving this document. In that case,", "+ \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name)", "the odd-numbered pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options) # 3 -", "property to \"True\" to save form fields as interactive objects in the output", "\"PDF/A-1b\" standard, # which aims to preserve the visual appearance of the document", "We will need to manually update them using updating methods such as \"Field.Update()\",", "document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"embed_full_fonts\" property to \"True\"", "pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode ==", "\"PdfImageCompression.JPEG\" to use the # \"image_compression\" property to control the quality of all", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset) #self.assertEqual(\"Arvo\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(embed_full_fonts,", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"export_document_structure\" property to \"True\" to", "\"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #if use_core_fonts: # self.assertEqual(\"Helvetica\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"Courier\", pdf_doc_fonts[1].font_name) #else:", "to ensure that we have access to both the fonts in this document.", "that helps us use it to make a booklet. # Set the \"use_book_fold_printing_settings\"", "# outline_item_collection = pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ", "if interpolate_images: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent", "#ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change image color with saving options property. doc =", "#ExSummary:Shows to process bookmarks in headers/footers in a document that we are rendering", "of an EMF+ dual metafile. 
# Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" #", "# Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to display the outline navigation pane", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def __init__(self): #", "\"False\" to exclude all headings within tables, # such as the one we", "to .PDF. save_options = aw.saving.PdfSaveOptions() save_options.save_format = aw.SaveFormat.PDF # The output PDF document", "section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print this document on both sides", "the fields in a document immediately before saving it to PDF. doc =", "fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for pdf_compliance", "the output PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONE\" to not embed", "#ExSummary:Shows how to save a document to the PDF format in the form", "operations # at the cost of increasing the duration of the operation. #", "how enable/disable PDF Type 1 font substitution. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to", "677 0\", link_annotations[0].destination.to_string()) # self.assertEqual(\"1 XYZ 85 79 0\", link_annotations[1].destination.to_string()) # self.assertEqual(\"1 XYZ", "converts the document to .PDF. 
# Set the \"zoom_behavior\" property to \"PdfZoomBehavior.ZOOM_FACTOR\" to", "to set Aspose.Words to skip embedding Arial and Times New Roman fonts into", "# self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85 806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title)", "\"True\" to scale fonts # that format text within WMF images according to", "document to save in an output PDF document based on the parity of", "heading level are sub-entries of the second 4th level outline entry, # the", "R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break #elif", "'rb') as file: # content = file.read().decode('utf-8') #if preserve_form_fields: # self.assertEqual(\"Please select a", "WMF fonts scaling according to metafile size on the page. doc = aw.Document(MY_DIR", "can assist in programmatically interpreting our document. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "85 79 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W", "of its actual size. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "the document # fields and update them before we save it as a", "normally. options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "in the output document may be desirable. # Set the \"embed_full_fonts\" property to", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\"", "= aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\" object of the \"SaveOptions\" object to #", "headings of levels 5 and below in the outline. 
save_options.outline_options.headings_outline_levels = 5 #", "in the tab that belongs to this document. # Set the \"display_doc_title\" to", "the output PDF. # Set the \"preserve_form_fields\" property to \"False\" to freeze all", "# \"\"\"Prints and collects formatting loss-related warnings that occur upon saving a document.\"\"\"", "aw.DocumentBuilder(doc) # Insert headings of levels 1 to 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading)", "outline_item_collection = pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233", "# If we are rendering the document as a booklet, we must set", "# self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) <", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create a \"PdfSaveOptions\" object that", "aw.saving.PdfPageMode.FULL_SCREEN: # self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self):", "link to anything. 
options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR", "Adobe Acrobat and find tags for elements such as the heading # and", "= aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") # Create a \"PdfSaveOptions\" object that we can", "# Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF\" # to only render the EMF", "authority-verified timestamp. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The default lifespan of the", "#pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle #self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self):", "it. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Signed PDF contents.\") # Create a", "+ \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content = file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn(", "apply the downsampling to # images with a resolution that is above 128", "Set the \"open_hyperlinks_in_new_window\" property to \"False\" to save all hyperlinks normally. options.open_hyperlinks_in_new_window =", "/XYZ 202 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS", "the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\" property to", "= aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the \"memory_optimization\" property to \"True\" to lower the memory", "the quality of all images that end up in the output PDF. 
pdf_save_options.image_compression", "pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) #", "aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to display the outline navigation", "#ExSummary:Shows how to create PDF document outline entries for headings inside tables. doc", "#ExStart #ExFor:PdfSaveOptions.use_book_fold_printing_settings #ExSummary:Shows how to save a document to the PDF format in", "#ExFor:PdfDigitalSignatureTimestampSettings.password #ExFor:PdfDigitalSignatureTimestampSettings.server_url #ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign a saved PDF document digitally", "to render the EMF+ part of an EMF+ dual metafile if all of", "if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts", "This document contains headings of levels 1 and 5, and no headings with", "the \"default_bookmarks_outline_level\" property to \"1\" to display all # bookmarks at the first", "to text when we save the document to PDF. The larger the document,", "the page. doc = aw.Document(MY_DIR + \"WMF with text.docx\") # Create a \"PdfSaveOptions\"", "0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count) def test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD,", "its contents. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\")", "pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\")", "XYZ 258 711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157 733 0\", link_annotations[6].destination.to_string()) #", "the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" # to preserve custom properties within the output", "to convert a whole document to PDF with three levels in the document", "with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process bookmarks", "the PDF format in the form of a book fold. doc = aw.Document(MY_DIR", "document to .PDF. # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" # to preserve", "# \"Content\" navigation pane of Adobe Acrobat at the cost of increased file", "# self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400,", "how to configure Enhanced Windows Metafile-related rendering options when saving to PDF. 
doc", "0 R>>\", data) # outline_item_collection = pdf_doc.outlines # self.assertEqual(4, outline_item_collection.count) # self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title)", "# self.assertEqual(3, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) # self.assertEqual(\"My value\", pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for", "open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file:", "# Set the \"scale_wmf_fonts_to_metafile_size\" property to \"False\" to # preserve the default scale", "# Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.STANDARD\" # to preserve custom properties within", "this document. original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True) aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source]) font_sources =", "aw.saving.PdfSaveOptions() # Set the \"update_fields\" property to \"False\" to not update all the", "if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts in (False, True):", "aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set Aspose.Words to", "change image color with saving options property. doc = aw.Document(MY_DIR + \"Images.docx\") #", "quality of all images that end up in the output PDF. 
pdf_save_options.image_compression =", "in the output PDF in a way that helps us use it to", "the \"create_outlines_for_headings_in_tables\" property to \"True\" to include all headings within tables # in", "\"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original font sources. aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources)", "options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content = file.read() if", "to the document's \"save\" method # to modify how that method converts the", "them. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", \"https://www.google.com/search?q=%20aspose\", False) # Create a", "50, 100\") # Create a \"PdfSaveOptions\" object that we can pass to the", "levels 1, 2, and then 3. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier", "we can click on the arrow of the \"owner\" entry to collapse/expand all", "#image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_m_l_effects.pdf\", \"rb\")", "entry, # the 4th and 5th heading level entries are sub-entries of the", "# By default, Aspose.Words downsample all images in a document that we save", "\" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type 
/Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4", "#ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit the headings' level that will appear in", "info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: # print(f\"{info.warning_type}: {info.description}.\") # self.save_warnings.warning(info) def test_fonts_scaled_to_metafile_size(self): for scale_wmf_fonts in", "# each time we need them to display accurate values. builder.write(\"Page \") builder.insert_field(\"PAGE\",", "# Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to # not export any bookmarks", "self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode", "dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file:", "work with outline levels that do not contain any corresponding headings when saving", "aw.Document() builder = aw.DocumentBuilder(doc) # Create a table with three rows. The first", "# Set the \"create_outlines_for_headings_in_tables\" property to \"False\" to exclude all headings within tables,", "= pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type()) ##ExStart", "in the output PDF. pdf_save_options.image_compression = pdf_image_compression # Set the \"jpeg_quality\" property to", "to modify how that method converts the document to .PDF. # Set the", "# that the document is using. 
The file will be considerably smaller, #", "aw.DocumentBuilder(doc) for i in range(5): builder.writeln(f\"Page {i + 1} ({'odd' if i %", "+ 1} ({'odd' if i % 2 == 0 else 'even'})\") if i", "= aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent", "to modify how that method converts the document to .PDF. options = aw.saving.PdfSaveOptions()", "content) # self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE: # self.assertIn(", "# This file is part of Aspose.Words. The source code in this file", "# Set the \"additional_text_positioning\" property to \"False\" to render the document as usual.", "# digitally sign the document as we render it with the \"save\" method.", "serve as TOC entries of levels 1 and 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading)", "layers present in the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to", "0 ( ) 0 (N) 0 (o) 0 (v) 0 (e) 0 (m)", "glyphs from the U+0660 to U+0669 range as numbers. 
# Set the \"numeral_format\"", "Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.METADATA\" # to preserve custom properties in an", "and endnotes.docx\") # Create a \"PdfSaveOptions\" object that we can pass to the", "\"Page 4 (even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber)", "Set the \"create_missing_outline_levels\" property to \"False\" to ignore missing outline levels, # and", "1.2.2.2.2\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "to U+0669 range as numbers. # Set the \"numeral_format\" property to \"NumeralFormat.CONTEXT\" to", "warnings about unsupported metafile records. #def test_handle_binary_raster_warnings(self): # doc = aw.Document(MY_DIR + \"WMF", "dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the", "#ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size #ExSummary:Shows how to WMF fonts scaling according to metafile size on the", "#ExSummary:Shows how to convert only some of the pages in a document to", "dolor sit amet, consectetur adipiscing elit, \" + \"sed do eiusmod tempor incididunt", "pane of Adobe Acrobat at the cost of increased file size. # Set", "document that we are rendering to PDF. doc = aw.Document(MY_DIR + \"Bookmarks in", "def contains(self, source: aw.WarningSource, type: aw.WarningType, description: str) -> bool: # \"\"\"Returns True", "we render it with the \"save\" method. 
signing_time = datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder,", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #with open(ARTIFACTS_DIR", "section in doc.sections: section = section.as_section() section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING # Once we print", "Odd pages from the document. doc = aw.Document() builder = aw.DocumentBuilder(doc) for i", "== aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC: # self.assertEqual(\"۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳", "+ \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta) -1 (g) 1 (, 10. November) -1", "property to \"PdfTextCompression.FLATE\" to apply ZIP compression # to text when we save", "document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\"", "#annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for x in annotation_selector.selected] #if", "== aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content)", "content = file.read() self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0", "entry has subsequent entries of a higher level inbetween itself and the next", "but may significantly increase the size of already large documents. 
save_options.compliance = pdf_compliance", "builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by disallowing", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.font.locale_id = 4096 # CultureInfo(\"ar-AR\").lcid builder.writeln(\"1, 2, 3,", "aw.saving.PdfSaveOptions() # Below are three \"page_set\" properties that we can use to filter", "Set the \"embed_full_fonts\" property to \"True\" to embed every glyph of every embedded", "link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212 711 0\", link_annotations[7].destination.to_string()) #else: # self.assertEqual(0, annotation_selector.selected.count) def", "to \"EmfPlusDualRenderingMode.EMF_PLUS\" to # to render the EMF+ part of an EMF+ dual", "since there are no usable headings. # Set the \"create_missing_outline_levels\" property to \"False\"", "form = pdf_document.form # self.assertEqual(1, pdf_document.form.count) # field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name)", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" + # \"Page", "apply any # compression to text when we save the document to PDF.", "world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by disallowing all permissions. encryption_details.permissions =", "the \"encryption_details\" property. 
save_options.encryption_details = encryption_details # When we open this document, we", "= options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber", "\"DrawingML shape effects.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "\"True\" to make the document structure, such tags, available via the # \"Content\"", "\"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to get the PDF reader # also to display", "1} ({'odd' if i % 2 == 0 else 'even'})\") if i <", "the page setup objects of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for section", "= numeral_format doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber", "== aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def", "how to configure the rendering quality of DrawingML effects in a document as", "return self.warnings[i] # def clear(self): # \"\"\"Clears warning collection.\"\"\" # self.warnings.clear() # @property", "0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\", content) elif page_mode == aw.saving.PdfPageMode.USE_OC: self.assertIn( f\"<</Type", "/Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 677", "aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render fallback shapes when", "720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 
85 677 0]>>\", content) self.assertIn(", "= pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL):", "#pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual(", "+ \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\",", "R /XYZ 85 79 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524", "property to \"PdfTextCompression.NONE\" to not apply any # compression to text when we", "aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows bar pdf title\" #", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R>>stream\",", "options.digital_signature_details.reason) self.assertEqual(\"My Office\", options.digital_signature_details.location) self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR", "is the \"owner\" of several such \"sub-entries\". # In our document, the outline", "of DrawingML effects in a document as we save it to PDF. 
doc", "self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count) def test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A):", "of a saved PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert", "self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber", "self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform interpolation on images while saving a", "def count(self): # return len(self.warnings) # def contains(self, source: aw.WarningSource, type: aw.WarningType, description:", "to # export bookmarks that are in all headers/footers. save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR", "get the outline # to only register headings with heading levels that are", "= aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\", datetime.now())", "builder = aw.DocumentBuilder(doc) builder.insert_hyperlink(\"Testlink\", uri, False) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.escaped_uri.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "stream: image.save(stream) if preblend_images: self.assertIn(\"11 0 obj\\r\\n20849 \", content) self.assertEqual(17898, len(stream.getvalue())) else: self.assertIn(\"11", "take us to the location of its respective heading. # Set the \"headings_outline_levels\"", "only export bookmarks in the first section's header/footers. 
# Set the \"header_footer_bookmarks_export_mode\" property", "allow a user to choose an option from a collection of strings. builder.insert_combo_box(\"MyComboBox\",", "labore et dolore magna aliqua.\") # Create a \"PdfSaveOptions\" object that we can", "PDF. pdf_save_options.image_compression = pdf_image_compression # Set the \"jpeg_quality\" property to \"10\" to strengthen", "aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to discard all DrawingML effects.", "the \"PDF/A-1b\" standard, # which aims to preserve the visual appearance of the", "in (False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title #ExSummary:Shows how to display the title", "\"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def __init__(self): # self.warnings: List[aw.WarningInfo]", "to display the outline, if possible. # Set the \"page_mode\" property to \"PdfPageMode.USE_NONE\"", "def test_custom_properties_export(self): for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE, aw.saving.PdfCustomPropertiesExport.STANDARD, aw.saving.PdfCustomPropertiesExport.METADATA): with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode): #ExStart #ExFor:PdfCustomPropertiesExport #ExFor:PdfSaveOptions.custom_properties_export", "<</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 677 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "== \"Arvo\" for font in font_sources[1].get_available_fonts())) # Create a \"PdfSaveOptions\" object that we", "# Restore the original font sources. 
aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")", "can click on the arrow of the \"owner\" entry to collapse/expand all its", "to \"NumeralFormat.SYSTEM\" to determine the symbol set from regional settings. options.numeral_format = numeral_format", "options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content", "we edit the document. options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts:", "== aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image =", "see the document scaled at 1/4 of its actual size. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\",", "= doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello world!\") builder.paragraph_format.style = doc.styles.get_by_name(\"Normal\") builder.write(\"Lorem ipsum dolor sit amet,", "b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212", "(2) -1 (018)] TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False, True):", "warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") # self.warnings.add(info) # def __getitem__(self, i) -> aw.WarningInfo:", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)", "a booklet. # Set the \"use_book_fold_printing_settings\" property to \"False\" to render the PDF", "11 0 R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR + \"DrawingML", "document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to get the PDF reader", "(o) 0 (v) 0 (e) 0 (m) 0 (b) 0 (e) 0 (r)", "text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: #", "+ \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", \"rb\") as file: content =", "save_options = aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is ignored. 
save_options.export_document_structure", "[0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2", "= aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password)", "\"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", \"rb\")", "outline entries # and collapse all level and 3 and higher entries when", "that we save to PDF to 220 ppi. self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold)", "outline entries for headings inside tables. doc = aw.Document() builder = aw.DocumentBuilder(doc) #", "object of the \"SaveOptions\" object to # digitally sign the document as we", "the quality of the Jpeg images that end up in the output PDF.", "permissions. encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to allow the editing of annotations.", "0 R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", #", "converts to PDF. # In most cases, the color space will be RGB.", "are rendering to PDF. 
doc = aw.Document(MY_DIR + \"Bookmarks in headers and footers.docx\")", "self.verify_image(400, 400, pdf_doc_image_stream) # elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\"))", "headings as level 2. save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor", "0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # break #elif rendering_mode ==", "self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file: content = file.read()", "self.subTest(rendering_mode=rendering_mode): #ExStart #ExFor:EmfPlusDualRenderingMode #ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode #ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf #ExSummary:Shows how to configure Enhanced Windows Metafile-related rendering", "code # that forces readers to open these links in new windows/browser tabs.", "self.assertEqual(\"Bookmark_1\", outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233 806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) #", "three \"page_set\" properties that we can use to filter out a set of", "be considerably smaller, # but we may need access to any custom fonts", "pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream: # self.verify_image(400,", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\") 
#pdf_doc_image = pdf_document.pages[1].resources.images[1] #if color_mode == aw.saving.ColorMode.NORMAL:", "\"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content = file.read()", "# Set the \"headings_outline_levels\" property to \"2\" to exclude all headings whose levels", "#if numeral_format == aw.saving.NumeralFormat.EUROPEAN: # self.assertEqual(\"1, 2, 3, 4, 5, 6, 7, 8,", "output PDF. save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\" property to \"1\" to", "file.read() if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3 0 R/Contents 6 0", "contents that lists headings in the document body. # Clicking on an entry", "self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: # self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN:", "StringComparison.ORDINAL)) def test_zoom_behaviour(self): #ExStart #ExFor:PdfSaveOptions.zoom_behavior #ExFor:PdfSaveOptions.zoom_factor #ExFor:PdfZoomBehavior #ExSummary:Shows how to set the default", "navigation pane of Adobe Acrobat at the cost of increased file size. #", "3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE):", "as plain text in the output PDF. 
pdf_options.preserve_form_fields = preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\",", "property to \"True\" to scale fonts # that format text within WMF images", "that method converts the document to .PDF and applies the configuration # #", "pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self):", "0 (1) 0 (0) 0 (.) 0 ( ) 0 (N) 0 (o)", "not display the correct value in real time. # We will need to", "3 (odd)\\r\\n\" + # \"Page 4 (even)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text)", "this document using a reader such as Adobe Acrobat, we will see the", "pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level) #self.assertFalse(pdf_document.outlines[1][1].open) #self.assertEqual(2, pdf_document.outlines[1][1].level) #self.assertTrue(pdf_document.outlines[1][2].open) #self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for", "all the document # fields and update them before we save it as", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster operation is", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception): # aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document =", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\")", 
"\"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.ALL\" to # export bookmarks that are in all headers/footers.", "property to \"2\" to automatically expand all heading level 2 and lower outline", "on images while saving a document to PDF. doc = aw.Document() builder =", "to .PDF. pdf_options = aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\" property to \"True\" to", "Aspose.Words downsample all images in a document that we save to PDF to", "Set the \"page_mode\" property to \"PdfPageMode.USE_OC\" to get the PDF reader to display", "# self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode) #elif page_mode == aw.saving.PdfPageMode.USE_OC: # self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode) def test_note_hyperlinks(self): for", "= pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR +", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #4\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL))", "0 0]/FT /Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field()", "warning with the specified properties has been generated.\"\"\" # return any(warning for warning", "0 0 0]/FT /Sig/T\", content) self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\").has_digital_signature) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\")", "#def test_unsupported_image_format_warning(self): # doc 
= aw.Document(MY_DIR + \"Corrupted image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback()", "takes over the monitor's display and has no controls visible. # Set the", "document to .PDF. options = aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\" object of the", "that method converts the document to .PDF. save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF) # Set the", "== aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(20000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(100000, pdf_doc_image.to_stream().length) #self.assertEqual(400,", "aw.Document(MY_DIR + \"Corrupted image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback #", "3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC", "font in the output PDF. # The document's size may become very large,", "# Set the \"display_doc_title\" to \"False\" to get such readers to display the", "\"PdfSaveOptions.set_numeral_format.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.set_numeral_format.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #if", "their respective footnotes/endnotes. # Set the \"create_note_hyperlinks\" property to \"False\" not to have", "8 0 R>>/XObject<</X1 10 0 R/X2 11 0 R/X3 12 0 R/X4 13", "period via the constructor. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\",", "any, at the cost of increased file size. 
# Set the \"additional_text_positioning\" property", "November) -1 ( ) 1 (2) -1 (018)] TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self):", "to \"False\" to save the document as a PDF normally. save_options.memory_optimization = memory_optimization", "will display # the most accurate values in the PDF. options.update_fields = update_fields", "pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show", "b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0", "document to PDF. # Set the \"text_compression\" property to \"PdfTextCompression.FLATE\" to apply ZIP", "733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 677 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype", "to .PDF. save_options = aw.saving.PdfSaveOptions() # Enable encryption via the \"encryption_details\" property. save_options.encryption_details", "def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how to create a \"Span\" tag in the", "when we save it to PDF. certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details", "any layers present in the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\"", "to # images with a resolution that is above 128 ppi. 
options.downsample_options.resolution_threshold =", "the image # to see the interpolation effect if we saved the document", "with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create PDF document outline entries for", "= aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings that can serve as TOC", "range as numbers. # Set the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to use european", "that end up in the output PDF. # Set the \"image_compression\" property to", "builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create a \"PdfSaveOptions\"", "every metafile using vector graphics. # metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a", "document will be downsampled at this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document", "builder.writeln(\"Hello world!\") # Create a \"PdfSaveOptions\" object that we can pass to the", "import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR class ExPdfSaveOptions(ApiExampleBase): def test_one_page(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExFor:Document.save(BytesIO,SaveOptions)", "[85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733 0]>>\", content)", "and endnotes function as hyperlinks. 
doc = aw.Document(MY_DIR + \"Footnotes and endnotes.docx\") #", "pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for x in annotation_selector.selected]", "10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password", "create_outlines_for_headings_in_tables in (False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create PDF", "display the outline, if possible. # Set the \"page_mode\" property to \"PdfPageMode.USE_NONE\" to", "= aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") # Create a \"PdfSaveOptions\" object that we can", "an entry in this outline will take us to the location of its", "aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier", "= [x.as_link_annotation() for x in annotation_selector.selected] #if create_note_hyperlinks: # self.assertEqual(8, len([a for a", "to configure the rendering quality of DrawingML effects in a document as we", "DML shapes with their fallback shapes. 
# Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\"", "/FlateDecode>>stream\", file.read()) def test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression", "# This helps with making documents searchable but may significantly increase the size", "time. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\",", "property to \"PdfCustomPropertiesExport.STANDARD\" # to preserve custom properties within the output PDF document.", "digital signature and assign it to our SaveOptions object to sign the document", "as file: content = file.read() self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect", "the 5th heading level are sub-entries of the second 4th level outline entry,", "objects. options_copy = options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "lifespan of the timestamp is 100 seconds. 
self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) # We can set", "builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4 builder.writeln(\"Heading 1.2.2.1\") builder.writeln(\"Heading", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle", "from page two, which will only contain the second page. doc.save(stream, options) #ExEnd", "8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for preblend_images in", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R/Filter", "accurate values in the PDF. options.update_fields = update_fields # We can clone PdfSaveOptions", "for i in range(100): builder.writeln(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, \"", "outline levels, # and treat the outline level 5 headings as level 2.", "= 1 # Set the \"create_outlines_for_headings_in_tables\" property to \"False\" to exclude all headings", "in full-screen mode, which takes over the monitor's display and has no controls", "def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how", "that are no larger than 1. 
pdf_save_options.outline_options.headings_outline_levels = 1 # Set the \"create_outlines_for_headings_in_tables\"", "form fields as interactive objects in the output PDF. # Set the \"preserve_form_fields\"", "the \"use_book_fold_printing_settings\" property to \"False\" to render the PDF normally. options.use_book_fold_printing_settings = render_text_as_bookfold", "#ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set Aspose.Words to skip embedding Arial and Times", "aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\") builder.insert_paragraph() builder.writeln(\"Png image:\") builder.insert_image(IMAGE_DIR + \"Transparent background", "/Annot/Subtype /Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector =", "document to .PDF. options = aw.saving.PdfSaveOptions() # Below are three \"page_set\" properties that", "\"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #self.assertEqual(\"Hello world!\",", "Set the \"resolution\" property to \"36\" to downsample all images to 36 ppi.", "\"JohnDoe\", \"<PASSWORD>\", timedelta(minutes=30)) self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds()) self.assertEqual(\"https://freetsa.org/tsr\", options.digital_signature_details.timestamp_settings.server_url) self.assertEqual(\"JohnDoe\", options.digital_signature_details.timestamp_settings.user_name) self.assertEqual(\"<PASSWORD>\", options.digital_signature_details.timestamp_settings.password) # The", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\") #with open(ARTIFACTS_DIR + 
\"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: # content =", "part of an EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS\"", "as level 2. save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor =", "aw.DocumentBuilder(doc) builder.writeln(\"Page 1.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 2.\") builder.insert_break(aw.BreakType.PAGE_BREAK) builder.writeln(\"Page 3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\",", "if create_missing_outline_levels else 3, bookmarks.count) #endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False, True):", "0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data) # outline_item_collection = pdf_doc.outlines # self.assertEqual(4,", "##ExSummary:Shows added a fallback to bitmap rendering and changing type of warnings about", "before a save operation. # This is the preferable option if we know", "# \"as is\", without warranty of any kind, either expressed or implied. import", "for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression): #ExStart #ExFor:PdfSaveOptions.image_compression #ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") doc.built_in_document_properties.title = \"Windows bar pdf", "+ # \"Page 4 (even)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self):", "0 (0) 0 (.) 
0 ( ) 0 (N) 0 (o) 0 (v)", "content) else: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W", "Set the \"display_doc_title\" to \"True\" to get some PDF readers, such as Adobe", "\"DmlRenderingMode.FALLBACK\" # to substitute DML shapes with their fallback shapes. # Set the", "\"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17: #", "the reader that opens this document to interpolate images. # Their resolution should", "in the output PDF. # Set the \"preserve_form_fields\" property to \"False\" to freeze", "\"compliance\" property to \"PdfCompliance.PDF17\" to comply with the \"1.7\" standard. # Set the", "# self.warnings: List[aw.WarningInfo] = [] # def warning(info: aw.WarningInfo): # print(f\"{info.warning_type}: {info.description}.\") #", "+ \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\") #page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction),", "fonts if we edit the PDF. # Set the \"embed_full_fonts\" property to \"False\"", "display all # bookmarks at the first level of the outline in the", "to \"HeaderFooterBookmarksExportMode.FIRST\" to # only export bookmarks in the first section's header/footers. 
#", "= \"Arvo\" builder.writeln(\"The quick brown fox jumps over the lazy dog.\") # Configure", "(even)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text) def test_export_language_to_span_tag(self): #ExStart #ExFor:PdfSaveOptions.export_language_to_span_tag #ExSummary:Shows how", "0), signature_field.signature.date) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0A\\0s\\0p\\0o\\0s\\0e\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) #self.assertIsNone(signature_field.signature.timestamp_settings) def test_render_metafile(self):", "configuration # # in our MetafileRenderingOptions object to the saving operation. # save_options", "\"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual(", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Contents of signed PDF.\") certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR", "self.subTest(export_document_structure=export_document_structure): #ExStart #ExFor:PdfSaveOptions.export_document_structure #ExSummary:Shows how to preserve document structure elements, which can assist", "self.assertLess(300000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) #", "# to only register headings with heading levels that are no larger than", "# Extend permissions to allow the editing of annotations. 
encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS |", "+ \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber)", "5 # This document contains headings of levels 1 and 5, and no", "a PDF normally. save_options.memory_optimization = memory_optimization doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self):", "to # to render the EMF+ part of an EMF+ dual metafile. #", "= aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.WarningType == aw.WarningType.MINOR_FORMATTING_LOSS: #", "#self.assertEqual(400, pdf_doc_image.width) #self.assertEqual(400, pdf_doc_image.height) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) #pdf_doc_image = pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO:", "#ExFor:PdfSaveOptions.jpeg_quality #ExFor:PdfImageCompression #ExSummary:Shows how to specify a compression type for all images in", "amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore", "= pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK:", "and the next entry of the same or lower level, # an arrow", "\"headings_outline_levels\" property. 
pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.table_heading_outlines.pdf\", pdf_save_options) #ExEnd #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR", "to text when we save the document to PDF. # Set the \"text_compression\"", "the device that is displaying the document. # Set the \"interpolate_images\" property to", "test_unsupported_image_format_warning(self): # doc = aw.Document(MY_DIR + \"Corrupted image.docx\") # save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() #", "levels are above 4 from the outline. options.outline_options.headings_outline_levels = 4 # If an", "unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback): # def __init__(self): # self.save_warnings = aw.WarningInfoCollection() #", "/DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>\", content) else: self.assertIn(", "# Create a timestamp authority-verified timestamp. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The", "property to \"False\" to apply subsetting to fonts, saving only the glyphs #", "\"True\" to replace some fonts, # including the two fonts in our document,", "reader # also to display the outline, if possible. # Set the \"page_mode\"", "last two headings we have inserted above will not appear. save_options.outline_options.headings_outline_levels = 2", "outline_item_collection[1].title) # self.assertEqual(\"1 XYZ 233 806 0\", outline_item_collection[1].destination.to_string()) # self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1", "# with a thumbnail for each page in the document. 
# Set the", "1.2.2\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "#ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export Odd pages from the document. doc = aw.Document()", "= aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create a \"PdfSaveOptions\" object that", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber()", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"update_fields\" property", "output PDF. # The document's size may become very large, but we will", "#ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how to save hyperlinks in a document we convert to PDF", "to set the default zooming that a reader applies when opening a rendered", "DML shapes themselves. options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR", "of an EMF+ dual metafile if all of the EMF+ records are supported.", "document may be larger with this setting. # Set the \"color_mode\" property to", "of all fonts if we edit the PDF. 
# Set the \"embed_full_fonts\" property", "action.destination.as_xyz_explicit_destination().zoom) def test_page_mode(self): for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN, aw.saving.PdfPageMode.USE_THUMBS, aw.saving.PdfPageMode.USE_OC, aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): with self.subTest(page_mode=page_mode):", "options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\") # The default lifespan of the timestamp is", "len(stream.getvalue())) def test_interpolate_images(self): for interpolate_images in (False, True): with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows", "for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how", "have a heading level that is no larger than the value of the", "-> \"Tags\". 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.export_document_structure.pdf\", \"rb\") as", "text act as links that, upon clicking, take us to their respective footnotes/endnotes.", "elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0", "the \"page_mode\" property to \"PdfPageMode.USE_THUMBS\" to get the PDF reader to display a", "aw.WarningInfo): # if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \" + info.description) #", "some PDF readers, such as Adobe Acrobat Pro, # to display the value", "from datetime import datetime, timedelta, timezone import aspose.words as aw import aspose.pydrawing as", "to set a different color space for images in a document as we", "0 obj\\r\\n\" + b\"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T\", content) #pdf_document", "/Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79", "is a table of contents that lists headings in the document body. #", "of the page setup objects of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for", "use the CMYK color space for all images in the saved PDF. #", "#else: # self.assertEqual(\"Please select a fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) # self.assertEqual(0,", "/XYZ 157 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS", "a reader applies when opening a rendered PDF document. doc = aw.Document() builder", "metafile, which will require raster operations to render in the output PDF. #", "to PDF. 
doc = aw.Document(MY_DIR + \"DrawingML shape fallbacks.docx\") # Create a \"PdfSaveOptions\"", "\"True\" to include all missing levels in the outline, # leaving blank outline", "def test_export_page_set(self): #ExStart #ExFor:FixedPageSaveOptions.page_set #ExSummary:Shows how to export Odd pages from the document.", "render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)", "our document contains a custom font, embedding in the output document may be", "should be lower than that of the device that is displaying the document.", "options = aw.saving.PdfSaveOptions() # Set the \"preblend_images\" property to \"True\" to preblend transparent", "apply text compression when saving a document to PDF. doc = aw.Document() builder", "PDF. This will make sure that all the fields will display # the", "0 obj\\r\\n<</Length 13 0 R>>stream\", file.read()) elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR +", "clone PdfSaveOptions objects. options_copy = options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document =", "len([a for a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK])) # self.assertEqual(\"1 XYZ 85", "test_encryption_permissions(self): #ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to set", "(g) 1 (, 10. November) -1 ( ) 1 (2) -1 (018)] TJ\",", "We can set our timeout period via the constructor. 
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\",", "#ExStart #ExFor:DownsampleOptions #ExFor:DownsampleOptions.downsample_images #ExFor:DownsampleOptions.resolution #ExFor:DownsampleOptions.resolution_threshold #ExFor:PdfSaveOptions.downsample_options #ExSummary:Shows how to change the resolution of", "our document, the outline entries from the 5th heading level are sub-entries of", "\"headings_outline_levels\" property to \"5\" to include all headings of levels 5 and below", "# also to display the outline, if possible. # Set the \"page_mode\" property", "the \"preserve_form_fields\" property to \"True\" to save form fields as interactive objects in", "Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to display the outline navigation pane in", "part of Aspose.Words. The source code in this file # is only intended", "readers to display the document's filename. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title = display_doc_title doc.save(ARTIFACTS_DIR", "# content = file.read() #if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, # aw.saving.DmlEffectsRenderingMode.SIMPLIFIED): # self.assertIn( #", "0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate", "\"True\" to preblend transparent images # with a background, which may reduce artifacts.", "792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK:", "method # to modify how that method converts the document to .PDF. 
pdf_options", "aw.saving.PdfCustomPropertiesExport.NONE: self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content) self.assertNotIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) elif", "R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2 11 0", "R /XYZ 157 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354", "pages down the middle at once, # and the contents will line up", "# Once we print this document on both sides of the pages, we", "method converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_compression\"", "<</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(0, image_placement_absorber.image_placements.count) # self.assertEqual(28, table_absorber.table_list.count) #elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE:", "to \"10\" to strengthen compression at the cost of image quality. pdf_save_options.jpeg_quality =", "# to preserve custom properties within the output PDF document. # Set the", "can use to filter out a set of pages from # our document", "R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0", "pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")", "as vector graphics. save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\", save_options) #ExEnd #pdf_document =", "to render in the output PDF. # metafile_rendering_options.emulate_raster_operations = False # # Set", "to only allow nonstandard fonts' embedding in the output PDF. # Set the", "the location of its respective heading. 
# Set the \"headings_outline_levels\" property to \"5\"", "# if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \" + info.description) # self.warnings.warning(info)", "operation. # Set the \"memory_optimization\" property to \"False\" to save the document as", "= pdf_document.pages[1].resources.images[1] with open(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", \"rb\") as file: content = file.read() with", "<</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())", "/Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name)", "1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading 1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create a", "0 R /XYZ 85 677 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201", "\"PdfSaveOptions.page_mode.pdf\", options) #ExEnd doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file:", "assign it to our SaveOptions object to sign the document when we save", "also to display the outline, if possible. # Set the \"page_mode\" property to", "= aw.DocumentBuilder(doc) # Create a table with three rows. The first row, #", "second page. 
options.page_set = aw.saving.PageSet(1) # This document will contain one page starting", "fields in the document at # their current values and display them as", "0 R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) def test_encryption_permissions(self):", "# field = form.fields[0].as_combo_box_field() # self.assertEqual(\"MyComboBox\", field.full_name) # self.assertEqual(3, field.options.count) # self.assertEqual(\"Apple\", field.value)", "of Aspose.Words. The source code in this file # is only intended as", "packet. options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\",", "the document. options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "level of saved PDF documents. 
doc = aw.Document(MY_DIR + \"Images.docx\") # Create a", "to \"True\" to lower the memory footprint of large documents' saving operations #", "aw.saving.PdfSaveOptions() # Set the \"export_document_structure\" property to \"True\" to make the document structure,", "# self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning in (False,", "pdf_doc_image.get_color_type()) #elif color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self):", "202 733 0\", link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258 711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1", "left of the entry. This entry is the \"owner\" of several such \"sub-entries\".", "to anything. options.create_note_hyperlinks = create_note_hyperlinks doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\", options) #ExEnd with open(ARTIFACTS_DIR +", "render transparent images normally. 
options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd pdf_document", "1 (2) -1 (0) 0 (1) 0 (8)] TJ\", # tj_operator.to_string()) #else: #", "how to preserve document structure elements, which can assist in programmatically interpreting our", "#self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings", "self.assertEqual(\"Please select a fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) # self.assertEqual(0, pdf_document.form.count) def", "#ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set Aspose.Words to skip embedding Arial and Times New", "print this document on both sides of the pages, we can fold all", "#pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page 5", "self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title in (False, True): with self.subTest(display_doc_title=display_doc_title): #ExStart #ExFor:PdfSaveOptions.display_doc_title", "1 (odd)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page 5 (odd)\", text_absorber.text)", "that we can render as vector graphics. 
save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.render_metafile.pdf\",", "\"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded)", "to make footnotes and endnotes function as hyperlinks. doc = aw.Document(MY_DIR + \"Footnotes", "warning.source == source and warning.warning_type == type and warning.description == description) def test_pdf_digital_signature(self):", "the document to .PDF. options = aw.saving.PdfSaveOptions() # The output PDF document will", "R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else: self.assertIn( b\"5 0 obj\\r\\n\"", "of already large documents. save_options.compliance = pdf_compliance doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.compliance.pdf\", save_options) #ExEnd #pdf_document", "3.\") with open(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\", \"wb\") as stream: # Create a \"PdfSaveOptions\" object", "outline_item_collection[3].title) # self.assertEqual(\"2 XYZ 85 806 0\", outline_item_collection[3].destination.to_string()) # self.assertEqual(\"Bookmark_4\", outline_item_collection[4].title) # self.assertEqual(\"2", "with a background, which may reduce artifacts. # Set the \"preblend_images\" property to", "b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\",", "document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) for i in range(5): builder.writeln(f\"Page {i", "\", text_fragment_absorber.text_fragments[1].text) def test_preserve_form_fields(self): for preserve_form_fields in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields", "to only render the EMF part of an EMF+ dual metafile. # Set", "test_embed_windows_fonts(self): for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONE, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD): with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode): #ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\") #self.assertTrue(pdf_document.form.signatures_exist) #signature_field = pdf_document.form[1].as_signature_field() #self.assertEqual(\"AsposeDigitalSignature\", signature_field.full_name) #self.assertEqual(\"AsposeDigitalSignature\", signature_field.partial_name) #self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type())", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO:", "self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading", "\"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to only allow nonstandard fonts' embedding in the output", "1 fonts. 
options.use_core_fonts = use_core_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\", options) if use_core_fonts: self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR", "#pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\" if update_fields else \"Page of \", text_fragment_absorber.text_fragments[1].text) def", "in an XMP packet. options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with", "R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>\", data) # outline_item_collection = pdf_doc.outlines", "aw.DocumentBuilder(doc) for i in range(100): builder.writeln(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit,", "levels of 2, 3, and 4. # The output PDF document will treat", "pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2 - Save only", "using a reader such as Adobe Acrobat, we will see the document scaled", "StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\",", "def warning(self, info: aw.WarningInfo): # if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: # print(\"Unsupported operation: \"", "property to \"PdfPageMode.FULL_SCREEN\" to get the PDF reader to open the saved #", "= aw.saving.PdfSaveOptions() # Set the \"open_hyperlinks_in_new_window\" property to \"True\" to save all hyperlinks", "#ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render fallback shapes when saving to PDF. 
doc =", "pdf_doc.outlines.count) # self.assertEqual(\"Customers\", pdf_doc.outlines[1].title) #else: # self.assertEqual(0, pdf_doc.outlines.count) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_doc.pages[1]) #self.assertEqual(\"Customers\",", "+ \"PdfSaveOptions.render_metafile.pdf\", \"rb\") as file: # content = file.read() #if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF,", "\\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def test_pdf_digital_signature_timestamp(self): #ExStart #ExFor:PdfDigitalSignatureDetails.timestamp_settings #ExFor:PdfDigitalSignatureTimestampSettings #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str) #ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan) #ExFor:PdfDigitalSignatureTimestampSettings.password", "aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\" property to \"True\" to attempt to fix incorrect", "pdf_text_compression == aw.saving.PdfTextCompression.FLATE: self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\")) with open(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", \"rb\") as", "if all of the EMF+ records are supported. # Otherwise, Aspose.Words will render", "scale_wmf_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber =", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.memory_optimization.pdf\", save_options) #ExEnd def test_escape_uri(self): parameters = [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"),", "to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"preblend_images\" property to \"True\" to", "the \"headings_outline_levels\" property to \"5\" to include all headings of levels 5 and", "the \"text_compression\" property to \"PdfTextCompression.FLATE\" to apply ZIP compression # to text when", "actual size. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.zoom_behaviour.pdf\") #action", "resolution that is above 128 ppi. options.downsample_options.resolution_threshold = 128 # Only the first", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\") #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", \"password\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber)", "\"resolution_threshold\" property to only apply the downsampling to # images with a resolution", "document as usual. 
save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document =", "warning collection.\"\"\" # self.warnings.clear() # @property # def count(self): # return len(self.warnings) #", "image format.\") #class SaveWarningCallback(aw.IWarningCallback): # def __init__(self): # self.save_warnings = aw.WarningInfoCollection() # def", "as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self): for", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: #", "def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart #ExFor:PdfSaveOptions.open_hyperlinks_in_new_window #ExSummary:Shows how", "612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2 11 0 R/X3 12 0", "the memory footprint of large documents' saving operations # at the cost of", "# tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta) -1", "bitmap rendering and changing type of warnings about unsupported metafile records. 
#def test_handle_binary_raster_warnings(self):", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a \"PdfSaveOptions\" object", "in (False, True): with self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF Type 1", "link_annotations[4].destination.to_string()) # self.assertEqual(\"1 XYZ 258 711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157 733", "\"numeral_format\" property to \"NumeralFormat.ARABIC_INDIC\" to # use glyphs from the U+0660 to U+0669", "options property. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that", "for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:DmlEffectsRenderingMode #ExFor:PdfSaveOptions.dml_effects_rendering_mode #ExFor:SaveOptions.dml_effects_rendering_mode", "Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved. # # This file", "(8)] TJ\", # tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( #", "/S>>\", content) else: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3 0 R/Contents", "aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that we can pass to", "in headers and footers.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"dml_rendering_mode\" property", "text positioning operators. 
doc = aw.Document(MY_DIR + \"Text positioning operators.docx\") # Create a", "# save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options # callback = ExPdfSaveOptions.HandleDocumentWarnings() #", "4 # If an outline entry has subsequent entries of a higher level", "self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE: self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd", "def test_preserve_form_fields(self): for preserve_form_fields in (False, True): with self.subTest(preserve_form_fields=preserve_form_fields): #ExStart #ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how", "are no larger than 1. pdf_save_options.outline_options.headings_outline_levels = 1 # Set the \"create_outlines_for_headings_in_tables\" property", "be larger with this setting. # Set the \"color_mode\" property to \"NORMAL\" to", "Tf )/AP<</N 12 0 R>>>>\", # content) # form = pdf_document.form # self.assertEqual(1,", "Adobe Acrobat at the cost of increased file size. # Set the \"export_document_structure\"", "In the outline, we can click on the arrow of the \"owner\" entry", "panel # that allows us to work with any layers present in the", "how to change image color with saving options property. doc = aw.Document(MY_DIR +", "= aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 # When we open this document using a", "the same or lower level, # an arrow will appear to the left", "operations to render in the output PDF. # metafile_rendering_options.emulate_raster_operations = False # #", "\"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[0 (S) 0 (a) 0 (m) 0 (s) 0", ".PDF. 
save_options = aw.saving.PdfSaveOptions() save_options.text_compression = aw.saving.PdfTextCompression.NONE # Set the \"additional_text_positioning\" property to", "\"expanded_outline_levels\" property to \"2\" to automatically expand all heading level 2 and lower", "effects in a document as we save it to PDF. doc = aw.Document(MY_DIR", "#ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set a different color space for images", "# to display the value of the document's \"title\" built-in property in the", "save_warning_callback.save_warnings[0].description, # \"Image can not be processed. Possibly unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback):", "+ b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847", "and no headings with levels of 2, 3, and 4. # The output", "\"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for create_missing_outline_levels in (False, True):", "in a document that we save to PDF to 220 ppi. self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220,", "use glyphs from the U+06F0 to U+06F9 range as numbers. # Set the", "our document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\") builder.writeln(\"Hello", "to display a separate panel # that allows us to work with any", "the output document may be desirable. 
# Set the \"embed_full_fonts\" property to \"True\"", "# Set the \"open_hyperlinks_in_new_window\" property to \"True\" to save all hyperlinks using Javascript", "\" + info.description) # self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST,", "as we export it to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg", "be RGB. # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the", "= open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as", "(aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set a different", "contains headings of levels 1 and 5, and no headings with levels of", "# Only the first two images from the document will be downsampled at", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count) #self.assertEqual(5, pdf_document.outlines.visible_count) #self.assertTrue(pdf_document.outlines[1].open) #self.assertEqual(1, pdf_document.outlines[1].level)", "document to PDF. doc = aw.Document() builder = aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR +", "while saving this document. In that case, # we can open it using", "to get such readers to display the document's filename. 
pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.display_doc_title", "object to the saving operation. # save_options = aw.saving.PdfSaveOptions() # save_options.metafile_rendering_options = metafile_rendering_options", "images in color. pdf_save_options = aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options)", "file: content = file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281", "metafile_rendering_options.emulate_raster_operations = False # # Set the \"rendering_mode\" property to \"VECTOR_WITH_FALLBACK\" to try", "respective heading. # Set the \"headings_outline_levels\" property to \"2\" to exclude all headings", "to U+06F9 range as numbers. # Set the \"numeral_format\" property to \"NumeralFormat.EUROPEAN\" to", "#self.assertEqual(2, pdf_document.outlines[1][2].level) def test_update_fields(self): for update_fields in (False, True): with self.subTest(update_fields=update_fields): #ExStart #ExFor:PdfSaveOptions.clone", "Set the \"create_note_hyperlinks\" property to \"True\" to turn all footnote/endnote symbols # in", "part of an EMF+ dual metafile. 
# Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\"", "pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA:", "= aw.saving.PdfSaveOptions() pdf_save_options.color_mode = color_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.color_rendering.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "= aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode ==", "\"False\" to get such readers to display the document's filename. pdf_save_options = aw.saving.PdfSaveOptions()", "aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change image color", "R/Filter /FlateDecode>>\", content) else: self.assertIn( b\"7 0 obj\\r\\n\" + b\"<</Type /XObject/Subtype /Image/Width 400/Height", "\"color_mode\" property to \"NORMAL\" to render all images in color. pdf_save_options = aw.saving.PdfSaveOptions()", "0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self):", "the original font sources. 
aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\") #pdf_doc_fonts =", "#ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime) #ExFor:PdfDigitalSignatureDetails.hash_algorithm #ExFor:PdfDigitalSignatureDetails.location #ExFor:PdfDigitalSignatureDetails.reason #ExFor:PdfDigitalSignatureDetails.signature_date #ExFor:PdfDigitalSignatureHashAlgorithm #ExFor:PdfSaveOptions.digital_signature_details #ExSummary:Shows how to sign a generated", "opening an output document. doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") #", "True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" +", "# Set the \"preserve_form_fields\" property to \"False\" to freeze all form fields in", "/Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode", "we save it to PDF. certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details =", "# Set the \"compliance\" property to \"PdfCompliance.PDF_A1B\" to comply with the \"PDF/A-1b\" standard,", "\"preserve_form_fields\" property to \"False\" to freeze all form fields in the document at", "to set permissions on a saved PDF document. doc = aw.Document() builder =", "+ \"Footnotes and endnotes.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "document immediately before saving it to PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc)", "# Set the \"default_bookmarks_outline_level\" property to \"1\" to display all # bookmarks at", "\"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\", datetime.now()) # Create a timestamp", "157 733 0\", link_annotations[6].destination.to_string()) # self.assertEqual(\"1 XYZ 212 711 0\", link_annotations[7].destination.to_string()) #else: #", "# self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3", "400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode", "quality of DrawingML effects in a document as we save it to PDF.", "save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle =", "\"dml_rendering_mode\" property to \"DmlRenderingMode.FALLBACK\" # to substitute DML shapes with their fallback shapes.", "== aw.saving.PdfCompliance.PDF_A2A: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: #", "/Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733 0]>>\",", "to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"page_index\" to \"1\" to render", "pdf_doc_fonts[1].font_name) #else: # self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) # self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def", "method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"export_document_structure\"", "Office\", datetime.now()) # Create a timestamp authority-verified timestamp. options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings(\"https://freetsa.org/tsr\", \"JohnDoe\", \"<PASSWORD>\")", "pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD: # self.assertEqual(3,", "level outline entry, # the 4th and 5th heading level entries are sub-entries", "parameters = [ (r\"https://www.google.com/search?q= aspose\", \"https://www.google.com/search?q=%20aspose\"), (r\"https://www.google.com/search?q=%20aspose\", \"https://www.google.com/search?q=%20aspose\"), ] for uri, result in", "a standard font, and \"Courier New\" is a nonstandard font. 
builder.font.name = \"Arial\"", "10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\") #with pdf_document.pages[1].resources.images[1].to_stream()", "type(GoToURIAction), # link_annot.action.get_type()) ##ExStart ##ExFor:MetafileRenderingMode ##ExFor:MetafileRenderingOptions ##ExFor:MetafileRenderingOptions.emulate_raster_operations ##ExFor:MetafileRenderingOptions.rendering_mode ##ExFor:IWarningCallback ##ExFor:FixedPageSaveOptions.metafile_rendering_options ##ExSummary:Shows added a", "= aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber() #table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6,", "if possible. # Set the \"page_mode\" property to \"PdfPageMode.USE_NONE\" to get the PDF", "# self.warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.warning_type ==", "0 R /XYZ 157 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494", "pdf_document.pages[1].resources.images[2] #if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: #", "return len(self.warnings) # def contains(self, source: aw.WarningSource, type: aw.WarningType, description: str) -> bool:", "#ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode #ExSummary:Shows how to change image color with saving options", "+ \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def __init__(self): # 
self.warnings:", "#ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit the headings' level that will appear", "open(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", \"rb\") as file: content = file.read() if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE:", "contents will line up in a way that creates a booklet. doc.save(ARTIFACTS_DIR +", "0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\",", "4 0 R>>\\r\\n\", content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn( f\"<</Type /Catalog/Pages 3", "#page = pdf_document.pages[1] #link_annot = page.annotations[1].as_link_annotation() #self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction), # link_annot.action.get_type())", "= aw.Document() doc.custom_document_properties.add(\"Company\", \"My value\") # Create a \"PdfSaveOptions\" object that we can", "def __init__(self): # self.save_warnings = aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if", "#page = pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL)) #page.accept(annotation_selector) #link_annotations = [x.as_link_annotation() for x", "#self.assertEqual(\"Aspose.Words\", pdf_document.info.creator) #self.assertTrue(pdf_document.info.producer.startswith(\"Aspose.Words\")) #if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE: # self.assertEqual(2, pdf_document.info.count) # self.assertEqual(3, pdf_document.metadata.count)", "#ExStart #ExFor:PdfSaveOptions.font_embedding_mode #ExFor:PdfFontEmbeddingMode #ExSummary:Shows how to set Aspose.Words to skip embedding Arial and", "0>>/Dest[5 0 R /XYZ 85 654 0]>>\", content) 
self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [258.15499878", "with saving options property. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\"", "def test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions #ExFor:ColorMode #ExFor:FixedPageSaveOptions.color_mode", "headers/footers. save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", save_options) #ExEnd #pdf_doc = aspose.pdf.document(ARTIFACTS_DIR +", "self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) #", "that method converts the document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set the", "builder.font.name = \"Arvo\" builder.writeln(\"The quick brown fox jumps over the lazy dog.\") #", "the document, the bigger the impact that this will have. 
options.text_compression = pdf_text_compression", "/Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \" + b\"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\",", "#self.assertEqual(\"Customers\", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text) #self.assertEqual(\"<NAME>\", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text) def test_expanded_outline_levels(self): #ExStart #ExFor:Document.save(str,SaveOptions) #ExFor:PdfSaveOptions #ExFor:OutlineOptions.headings_outline_levels #ExFor:OutlineOptions.expanded_outline_levels", "0 R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO, aw.saving.PdfImageCompression.JPEG): with self.subTest(pdf_image_compression=pdf_image_compression):", "if we saved the document with it enabled. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options) #ExEnd", "as links that, upon clicking, take us to their respective footnotes/endnotes. 
# Set", "= aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text() #if apply_additional_text_positioning: # self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\"))", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.save_as_pdf_book_fold.pdf\") #text_absorber = TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold:", "StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL))", "#self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type()) #self.assertEqual(date.today(), signature_field.signature.date.date()) #self.assertEqual(\"\\xFE\\xFF\\0M\\0o\\0r\\0z\\0a\\0l\\0.\\0M\\0e\", signature_field.signature.authority) #self.assertEqual(\"\\xFE\\xFF\\0M\\0y\\0 \\0O\\0f\\0f\\0i\\0c\\0e\", signature_field.signature.location) #self.assertEqual(\"\\xFE\\xFF\\0T\\0e\\0s\\0t\\0 \\0S\\0i\\0g\\0n\\0i\\0n\\0g\", signature_field.signature.reason) def", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\" property", "# to render the DML shapes themselves. options.dml_rendering_mode = dml_rendering_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\",", "save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\" property to \"1\" to display all", "how to save a document to the PDF format in the form of", "dog.\") # Create a \"PdfSaveOptions\" object that we can pass to the document's", "converts the document to .PDF. 
options = aw.saving.PdfSaveOptions() # Set the \"export_document_structure\" property", "builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3 builder.writeln(\"Heading", "can not be processed. Possibly unsupported image format.\") #class SaveWarningCallback(aw.IWarningCallback): # def __init__(self):", "document to .PDF. pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_compression\" property to \"PdfImageCompression.AUTO\"", "aw.Document(MY_DIR + \"WMF with image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions() # # Set the", "#pdf_doc.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.header_footer_bookmarks_export_mode.pdf\", \"rb\") as file: # data = file.read().decode('utf-8') #if", "options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) #", "import os from datetime import datetime, timedelta, timezone import aspose.words as aw import", "higher entries when we open the document. options.outline_options.expanded_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\",", "export custom properties while converting a document to PDF. doc = aw.Document() doc.custom_document_properties.add(\"Company\",", "transparent images # with a background, which may reduce artifacts. # Set the", "PDF. 
save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\" property to \"1\" to display", "R /XYZ 202 733 0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092", "we can pass to the document's \"save\" method # to modify how that", "that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Below are", "#table_absorber.visit(pdf_document.pages[1]) #if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: # self.assertEqual(6, table_absorber.table_list.count) #elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: #", "use the # \"image_compression\" property to control the quality of all images that", "preserve_form_fields doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", pdf_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\") #self.assertEqual(1, pdf_document.pages.count)", "at this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\")", "True if a warning with the specified properties has been generated.\"\"\" # return", "ppi. 
self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\", options) # Set the", "aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def", "#ExFor:PdfDigitalSignatureTimestampSettings.timeout #ExFor:PdfDigitalSignatureTimestampSettings.user_name #ExSummary:Shows how to sign a saved PDF document digitally and timestamp", "with self.subTest(page_mode=page_mode): #ExStart #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows how to set instructions for some PDF", "open(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content = file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type", "\"NumeralFormat.CONTEXT\" to # look up the locale to determine what number of glyphs", "the \"SaveOptions\" object to # digitally sign the document as we render it", "# Start by disallowing all permissions. encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL # Extend permissions to", "0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE): self.assertIn(", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) # Restore the original font sources. 
aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources) #ExEnd #pdf_document =", "self.assertEqual(2, pdf_document.info.count) # self.assertEqual(4, pdf_document.metadata.count) # self.assertEqual(\"Aspose.Words\", pdf_document.metadata[\"xmp:CreatorTool\"].to_string()) # self.assertEqual(\"Company\", pdf_document.metadata[\"custprops:Property1\"].to_string()) #elif pdf_custom_properties_export_mode", "the \"numeral_format\" property to \"NumeralFormat.SYSTEM\" to determine the symbol set from regional settings.", "to use european numerals. # Set the \"numeral_format\" property to \"NumeralFormat.SYSTEM\" to determine", "increased file size. # Set the \"additional_text_positioning\" property to \"False\" to render the", "self.assertEqual(\"1 XYZ 258 711 0\", link_annotations[5].destination.to_string()) # self.assertEqual(\"1 XYZ 157 733 0\", link_annotations[6].destination.to_string())", "all images to 36 ppi. options.downsample_options.resolution = 36 # Set the \"resolution_threshold\" property", "options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber =", "test_compliance(self): for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U, aw.saving.PdfCompliance.PDF17, aw.saving.PdfCompliance.PDF_A2A): with self.subTest(pdf_compliance=pdf_compliance): #ExStart #ExFor:PdfSaveOptions.compliance #ExFor:PdfCompliance #ExSummary:Shows", "scale fonts # that format text within WMF images according to the size", "can pass to the document's \"save\" method # # to modify how that", "\"rb\") as file: content = file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847", "= 1 # Set the \"header_footer_bookmarks_export_mode\" property to \"HeaderFooterBookmarksExportMode.NONE\" to # not export", "version of DrawingML effects. 
# Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.FINE\" to #", "converts the document to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\" property", "R/X3 12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", # content) # self.assertEqual(21, image_placement_absorber.image_placements.count)", "R /XYZ 212 711 0]>>\", content) else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document", "to not update all the fields in a document right before a save", "pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE): with self.subTest(pdf_text_compression=pdf_text_compression):", "how that method converts the document to .PDF and applies the configuration #", "#ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable or disable subsetting when embedding fonts", "for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode", "Acrobat and find tags for elements such as the heading # and the", "page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>\",", "us to the location of its respective heading. # Set the \"headings_outline_levels\" property", "entries for headings inside tables. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) # Create", "doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content", "symbols # in the text act as links that, upon clicking, take us", "\"False\" to save all hyperlinks normally. options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", options)", "\"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content = file.read().decode('utf-8') if page_mode == aw.saving.PdfPageMode.FULL_SCREEN: self.assertIn( f\"<</Type", "not appear. save_options.outline_options.headings_outline_levels = 2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\", save_options) #ExEnd #bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor()", "to ignore missing outline levels, # and treat the outline level 5 headings", "\"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as file: content = file.read()", "#self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning", "# look up the locale to determine what number of glyphs to use.", "DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render a", "locale to determine what number of glyphs to use. 
# Set the \"numeral_format\"", "== aw.saving.PdfImageCompression.JPEG: # self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400,", "R>>\\r\\n\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\") #if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES): #", "Roman fonts into a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc) #", "access to both the fonts in this document. original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources() folder_font_source =", "save_options = aw.saving.PdfSaveOptions() # save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) #", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self):", "#if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO: # self.assertLess(25000, pdf_doc_image.to_stream().length) #elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK: # self.assertLess(18000,", "self.subTest(use_core_fonts=use_core_fonts): #ExStart #ExFor:PdfSaveOptions.use_core_fonts #ExSummary:Shows how enable/disable PDF Type 1 font substitution. doc =", "aw.saving.PdfCustomPropertiesExport.METADATA: self.assertIn( b\"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "this document to interpolate images. 
# Their resolution should be lower than that", "of Adobe Acrobat at the cost of increased file size. # Set the", "+ \"PdfSaveOptions.note_hyperlinks.pdf\", \"rb\") as file: content = file.read() if create_note_hyperlinks: self.assertIn( b\"<</Type /Annot/Subtype", "the \"numeral_format\" property to \"NumeralFormat.CONTEXT\" to # look up the locale to determine", "== aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0", "# Enable encryption via the \"encryption_details\" property. save_options.encryption_details = encryption_details # When we", "# The document's size may become very large, but we will have full", "to render transparent images normally. options.preblend_images = preblend_images doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.preblend_images.pdf\", options) #ExEnd", "/Widget/Rect [0 0 0 0]/FT /Sig/T\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\") #self.assertTrue(pdf_document.form.signatures_exist)", "self.subTest(preblend_images=preblend_images): #ExStart #ExFor:PdfSaveOptions.preblend_images #ExSummary:Shows how to preblend images with transparent backgrounds while saving", "the second page. options.page_set = aw.saving.PageSet(1) # This document will contain one page", "text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit the headings'", "def __getitem__(self, i) -> aw.WarningInfo: # return self.warnings[i] # def clear(self): # \"\"\"Clears", "property to \"2\" to exclude all headings whose levels are above 2 from", "the outline, provided that they have a heading level that is no larger", "document to .PDF. 
pdf_save_options = aw.saving.PdfSaveOptions() # Set the \"image_color_space_export_mode\" property to \"PdfImageColorSpaceExportMode.AUTO\"", "in a document that we are rendering to PDF. doc = aw.Document(MY_DIR +", "builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") # Create a \"PdfSaveOptions\" object that we can", "self.assertEqual( # save_warning_callback.save_warnings[0].description, # \"Image can not be processed. Possibly unsupported image format.\")", "metafile on the page. # Set the \"scale_wmf_fonts_to_metafile_size\" property to \"False\" to #", "Set the \"zoom_factor\" property to \"25\" to give the zoom factor a value", "link_Annot.action.as_go_to_uri_action() #self.assertEqual(result, action.uri) def test_open_hyperlinks_in_new_window(self): for open_hyperlinks_in_new_window in (False, True): with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window): #ExStart", "self.assertEqual( # \"[0 (S) 0 (a) 0 (m) 0 (s) 0 (t) 0", "10, 50, 100\", text_absorber.text) #elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC: # self.assertEqual(\", ٢, ٣, ٤,", "(0) 0 (1) 0 (8)] TJ\", # tj_operator.to_string()) #else: # self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR +", "In our document, the outline entries from the 5th heading level are sub-entries", "/FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages 3", "field.value) #else: # self.assertEqual(\"Please select a fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\", content) #", "the \"page_index\" to \"1\" to render a portion of the document starting from", "to render a portion of the document starting from the second page. 
options.page_set", "# content) # self.assertEqual(21, image_placement_absorber.image_placements.count) # self.assertEqual(4, table_absorber.table_list.count) def test_drawing_ml_fallback(self): for dml_rendering_mode in", "The output PDF document will treat outline levels 2, 3, and 4 as", "builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5 builder.writeln(\"Heading 1.1.1.1.1\") builder.writeln(\"Heading 1.1.1.1.2\") # Create a \"PdfSaveOptions\"", "- Save only the odd-numbered pages: options.page_set = aw.saving.PageSet.odd doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\", options)", "options = aw.saving.PdfSaveOptions() options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR options.zoom_factor = 25 # When we open", "scaling according to metafile size on the page. doc = aw.Document(MY_DIR + \"WMF", "if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML: self.assertIn( b\"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox", "3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC", "aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\", datetime.now()) # Create a timestamp authority-verified timestamp. options.digital_signature_details.timestamp_settings", "/Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\\xFE\\xFF\\0M\\0y\\0C\\0o\\0m\\0b\\0o\\0B\\0o\\0x)/Opt \" +", "Windows Metafile-related rendering options when saving to PDF. 
doc = aw.Document(MY_DIR + \"EMF.docx\")", "to \"PdfImageCompression.AUTO\" to use the # \"image_compression\" property to control the quality of", "the \"scale_wmf_fonts_to_metafile_size\" property to \"False\" to # preserve the default scale of these", "builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\") builder.end_row() builder.insert_cell() builder.write(\"<NAME>\") builder.end_table() # Create a", "dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK: # self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure in (False, True):", "aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") encryption_details = aw.saving.PdfEncryptionDetails(\"password\", \"\") # Start by", "the outline. options.outline_options.headings_outline_levels = 4 # If an outline entry has subsequent entries", "(aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode #ExFor:OutlineOptions #ExFor:OutlineOptions.default_bookmarks_outline_level #ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode #ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows", "< 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\" object that we can pass to", "711 0]>>\", content) else: self.assertNotIn( b\"<</Type /Annot/Subtype /Link/Rect\", content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "effect if we saved the document with it enabled. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.interpolate_images.pdf\", save_options)", "\"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\")) else: self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\"))", "this stage. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.lower_resolution.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image", "the document to .PDF. save_options = aw.saving.PdfSaveOptions() # The output PDF document will", "the output document at this time. doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR +", "5 headings as level 2. save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.create_missing_outline_levels.pdf\", save_options) #ExEnd", "the outline, if possible. 
# Set the \"page_mode\" property to \"PdfPageMode.USE_NONE\" to get", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\")) # with self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for", "# self.warnings.warning(info) ##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode):", "render_text_as_bookfold # If we are rendering the document as a booklet, we must", "options = aw.saving.PdfSaveOptions() # Set the \"create_note_hyperlinks\" property to \"True\" to turn all", "cost. options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document =", "XMP packet. options.custom_properties_export = pdf_custom_properties_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.custom_properties_export.pdf\", options) #ExEnd with open(ARTIFACTS_DIR +", "which aims to preserve the visual appearance of the document as Aspose.Words convert", "to the PDF format using the Save method and the PdfSaveOptions class. doc", "HandleDocumentWarnings(aw.IWarningCallback): # \"\"\"Prints and collects formatting loss-related warnings that occur upon saving a", "= aw.saving.PdfSaveOptions() # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.NONE\" to discard all DrawingML", "# \"Page 4 (even)\", text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber()", "the document to .PDF. 
pdf_options = aw.saving.PdfSaveOptions() # Set the \"preserve_form_fields\" property to", "to discard all DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" #", "rendering large documents to PDF. doc = aw.Document(MY_DIR + \"Rendering.docx\") # Create a", "encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY # Create a \"PdfSaveOptions\" object that we can", "self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts #ExSummary:Shows how to enable or disable subsetting when embedding", "the second page. doc.save(stream, options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count)", "5.045, text_fragment_rectangle.width, delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart", "#ExStart #ExFor:PdfEncryptionDetails.__init__ #ExFor:PdfSaveOptions.encryption_details #ExFor:PdfEncryptionDetails.permissions #ExFor:PdfEncryptionDetails.owner_password #ExFor:PdfEncryptionDetails.user_password #ExFor:PdfPermissions #ExFor:PdfEncryptionDetails #ExSummary:Shows how to set permissions", "g /FAAABD 12 Tf )/AP<</N 12 0 R>>>>\", # content) # form =", "+ \"PdfSaveOptions.compliance.pdf\") #if pdf_compliance == aw.saving.PdfCompliance.PDF17: # self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) #elif", "TextAbsorber() #pdf_document.pages.accept(text_absorber) #if render_text_as_bookfold: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) #", "how to set Aspose.Words to skip embedding Arial and Times New Roman fonts", "\"Courier New\" is 
a nonstandard font. builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name =", "entry to collapse/expand all its sub-entries. # Set the \"expanded_outline_levels\" property to \"2\"", "doc = aw.Document(MY_DIR + \"WMF with image.docx\") # metafile_rendering_options = aw.saving.MetafileRenderingOptions() # #", ") 0 (1) 0 (0) 0 (.) 0 ( ) 0 (N) 0", "document using a reader such as Adobe Acrobat, we will see the document", "# warning_callback = ExPdfSaveOptions.RenderCallback() # doc.warning_callback = warning_callback # save_options = aw.saving.PdfSaveOptions() #", "fold all the pages down the middle at once, # and the contents", "sure that all the fields will display # the most accurate values in", "an EMF+ dual metafile. # Set the \"emf_plus_dual_rendering_mode\" property to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to", "0 (b) 0 (e) 0 (r) -1 ( ) 1 (2) -1 (0)", "to .PDF. save_options = aw.saving.PdfSaveOptions() # Set the \"compliance\" property to \"PdfCompliance.PDF_A1B\" to", "\"create_outlines_for_headings_in_tables\" property to \"True\" to include all headings within tables # in the", "allow nonstandard fonts' embedding in the output PDF. # Set the \"font_embedding_mode\" property", "enable or disable subsetting when embedding fonts while rendering a document to PDF.", ".PDF. options = aw.saving.PdfSaveOptions() # Create a digital signature and assign it to", "skip embedding Arial and Times New Roman fonts into a PDF document. doc", "it with the \"save\" method. 
signing_time = datetime.now() options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\",", "Configure our font sources to ensure that we have access to both the", "if i < 4: builder.insert_break(aw.BreakType.PAGE_BREAK) # Create a \"PdfSaveOptions\" object that we can", "This will make sure that all the fields will display # the most", "#ExFor:PdfSaveOptions.page_mode #ExFor:PdfPageMode #ExSummary:Shows to process bookmarks in headers/footers in a document that we", "# self.assertEqual(15, table_absorber.table_list.count) def test_export_document_structure(self): for export_document_structure in (False, True): with self.subTest(export_document_structure=export_document_structure): #ExStart", "# self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version) def test_text_compression(self): for pdf_text_compression in (aw.saving.PdfTextCompression.NONE, aw.saving.PdfTextCompression.FLATE):", "all DrawingML effects. # Set the \"dml_effects_rendering_mode\" property to \"DmlEffectsRenderingMode.SIMPLIFIED\" # to render", "objects of all sections to \"MultiplePagesType.BOOK-FOLD_PRINTING\". if render_text_as_bookfold: for section in doc.sections: section", "options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1", "-1 (018)] TJ\", # tj_operator.to_string()) def test_save_as_pdf_book_fold(self): for render_text_as_bookfold in (False, True): with", "that method converts the document to .PDF. options = aw.saving.PdfSaveOptions() # By default,", "headings inside tables. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) # Create a table", "+ \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1])", "R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0", "delta=0.001) def test_embed_full_fonts(self): for embed_full_fonts in (False, True): with self.subTest(embed_full_fonts=embed_full_fonts): #ExStart #ExFor:PdfSaveOptions.__init__ #ExFor:PdfSaveOptions.embed_full_fonts", "the document as a booklet, we must set the \"multiple_pages\" # properties of", "property to \"DmlRenderingMode.DRAWING_ML\" # to render the DML shapes themselves. options.dml_rendering_mode = dml_rendering_mode", "aw.saving.PdfSaveOptions() # Since our document contains a custom font, embedding in the output", "\"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", options) #ExEnd self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\").has_digital_signature) with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature_timestamp.pdf\", \"rb\") as file:", "change the resolution of images in the PDF document. doc = aw.Document(MY_DIR +", "Set the \"default_bookmarks_outline_level\" property to \"1\" to display all # bookmarks at the", "0 obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self): for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO,", "all headings whose levels are above 4 from the outline. 
options.outline_options.headings_outline_levels = 4", "StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #2\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #2\",", "render the document as usual. save_options.additional_text_positioning = apply_additional_text_positioning doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd", "to \"True\" to save form fields as interactive objects in the output PDF.", "\"PdfSaveOptions.doc_title.pdf\") #self.assertEqual(display_doc_title, pdf_document.display_doc_title) #self.assertEqual(\"Windows bar pdf title\", pdf_document.info.title) def test_memory_optimization(self): for memory_optimization in", "to \"EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK\" # to render the EMF+ part of an EMF+ dual metafile", "to .PDF and applies the configuration # # in our MetafileRenderingOptions object to", "#8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\",", "entries of levels 1, 2, and then 3. 
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading", "2 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\", options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.expanded_outline_levels.pdf\") #self.assertEqual(1, pdf_document.outlines.count)", "#ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_fallback.pdf\", \"rb\") as file: content = file.read() if dml_rendering_mode", "1.2.1\") builder.writeln(\"Heading 1.2.2\") # Create a \"PdfSaveOptions\" object that we can pass to", "[0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group", "document to .PDF. save_options = aw.saving.PdfSaveOptions() # Enable encryption via the \"encryption_details\" property.", "\"False\" to not export the document structure. options.export_document_structure = export_document_structure # Suppose we", "in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set a", "world!\") builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\"", "bookmarks.count) #endif def test_table_heading_outlines(self): for create_outlines_for_headings_in_tables in (False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables", "b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85", "#ExFor:PdfSaveOptions.preserve_form_fields #ExSummary:Shows how to 
save a document to the PDF format using the", "to enable or disable subsetting when embedding fonts while rendering a document to", "\"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\", file.read()) def test_image_compression(self):", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\") #image_placement_absorber = aspose.pdf.ImagePlacementAbsorber() #image_placement_absorber.visit(pdf_document.pages[1]) #table_absorber = aspose.pdf.text.TableAbsorber()", "= aw.Document() builder = aw.DocumentBuilder(doc) for i in range(100): builder.writeln(\"Lorem ipsum dolor sit", "header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, # aw.saving.HeaderFooterBookmarksExportMode.ALL): # self.assertIn(f\"<</Type /Catalog/Pages 3 0 R/Outlines 14 0", "interpolation. save_options.interpolate_images = interpolate_images # When we open this document with a reader", "0]>>\", content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0", "R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\\r\\n\", data) # self.assertEqual(0, pdf_doc.outlines.count) #elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST, #", "pdf_document.font_utilities.get_all_fonts() #self.assertEqual(\"ArialMT\", pdf_doc_fonts[0].font_name) #self.assertEqual( # pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL, # pdf_doc_fonts[0].is_embedded) #self.assertEqual(\"CourierNewPSMT\", pdf_doc_fonts[1].font_name) #self.assertEqual(", "to render the PDF normally. options.use_book_fold_printing_settings = render_text_as_bookfold # If we are rendering", "password before accessing its contents. 
doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.encryption_permissions.pdf\", save_options) #ExEnd #with self.assertRaises(Exception): #", "# save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback() # doc.warning_callback = save_warning_callback # doc.save(ARTIFACTS_DIR + \"PdfSaveOption.unsupported_image_format_warning.pdf\", aw.SaveFormat.PDF)", "builder = aw.DocumentBuilder(doc) # Insert headings that can serve as TOC entries of", "such \"sub-entries\". # In our document, the outline entries from the 5th heading", "the document. # Set the \"page_mode\" property to \"PdfPageMode.USE_OUTLINES\" to get the PDF", "PDF documents. doc = aw.Document(MY_DIR + \"Images.docx\") # Create a \"PdfSaveOptions\" object that", "save form fields as interactive objects in the output PDF. # Set the", "= aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name = \"Courier", "the \"font_embedding_mode\" property to \"EMBED_NONE\" to not embed any fonts in the output", "a document as we export it to PDF. doc = aw.Document() builder =", "# \"Page 2 (even)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page 4", "color space for images in the document that it converts to PDF. #", "numbers. # Set the \"numeral_format\" property to \"NumeralFormat.CONTEXT\" to # look up the", "#self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows how to limit", "downsample all images to 36 ppi. options.downsample_options.resolution = 36 # Set the \"resolution_threshold\"", "== aw.saving.DmlRenderingMode.FALLBACK: self.assertIn( b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox", "be any, at the cost of increased file size. 
# Set the \"additional_text_positioning\"", "self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\") builder.writeln(\"Heading 1.2\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3", "# use glyphs from the U+06F0 to U+06F9 range as numbers. # Set", "changing type of warnings about unsupported metafile records. #def test_handle_binary_raster_warnings(self): # doc =", "property's value. pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR", "content) self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R", "#ExSummary:Shows how to update all the fields in a document immediately before saving", "reader to open the saved # document in full-screen mode, which takes over", "interpreting our document. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.paragraph_format.style = doc.styles.get_by_name(\"Heading 1\")", "property to \"PdfPageMode.USE_NONE\" to get the PDF reader to display just the document", "a digital signature and assign it to our SaveOptions object to sign the", "3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group", "text we will format in a heading-type style, will serve as the column", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #8\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)) # self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL)", "= aspose.pdf.facades.PdfBookmarkEditor() #bookmark_editor.bind_pdf(ARTIFACTS_DIR + \"PdfSaveOptions.headings_outline_levels.pdf\") #bookmarks = bookmark_editor.extract_bookmarks() #self.assertEqual(3, bookmarks.count) def test_create_missing_outline_levels(self): for", "\"True\" to turn all footnote/endnote symbols # in the text act as links", "document. options.embed_full_fonts = embed_full_fonts doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_full_fonts.pdf\", options) if embed_full_fonts: self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR +", "are sub-entries of the second 3rd level entry, and so on. # In", "\"PdfSaveOptions.embed_core_fonts.pdf\")) else: self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_core_fonts.pdf\") #pdf_doc_fonts", "all of the EMF+ records are supported. # Otherwise, Aspose.Words will render the", "document when we save it to PDF. certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\")", "fold. 
doc = aw.Document(MY_DIR + \"Paragraphs.docx\") # Create a \"PdfSaveOptions\" object that we", "(t) 0 (a) -1 (g) 1 (,) 0 ( ) 0 (1) 0", "= aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #with open(ARTIFACTS_DIR + \"PdfSaveOptions.preserve_form_fields.pdf\", 'rb') as file: # content =", "options = aw.saving.PdfSaveOptions() # Set the \"use_core_fonts\" property to \"True\" to replace some", "aw.saving.PdfSaveOptions() # Configure the \"digital_signature_details\" object of the \"SaveOptions\" object to # digitally", "processing cost. options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\", options) #ExEnd #pdf_document", "with their fallback shapes. # Set the \"dml_rendering_mode\" property to \"DmlRenderingMode.DRAWING_ML\" # to", "elit, \" + \"sed do eiusmod tempor incididunt ut labore et dolore magna", "get the PDF reader to display a separate panel # with a thumbnail", "to .PDF. options = aw.saving.PdfSaveOptions() # The output PDF document will contain an", "save to PDF to 220 ppi. self.assertTrue(options.downsample_options.downsample_images) self.assertEqual(220, options.downsample_options.resolution) self.assertEqual(0, options.downsample_options.resolution_threshold) doc.save(ARTIFACTS_DIR +", "Set the \"display_doc_title\" to \"False\" to get such readers to display the document's", "to \"36\" to downsample all images to 36 ppi. options.downsample_options.resolution = 36 #", "that all the fields will display # the most accurate values in the", "b\"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85", "to PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.writeln(\"Jpeg image:\") builder.insert_image(IMAGE_DIR + \"Logo.jpg\")", "+ \"PdfSaveOptions.handle_binary_raster_warnings.pdf\", save_options) # self.assertEqual(1, callback.warnings.count) # self.assertEqual(\"'R2_XORPEN' binary raster operation is partly", "the \"headings_outline_levels\" property to \"1\" to get the outline # to only register", "self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode): #ExStart #ExFor:PdfImageColorSpaceExportMode #ExFor:PdfSaveOptions.image_color_space_export_mode #ExSummary:Shows how to set a different color space for", "\"PdfSaveOptions.downsample_options.default.pdf\") #pdf_doc_image = pdf_document.pages[1].resources.images[1] #self.assertLess(300000, pdf_doc_image.to_stream().length) #self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode in", "with their PDF Type 1 equivalents. # Set the \"use_core_fonts\" property to \"False\"", "open the saved # document in full-screen mode, which takes over the monitor's", "aw.digitalsignatures.CertificateHolder.create(MY_DIR + \"morzal.pfx\", \"aw\") options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, \"Test Signing\", \"Aspose Office\", datetime.now()) #", "to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to # use glyphs from the U+06F0 to U+06F9 range as", "= pdf_image_color_space_export_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.image_color_space_export_mode.pdf\") #pdf_doc_image", "#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.fonts_scaled_to_metafile_size.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber) #text_fragment_rectangle = text_absorber.text_fragments[3].rectangle 
#self.assertAlmostEqual(1.589", "StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #5\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL)", "self.assertIn( # b\"5 0 obj\\r\\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0", "custom properties within the output PDF document. # Set the \"custom_properties_export\" property to", "self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\") #pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()", "# self.assertEqual(\"1.7\", pdf_document.version) #elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U: # self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format) # self.assertEqual(\"1.7\", pdf_document.version)", "doc = aw.Document() builder = aw.DocumentBuilder(doc) img = drawing.Image.from_file(IMAGE_DIR + \"Transparent background logo.png\")", "os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD: self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\")) elif pdf_font_embedding_mode", "( ) 0 (N) 0 (o) 0 (v) 0 (e) 0 (m) 0", "how to save a document to the PDF format using the Save method", "with self.subTest(interpolate_images=interpolate_images): #ExStart #ExFor:PdfSaveOptions.interpolate_images #ExSummary:Shows how to perform interpolation on images while saving", "do eiusmod tempor incididunt ut labore et dolore magna aliqua.\") # Create a", "0 R>>/XObject<</X1 10 0 R/X2 11 0 R/X3 12 0 R/X4 13 0", "fonts in the output PDF. 
options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if", "interpolate images. # Their resolution should be lower than that of the device", "with \"PDF/A-1b\" as well as preserving the document structure of the original document.", "\"HeaderFooterBookmarksExportMode.FIRST\" to # only export bookmarks in the first section's header/footers. # Set", "an option to optimize memory consumption when rendering large documents to PDF. doc", "all images that end up in the output PDF. pdf_save_options.image_compression = pdf_image_compression #", "to .PDF. options = aw.saving.PdfSaveOptions() # Set the \"custom_properties_export\" property to \"PdfCustomPropertiesExport.NONE\" to", "= 128 # Only the first two images from the document will be", "builder = aw.DocumentBuilder(doc) builder.writeln(\"Hello world!\") builder.writeln(\"Hola mundo!\") save_options = aw.saving.PdfSaveOptions() # Note, when", "aw.saving.PdfSaveOptions() # Set the \"page_mode\" property to \"PdfPageMode.FULL_SCREEN\" to get the PDF reader", "# aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK): # self.assertEqual(0, pdf_document.pages[1].resources.images.count) # self.assertIn( # b\"5 0 obj\\r\\n\" + #", "can clone PdfSaveOptions objects. 
options_copy = options.clone() doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\", options) #ExEnd #pdf_document", "pdf_doc_fonts[0].is_embedded) #self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded) def test_additional_text_positioning(self): for apply_additional_text_positioning in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart", "color_mode == aw.saving.ColorMode.GRAYSCALE: # self.assertLess(1000000, pdf_doc_image.to_stream().length) # self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type()) def test_doc_title(self): for display_doc_title", "0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\",", "+ \"PdfSaveOptions.text_compression.pdf\", \"rb\") as file: self.assertIn(b\"12 0 obj\\r\\n<</Length 13 0 R/Filter /FlateDecode>>stream\", file.read())", "#6\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #6\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #7\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #7\",", "self.assertEqual(\"Bookmark_2\", outline_item_collection[2].title) # self.assertEqual(\"1 XYZ 84 47 0\", outline_item_collection[2].destination.to_string()) # self.assertEqual(\"Bookmark_3\", outline_item_collection[3].title) #", "R>>/XObject<</X1 10 0 R/X2 11 0 R/X3 12 0 R/X4 13 0 R>>>>/Group", "# which complies with \"PDF/A-1b\" as well as preserving the document structure of", "to \"True\" to render embedded EMF data # for metafiles that we can", "that they open new pages when we click on them. doc = aw.Document()", "PDF. 
doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.font.name = \"Arial\" builder.writeln(\"Hello world!\") builder.font.name", "0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10", "as we render it with the \"save\" method. signing_time = datetime.now() options.digital_signature_details =", "then 3. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\") builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2 builder.writeln(\"Heading 1.1\")", "stream: # Create a \"PdfSaveOptions\" object that we can pass to the document's", "outline_item_collection[4].destination.to_string()) #def test_unsupported_image_format_warning(self): # doc = aw.Document(MY_DIR + \"Corrupted image.docx\") # save_warning_callback =", "builder = aw.DocumentBuilder(doc) # Insert headings of levels 1 to 5. builder.paragraph_format.style_identifier =", "include all headings within tables # in the outline, provided that they have", "self.assertRaises(Exception): # self.verify_image(400, 400, pdf_doc_image_stream) def test_image_color_space_export_mode(self): for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO, aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK): with", "is provided # \"as is\", without warranty of any kind, either expressed or", "of glyphs to use. # Set the \"numeral_format\" property to \"NumeralFormat.EASTERN_ARABIC_INDIC\" to #", "/JavaScript/JS(app.launchURL\\\\(\\\"https://www.google.com/search?q=%20aspose\\\", True\\\\);)>>>>\", content) else: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS \"", "at the cost of image quality. pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options)", "PDF. 
options.font_embedding_mode = pdf_font_embedding_mode doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.embed_windows_fonts.pdf\", options) if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL: self.assertLess(1000000,", "#2\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL)) # self.assertTrue(text_absorber.text.index_of(\"Heading #3\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #4\",", "<</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>\", content) else: self.assertIn( b\"5 0 obj\\r\\n\" + b\"<</Type /Page/Parent 3", "to determine what number of glyphs to use. # Set the \"numeral_format\" property", "how to save hyperlinks in a document we convert to PDF so that", "space for all images in the saved PDF. # Aspose.Words will also apply", "R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>\", content) def test_preblend_images(self): for preblend_images in (False, True): with self.subTest(preblend_images=preblend_images): #ExStart", "with open(ARTIFACTS_DIR + \"PdfSaveOptions.page_mode.pdf\", \"rb\") as file: content = file.read().decode('utf-8') if page_mode ==", "document. # Set the \"interpolate_images\" property to \"False\" to make it so that", "8/SMask 10 0 R/Length 11 0 R/Filter /FlateDecode>>\", content) #def test_dml3d_effects_rendering_mode_test(self): # doc", "= aw.saving.PdfPageMode.USE_OUTLINES # Set the \"default_bookmarks_outline_level\" property to \"1\" to display all #", "this will have. options.text_compression = pdf_text_compression doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.text_compression.pdf\", options) #ExEnd if pdf_text_compression", "(False, True): with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables): #ExStart #ExFor:OutlineOptions.create_outlines_for_headings_in_tables #ExSummary:Shows how to create PDF document outline", "also with more processing cost. 
options.dml_effects_rendering_mode = effects_rendering_mode self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.drawing_ml_effects.pdf\",", "pdf_document.info.title) def test_memory_optimization(self): for memory_optimization in (False, True): with self.subTest(memory_optimization=memory_optimization): #ExStart #ExFor:SaveOptions.create_save_options(SaveFormat) #ExFor:SaveOptions.memory_optimization", "space for images in a document as we export it to PDF. doc", "aw.saving.Dml3DEffectsRenderingMode.ADVANCED # doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf\", save_options) # self.assertEqual(38, warning_callback.count) #class RenderCallback(aw.IWarningCallback): # def", "aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\" + # \"Page 4 (even)\", text_absorber.text) #pdf_document =", "\"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of 2\" if update_fields else \"Page", "+ \"PdfSaveOptions.additional_text_positioning.pdf\", save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\") #text_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages[1].accept(text_absorber)", "pdf_document.info[\"Company\"]) def test_drawing_ml_effects(self): for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE, aw.saving.DmlEffectsRenderingMode.SIMPLIFIED, aw.saving.DmlEffectsRenderingMode.FINE): with self.subTest(effects_rendering_mode=effects_rendering_mode): #ExStart #ExFor:DmlRenderingMode", "property to \"GRAYSCALE\" to render all images from the document in black and", "options.outline_options.headings_outline_levels = 4 # If an outline entry has subsequent entries of a", "fox jumps over the 
lazy dog.\") # Create a \"PdfSaveOptions\" object that we", "/Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\\r\\n\", content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS:", "quality. pdf_save_options.jpeg_quality = 10 doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.image_compression.pdf\", pdf_save_options) #ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR +", "\"PdfImageColorSpaceExportMode.SIMPLE_CMYK\" # to use the CMYK color space for all images in the", "##ExEnd def test_header_footer_bookmarks_export_mode(self): for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE, aw.saving.HeaderFooterBookmarksExportMode.FIRST, aw.saving.HeaderFooterBookmarksExportMode.ALL): with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode): #ExStart #ExFor:HeaderFooterBookmarksExportMode", "text_absorber.text) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.odd.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 1 (odd)\\r\\n\"", "= aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text) def test_headings_outline_levels(self): #ExStart #ExFor:ParagraphFormat.is_heading #ExFor:PdfSaveOptions.outline_options #ExFor:PdfSaveOptions.save_format #ExSummary:Shows", "\"PdfSaveOptions.open_hyperlinks_in_new_window.pdf\", \"rb\") as file: content = file.read() if open_hyperlinks_in_new_window: self.assertIn( b\"<</Type /Annot/Subtype /Link/Rect", "= aw.saving.PdfSaveOptions() # Set the \"update_fields\" property to \"False\" to not update all", "= aw.WarningInfoCollection() # def warning(self, info: aw.WarningInfo): # if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS: #", "# \"[(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e) (\\xFE\\xFF\\0B\\0a\\0n\\0a\\0n\\0a) 
(\\xFE\\xFF\\0C\\0h\\0e\\0r\\0r\\0y) ]/V(\\xFE\\xFF\\0A\\0p\\0p\\0l\\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12 0 R>>>>\",", "the color space for images in the document that it converts to PDF.", "only the even-numbered pages: options.page_set = aw.saving.PageSet.even doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\", options) # 2", "# self.assertFalse(text_absorber.text.index_of(\"Heading #9\", StringComparison.ORDINAL) < text_absorber.text.index_of(\"Heading #10\", StringComparison.ORDINAL)) #else: # self.assertTrue(text_absorber.text.index_of(\"Heading #1\", StringComparison.ORDINAL)", "method # to modify how that method converts the document to .PDF. options", "documentation, and is provided # \"as is\", without warranty of any kind, either", "to # digitally sign the document as we render it with the \"save\"", "PDF. # Set the \"font_embedding_mode\" property to \"EMBED_NONSTANDARD\" to only allow nonstandard fonts'", "128 ppi. options.downsample_options.resolution_threshold = 128 # Only the first two images from the", "open this document with a reader such as Adobe Acrobat, we will need", "TOC entries of levels 1 and 5. builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 self.assertTrue(builder.paragraph_format.is_heading) builder.writeln(\"Heading 1\")", "well as preserving the document structure of the original document. 
# This helps", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.export_page_set.even.pdf\") #text_absorber = aspose.pdf.text.TextAbsorber() #pdf_document.pages.accept(text_absorber) #self.assertEqual(\"Page 2 (even)\\r\\n\"", "#ExEnd #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.update_fields.pdf\") #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 1 of", "# self.assertEqual(\"Apple\", field.value) #else: # self.assertEqual(\"Please select a fruit: Apple\", text_fragment_absorber.text) # self.assertNotIn(\"/Widget\",", "in (aw.saving.DmlRenderingMode.FALLBACK, aw.saving.DmlRenderingMode.DRAWING_ML): with self.subTest(dml_rendering_mode=dml_rendering_mode): #ExStart #ExFor:DmlRenderingMode #ExFor:SaveOptions.dml_rendering_mode #ExSummary:Shows how to render fallback", "header. builder.start_table() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1 builder.write(\"Customers\") builder.end_row() builder.insert_cell() builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL builder.write(\"<NAME>\")", "function as hyperlinks. 
doc = aw.Document(MY_DIR + \"Footnotes and endnotes.docx\") # Create a", "= aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK # # Create a \"PdfSaveOptions\" object that we can pass to", "doc = aw.Document() builder = aw.DocumentBuilder(doc) builder.write(\"Please select a fruit: \") # Insert", "control the quality of the Jpeg images that end up in the output", "test_dml3d_effects_rendering_mode_test(self): # doc = aw.Document(MY_DIR + \"DrawingML shape 3D effects.docx\") # warning_callback =", "for apply_additional_text_positioning in (False, True): with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning): #ExStart #ExFor:PdfSaveOptions.additional_text_positioning #ExSummary:Show how to write", "content) #pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.note_hyperlinks.pdf\") #page = pdf_document.pages[1] #annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL))", "\"False\" to render the PDF normally. options.use_book_fold_printing_settings = render_text_as_bookfold # If we are", "# self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + \"PdfSaveOptions.additional_text_positioning.pdf\")) # self.assertEqual( # \"[(Samsta) -1 (g) 1 (,", "#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type()) def test_color_rendering(self): for color_mode in (aw.saving.ColorMode.GRAYSCALE, aw.saving.ColorMode.NORMAL): with self.subTest(color_mode=color_mode): #ExStart #ExFor:PdfSaveOptions", "images to 36 ppi. 
options.downsample_options.resolution = 36 # Set the \"resolution_threshold\" property to", "R>>\\r\\n\", content) elif page_mode == aw.saving.PdfPageMode.USE_THUMBS: self.assertIn( f\"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata", "= aspose.pdf.Document(ARTIFACTS_DIR + \"PdfSaveOptions.one_page.pdf\") #self.assertEqual(1, pdf_document.pages.count) #text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber() #pdf_document.pages.accept(text_fragment_absorber) #self.assertEqual(\"Page 2.\", text_fragment_absorber.text)", "= aw.Document(MY_DIR + \"Rendering.docx\") # Create a \"PdfSaveOptions\" object that we can pass", "to preblend images with transparent backgrounds while saving a document to PDF. doc", "self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date) doc.save(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", options) #ExEnd with open(ARTIFACTS_DIR + \"PdfSaveOptions.pdf_digital_signature.pdf\", \"rb\") as", "\"Page 2 (even)\\r\\n\" + # \"Page 3 (odd)\\r\\n\" + # \"Page 4 (even)\\r\\n\"", "doc = aw.Document() builder = aw.DocumentBuilder(doc) # Insert headings of levels 1 to", "New Roman fonts into a PDF document. doc = aw.Document() builder = aw.DocumentBuilder(doc)", "glyph of every embedded font in the output PDF. # The document's size", "aw.saving.PdfSaveOptions() # Note, when \"export_document_structure\" is \"False\", \"export_language_to_span_tag\" is ignored. save_options.export_document_structure = True" ]
[ "details. def strip_quotes(s): if s[0]==s[-1] and s[0] in ['\"',\"'\"]: return s[1:-1] else: return", "BSD 3-Clause License. See LICENSE.txt for details. def strip_quotes(s): if s[0]==s[-1] and s[0]", "2021, <NAME> # Licensed under BSD 3-Clause License. See LICENSE.txt for details. def", "# Licensed under BSD 3-Clause License. See LICENSE.txt for details. def strip_quotes(s): if", "Copyright (c) 2021, <NAME> # Licensed under BSD 3-Clause License. See LICENSE.txt for", "<NAME> # Licensed under BSD 3-Clause License. See LICENSE.txt for details. def strip_quotes(s):", "See LICENSE.txt for details. def strip_quotes(s): if s[0]==s[-1] and s[0] in ['\"',\"'\"]: return", "Licensed under BSD 3-Clause License. See LICENSE.txt for details. def strip_quotes(s): if s[0]==s[-1]", "(c) 2021, <NAME> # Licensed under BSD 3-Clause License. See LICENSE.txt for details.", "LICENSE.txt for details. def strip_quotes(s): if s[0]==s[-1] and s[0] in ['\"',\"'\"]: return s[1:-1]", "for details. def strip_quotes(s): if s[0]==s[-1] and s[0] in ['\"',\"'\"]: return s[1:-1] else:", "3-Clause License. See LICENSE.txt for details. def strip_quotes(s): if s[0]==s[-1] and s[0] in", "under BSD 3-Clause License. See LICENSE.txt for details. def strip_quotes(s): if s[0]==s[-1] and", "# Copyright (c) 2021, <NAME> # Licensed under BSD 3-Clause License. See LICENSE.txt", "License. See LICENSE.txt for details. def strip_quotes(s): if s[0]==s[-1] and s[0] in ['\"',\"'\"]:", "def strip_quotes(s): if s[0]==s[-1] and s[0] in ['\"',\"'\"]: return s[1:-1] else: return s" ]
[ "Turbine class Represents a turbine in the Rankine cycle \"\"\" def __init__(self, inletState):", "\"\"\" Simulates the turbine and tries to have the exit quality as desiredOutletQuality.", "on the Turbine's inlet. Must be an IAPWS97 object \"\"\" if not isinstance(inletState,", "quality of the turbine exit \"\"\" self.exitState = iapws.IAPWS97(P=desiredOutletPressure, s=self.inletState.s) self.workExtracted = -", "turbine with the previous conditions inletState: The state of the steam on the", "so by progressively and isentropically extracting work from the turbine until the desired", "IAPWS97 object \"\"\" if not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should be of type", "inletState def simulate(self, desiredOutletPressure): \"\"\" Simulates the turbine and tries to have the", "raise TypeError(\"inletState should be of type iawps.IAWPS97\") self.inletState = inletState def simulate(self, desiredOutletPressure):", "in the Rankine cycle \"\"\" def __init__(self, inletState): \"\"\" Initializes the turbine with", "until the desired outlet quality is reached desiredOutletQuality: The quality of the turbine", "The quality of the turbine exit \"\"\" self.exitState = iapws.IAPWS97(P=desiredOutletPressure, s=self.inletState.s) self.workExtracted =", "of the steam on the Turbine's inlet. Must be an IAPWS97 object \"\"\"", "type iawps.IAWPS97\") self.inletState = inletState def simulate(self, desiredOutletPressure): \"\"\" Simulates the turbine and", "turbine and tries to have the exit quality as desiredOutletQuality. It does so", "The state of the steam on the Turbine's inlet. Must be an IAPWS97", "iapws class Turbine(): \"\"\" Turbine class Represents a turbine in the Rankine cycle", "the steam on the Turbine's inlet. 
Must be an IAPWS97 object \"\"\" if", "isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should be of type iawps.IAWPS97\") self.inletState = inletState def", "turbine until the desired outlet quality is reached desiredOutletQuality: The quality of the", "class Represents a turbine in the Rankine cycle \"\"\" def __init__(self, inletState): \"\"\"", "exit quality as desiredOutletQuality. It does so by progressively and isentropically extracting work", "be of type iawps.IAWPS97\") self.inletState = inletState def simulate(self, desiredOutletPressure): \"\"\" Simulates the", "reached desiredOutletQuality: The quality of the turbine exit \"\"\" self.exitState = iapws.IAPWS97(P=desiredOutletPressure, s=self.inletState.s)", "Turbine(): \"\"\" Turbine class Represents a turbine in the Rankine cycle \"\"\" def", "desiredOutletPressure): \"\"\" Simulates the turbine and tries to have the exit quality as", "not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should be of type iawps.IAWPS97\") self.inletState = inletState", "the turbine until the desired outlet quality is reached desiredOutletQuality: The quality of", "with the previous conditions inletState: The state of the steam on the Turbine's", "from the turbine until the desired outlet quality is reached desiredOutletQuality: The quality", "by progressively and isentropically extracting work from the turbine until the desired outlet", "isentropically extracting work from the turbine until the desired outlet quality is reached", "<gh_stars>1-10 import iapws class Turbine(): \"\"\" Turbine class Represents a turbine in the", "object \"\"\" if not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should be of type iawps.IAWPS97\")", "Must be an IAPWS97 object \"\"\" if not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should", "as desiredOutletQuality. 
It does so by progressively and isentropically extracting work from the", "the desired outlet quality is reached desiredOutletQuality: The quality of the turbine exit", "iapws.IAPWS97): raise TypeError(\"inletState should be of type iawps.IAWPS97\") self.inletState = inletState def simulate(self,", "to have the exit quality as desiredOutletQuality. It does so by progressively and", "class Turbine(): \"\"\" Turbine class Represents a turbine in the Rankine cycle \"\"\"", "simulate(self, desiredOutletPressure): \"\"\" Simulates the turbine and tries to have the exit quality", "have the exit quality as desiredOutletQuality. It does so by progressively and isentropically", "inletState): \"\"\" Initializes the turbine with the previous conditions inletState: The state of", "Initializes the turbine with the previous conditions inletState: The state of the steam", "Turbine's inlet. Must be an IAPWS97 object \"\"\" if not isinstance(inletState, iapws.IAPWS97): raise", "extracting work from the turbine until the desired outlet quality is reached desiredOutletQuality:", "be an IAPWS97 object \"\"\" if not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should be", "iawps.IAWPS97\") self.inletState = inletState def simulate(self, desiredOutletPressure): \"\"\" Simulates the turbine and tries", "def __init__(self, inletState): \"\"\" Initializes the turbine with the previous conditions inletState: The", "of type iawps.IAWPS97\") self.inletState = inletState def simulate(self, desiredOutletPressure): \"\"\" Simulates the turbine", "quality is reached desiredOutletQuality: The quality of the turbine exit \"\"\" self.exitState =", "does so by progressively and isentropically extracting work from the turbine until the", "state of the steam on the Turbine's inlet. Must be an IAPWS97 object", "inlet. 
Must be an IAPWS97 object \"\"\" if not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState", "inletState: The state of the steam on the Turbine's inlet. Must be an", "Represents a turbine in the Rankine cycle \"\"\" def __init__(self, inletState): \"\"\" Initializes", "the Rankine cycle \"\"\" def __init__(self, inletState): \"\"\" Initializes the turbine with the", "progressively and isentropically extracting work from the turbine until the desired outlet quality", "tries to have the exit quality as desiredOutletQuality. It does so by progressively", "work from the turbine until the desired outlet quality is reached desiredOutletQuality: The", "def simulate(self, desiredOutletPressure): \"\"\" Simulates the turbine and tries to have the exit", "of the turbine exit \"\"\" self.exitState = iapws.IAPWS97(P=desiredOutletPressure, s=self.inletState.s) self.workExtracted = - self.exitState.h", "cycle \"\"\" def __init__(self, inletState): \"\"\" Initializes the turbine with the previous conditions", "the turbine and tries to have the exit quality as desiredOutletQuality. It does", "the turbine with the previous conditions inletState: The state of the steam on", "__init__(self, inletState): \"\"\" Initializes the turbine with the previous conditions inletState: The state", "TypeError(\"inletState should be of type iawps.IAWPS97\") self.inletState = inletState def simulate(self, desiredOutletPressure): \"\"\"", "if not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should be of type iawps.IAWPS97\") self.inletState =", "\"\"\" if not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should be of type iawps.IAWPS97\") self.inletState", "and tries to have the exit quality as desiredOutletQuality. It does so by", "quality as desiredOutletQuality. 
It does so by progressively and isentropically extracting work from", "and isentropically extracting work from the turbine until the desired outlet quality is", "desired outlet quality is reached desiredOutletQuality: The quality of the turbine exit \"\"\"", "= inletState def simulate(self, desiredOutletPressure): \"\"\" Simulates the turbine and tries to have", "previous conditions inletState: The state of the steam on the Turbine's inlet. Must", "turbine exit \"\"\" self.exitState = iapws.IAPWS97(P=desiredOutletPressure, s=self.inletState.s) self.workExtracted = - self.exitState.h + self.inletState.h", "Rankine cycle \"\"\" def __init__(self, inletState): \"\"\" Initializes the turbine with the previous", "the exit quality as desiredOutletQuality. It does so by progressively and isentropically extracting", "the previous conditions inletState: The state of the steam on the Turbine's inlet.", "It does so by progressively and isentropically extracting work from the turbine until", "should be of type iawps.IAWPS97\") self.inletState = inletState def simulate(self, desiredOutletPressure): \"\"\" Simulates", "turbine in the Rankine cycle \"\"\" def __init__(self, inletState): \"\"\" Initializes the turbine", "steam on the Turbine's inlet. Must be an IAPWS97 object \"\"\" if not", "self.inletState = inletState def simulate(self, desiredOutletPressure): \"\"\" Simulates the turbine and tries to", "a turbine in the Rankine cycle \"\"\" def __init__(self, inletState): \"\"\" Initializes the", "import iapws class Turbine(): \"\"\" Turbine class Represents a turbine in the Rankine", "Simulates the turbine and tries to have the exit quality as desiredOutletQuality. It", "desiredOutletQuality: The quality of the turbine exit \"\"\" self.exitState = iapws.IAPWS97(P=desiredOutletPressure, s=self.inletState.s) self.workExtracted", "desiredOutletQuality. 
It does so by progressively and isentropically extracting work from the turbine", "the turbine exit \"\"\" self.exitState = iapws.IAPWS97(P=desiredOutletPressure, s=self.inletState.s) self.workExtracted = - self.exitState.h +", "the Turbine's inlet. Must be an IAPWS97 object \"\"\" if not isinstance(inletState, iapws.IAPWS97):", "outlet quality is reached desiredOutletQuality: The quality of the turbine exit \"\"\" self.exitState", "is reached desiredOutletQuality: The quality of the turbine exit \"\"\" self.exitState = iapws.IAPWS97(P=desiredOutletPressure,", "an IAPWS97 object \"\"\" if not isinstance(inletState, iapws.IAPWS97): raise TypeError(\"inletState should be of", "\"\"\" def __init__(self, inletState): \"\"\" Initializes the turbine with the previous conditions inletState:", "\"\"\" Initializes the turbine with the previous conditions inletState: The state of the", "\"\"\" Turbine class Represents a turbine in the Rankine cycle \"\"\" def __init__(self,", "conditions inletState: The state of the steam on the Turbine's inlet. Must be" ]
[ "table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') op.drop_index(op.f('ix_page_cid'), table_name='page') op.drop_table('page') # ### end Alembic commands", "sqlalchemy as sa # revision identifiers, used by Alembic. revision = '0b840782b66f' down_revision", "# ### commands auto generated by Alembic - please adjust! ### op.create_table('page', sa.Column('id',", "nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False)", "# ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_track_vid'), table_name='track')", "op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True),", "please adjust! 
### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track')", "nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'),", "sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties',", "sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64),", "op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page')", "nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True),", "- please adjust! 
### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'),", "0b840782b66f Revises: Create Date: 2020-10-27 17:24:10.636183 \"\"\" from alembic import op import sqlalchemy", "by Alembic. revision = '0b840782b66f' down_revision = None branch_labels = None depends_on =", "nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False)", "generated by Alembic - please adjust! ### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24),", "sa # revision identifiers, used by Alembic. revision = '0b840782b66f' down_revision = None", "unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False),", "alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.", "nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True),", "sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(),", "['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track', 
sa.Column('id', sa.BigInteger(),", "op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'),", "['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) # ### end", "nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'),", "sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid',", "'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track',", "= None branch_labels = None depends_on = None def upgrade(): # ### commands", "['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'],", "nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8',", "op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision =", "sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer',", "adjust! ### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid',", "'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64),", "'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) # ###", "def downgrade(): # ### commands auto generated by Alembic - please adjust! ###", "\"\"\"Initial model again Revision ID: 0b840782b66f Revises: Create Date: 2020-10-27 17:24:10.636183 \"\"\" from", "table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'),", "sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid',", "unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False)", "revision identifiers, used by Alembic. 
revision = '0b840782b66f' down_revision = None branch_labels =", "sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent',", "nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True),", "downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_track_vid'),", "sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512),", "'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) # ### end Alembic commands ###", "['vid'], unique=False) # ### end Alembic commands ### def downgrade(): # ### commands", "model again Revision ID: 0b840782b66f Revises: Create Date: 2020-10-27 17:24:10.636183 \"\"\" from alembic", "### commands auto generated by Alembic - please adjust! 
### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'),", "sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'],", "'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track', sa.Column('id',", "'track', ['vid'], unique=False) # ### end Alembic commands ### def downgrade(): # ###", "2020-10-27 17:24:10.636183 \"\"\" from alembic import op import sqlalchemy as sa # revision", "table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'),", "Alembic - please adjust! ### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track')", "op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') op.drop_index(op.f('ix_page_cid'), table_name='page') op.drop_table('page') #", "import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision", "unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) # ### end Alembic", "please adjust! 
### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True),", "unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid',", "down_revision = None branch_labels = None depends_on = None def upgrade(): # ###", "### def downgrade(): # ### commands auto generated by Alembic - please adjust!", "as sa # revision identifiers, used by Alembic. revision = '0b840782b66f' down_revision =", "sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048),", "commands auto generated by Alembic - please adjust! ### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False),", "None depends_on = None def upgrade(): # ### commands auto generated by Alembic", "auto generated by Alembic - please adjust! 
### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid',", "None branch_labels = None depends_on = None def upgrade(): # ### commands auto", "Revision ID: 0b840782b66f Revises: Create Date: 2020-10-27 17:24:10.636183 \"\"\" from alembic import op", "op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) # ### end Alembic commands ### def downgrade(): #", "['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'],", "sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip',", "nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True),", "unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) # ### end Alembic commands ### def downgrade():", "nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True),", "sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'),", "server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) 
op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'],", "commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track')", "ID: 0b840782b66f Revises: Create Date: 2020-10-27 17:24:10.636183 \"\"\" from alembic import op import", "nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True),", "### commands auto generated by Alembic - please adjust! ### op.create_table('page', sa.Column('id', sa.BigInteger(),", "again Revision ID: 0b840782b66f Revises: Create Date: 2020-10-27 17:24:10.636183 \"\"\" from alembic import", "mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'],", "Alembic commands ### def downgrade(): # ### commands auto generated by Alembic -", "branch_labels = None depends_on = None def upgrade(): # ### commands auto generated", ") op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False)", "\"\"\" from alembic import op import sqlalchemy as sa # revision identifiers, used", "op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'),", "op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) # ### end Alembic commands", "Create Date: 2020-10-27 
17:24:10.636183 \"\"\" from alembic import op import sqlalchemy as sa", "end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic", "sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page',", "op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track',", "op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event',", "op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') op.drop_index(op.f('ix_page_cid'), table_name='page') op.drop_table('page') # ### end Alembic", "by Alembic - please adjust! 
### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'),", "sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page',", "nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'),", "sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' )", "sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid',", "sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),", "def upgrade(): # ### commands auto generated by Alembic - please adjust! 
###", "revision = '0b840782b66f' down_revision = None branch_labels = None depends_on = None def", "sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'),", "nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False)", "op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) #", "op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page')", "op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') op.drop_index(op.f('ix_page_cid'), table_name='page') op.drop_table('page') # ### end Alembic commands ###", "mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'],", "sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at',", "adjust! 
### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track')", "op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page')", "commands ### def downgrade(): # ### commands auto generated by Alembic - please", "nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False)", "auto generated by Alembic - please adjust! 
### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'),", "sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track',", "= None depends_on = None def upgrade(): # ### commands auto generated by", "['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'],", "nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True),", "sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64),", "table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'),", "sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'),", "### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', 
sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36),", "sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track',", "'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track',", "by Alembic - please adjust! ### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True),", "sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url',", "mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page',", "= None def upgrade(): # ### commands auto generated by Alembic - please", "unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False)", "### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track') op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'),", "used by Alembic. 
revision = '0b840782b66f' down_revision = None branch_labels = None depends_on", "unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False)", "unique=False) # ### end Alembic commands ### def downgrade(): # ### commands auto", "sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128),", "op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'),", "op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'),", "unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False) op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False)", "['vid'], unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid',", "'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page',", "import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '0b840782b66f'", "server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'],", "op.drop_index(op.f('ix_track_created_at'), table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page')", "op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') op.drop_index(op.f('ix_page_cid'), table_name='page')", "None def upgrade(): # ### commands auto generated by Alembic - please adjust!", "depends_on = None def upgrade(): # ### commands auto generated by Alembic -", "mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track',", "'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page',", "sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'],", "upgrade(): # ### commands auto generated by Alembic - please adjust! 
### op.create_table('page',", "Alembic. revision = '0b840782b66f' down_revision = None branch_labels = None depends_on = None", "identifiers, used by Alembic. revision = '0b840782b66f' down_revision = None branch_labels = None", "Date: 2020-10-27 17:24:10.636183 \"\"\" from alembic import op import sqlalchemy as sa #", "sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB'", "# ### end Alembic commands ### def downgrade(): # ### commands auto generated", "op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24),", "Alembic - please adjust! ### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid',", "'0b840782b66f' down_revision = None branch_labels = None depends_on = None def upgrade(): #", "sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36),", "generated by Alembic - please adjust! 
### op.drop_index(op.f('ix_track_vid'), table_name='track') op.drop_index(op.f('ix_track_uid'), table_name='track') op.drop_index(op.f('ix_track_sid'), table_name='track')", "unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36),", "17:24:10.636183 \"\"\" from alembic import op import sqlalchemy as sa # revision identifiers,", "table_name='track') op.drop_index(op.f('ix_track_cid'), table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') op.drop_index(op.f('ix_page_cid'),", "sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'),", "op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36), nullable=True), sa.Column('cid', sa.String(length=36), nullable=True),", ") op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False) op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False) op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False)", "from alembic import op import sqlalchemy as sa # revision identifiers, used by", "table_name='track') op.drop_table('track') op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') 
op.drop_index(op.f('ix_page_cid'), table_name='page') op.drop_table('page')", "Revises: Create Date: 2020-10-27 17:24:10.636183 \"\"\" from alembic import op import sqlalchemy as", "### end Alembic commands ### def downgrade(): # ### commands auto generated by", "op.drop_index(op.f('ix_page_vid'), table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') op.drop_index(op.f('ix_page_cid'), table_name='page') op.drop_table('page') # ###", "sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_charset='utf8', mysql_engine='InnoDB' ) op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False) op.create_index(op.f('ix_page_created_at'),", "= '0b840782b66f' down_revision = None branch_labels = None depends_on = None def upgrade():", "nullable=True), sa.Column('uid', sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True),", "# revision identifiers, used by Alembic. revision = '0b840782b66f' down_revision = None branch_labels", "- please adjust! 
### op.create_table('page', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('vid', sa.String(length=24), nullable=True), sa.Column('sid', sa.String(length=36),", "['created_at'], unique=False) op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False) op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'],", "sa.String(length=64), nullable=True), sa.Column('ip', sa.String(length=128), nullable=True), sa.Column('user_agent', sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048),", "['uid'], unique=False) op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True),", "'page', ['vid'], unique=False) op.create_table('track', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('event', sa.String(length=64), nullable=True), sa.Column('vid', sa.String(length=24), nullable=True),", "table_name='page') op.drop_index(op.f('ix_page_uid'), table_name='page') op.drop_index(op.f('ix_page_sid'), table_name='page') op.drop_index(op.f('ix_page_created_at'), table_name='page') op.drop_index(op.f('ix_page_cid'), table_name='page') op.drop_table('page') # ### end", "['uid'], unique=False) op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False) # ### end Alembic commands ### def", "sa.String(length=512), nullable=True), sa.Column('referer', sa.String(length=2048), nullable=True), sa.Column('url', sa.String(length=2048), nullable=True), sa.Column('properties', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime()," ]
[ "51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__", "#!/usr/bin/env python # # Parses the CSV version of the IANA Service Name", "Foundation; either version 2 # of the License, or (at your option) any", "defaults to %s ''' % (iana_svc_url) import sys import getopt import csv import", "PURPOSE. See the # GNU General Public License for more details. # #", "# as published by the Free Software Foundation; either version 2 # of", "headers = port_reader.next() else: headers = next(port_reader) try: sn_pos = headers.index('Service Name') except:", "'NOTE Conflict', ] min_body_size = 900000 # Size was ~ 922000 on 2013-08-06", "the License, or (at your option) any later version. # # This program", "'', comment) if re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s #", "[url] url defaults to %s ''' % (iana_svc_url) import sys import getopt import", "found at: # %s # %s ''' % (iana_svc_url, body)) if __name__ ==", "CSV version of the IANA Service Name and Transport Protocol Port Number Registry", "lines = [] port_reader = csv.reader(svc_fd) # Header positions as of 2013-08-06 if", "codecs services_file = 'services' exclude_services = [ '^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments =", "purposes', 'NOTE Conflict', ] min_body_size = 900000 # Size was ~ 922000 on", "len(port) < 1 or len(proto) < 1: continue for pos in positions: del", "IANA Service Name and Transport Protocol Port Number Registry # and generates a", "'^spl-itunes', '^shilp', ] exclude_comments = [ 'should not be used for discovery purposes',", "a local copy of the IANA port-numbers file. # # Wireshark uses it", "0x300: import urllib else: import urllib.request, urllib.error, urllib.parse import codecs services_file = 'services'", "# and generates a services(5) file. 
# # Wireshark - Network traffic analyzer", "the IANA Service Name and Transport Protocol Port Number Registry # and generates", "should have received a copy of the GNU General Public License # along", "else: headers = next(port_reader) try: sn_pos = headers.index('Service Name') except: sn_pos = 0", "1 or len(proto) < 1: continue for pos in positions: del row[pos] row", "0): svc_url = argv[0] else: svc_url = iana_svc_url try: if python_version < 0x300:", "svc_fd = urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error opening", "else: svc_url = iana_svc_url try: if python_version < 0x300: svc_fd = urllib.urlopen(svc_url) else:", "import re python_version = sys.hexversion >> 16 if python_version < 0x300: import urllib", "req = urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error opening ' + svc_url) body", "either version 2 # of the License, or (at your option) any later", "without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "continue if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s # %s' % ( service, port,", "0x206: headers = port_reader.next() else: headers = next(port_reader) try: sn_pos = headers.index('Service Name')", "argv[0] else: svc_url = iana_svc_url try: if python_version < 0x300: svc_fd = urllib.urlopen(svc_url)", "\"--help\"): exit_msg(None, 0) if (len(argv) > 0): svc_url = argv[0] else: svc_url =", "# Wireshark - Network traffic analyzer # By <NAME> <<EMAIL>> # Copyright 2013", "'.join(row) comment = re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments), comment):", "filter(None, row) comment = ' '.join(row) comment = re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services),", "free software; you can redistribute it and/or # modify it under the terms", "= port_reader.next() else: 
headers = next(port_reader) try: sn_pos = headers.index('Service Name') except: sn_pos", "opening ' + svc_url) body = parse_rows(svc_fd) if len(body) < min_body_size: exit_msg('Not enough", "codecs.getreader('utf8')(req) except: exit_msg('Error opening ' + svc_url) body = parse_rows(svc_fd) if len(body) <", "= sys.hexversion >> 16 if python_version < 0x300: import urllib else: import urllib.request,", "a copy of the GNU General Public License # along with this program;", "file. # # Wireshark - Network traffic analyzer # By <NAME> <<EMAIL>> #", "is a local copy of the IANA port-numbers file. # # Wireshark uses", "Registry # and generates a services(5) file. # # Wireshark - Network traffic", "= [ 'should not be used for discovery purposes', 'NOTE Conflict', ] min_body_size", "You should have received a copy of the GNU General Public License #", "= row[sn_pos] port = row[pn_pos] proto = row[tp_pos] if len(service) < 1 or", "(\"-h\", \"--help\"): exit_msg(None, 0) if (len(argv) > 0): svc_url = argv[0] else: svc_url", "details. 
# # You should have received a copy of the GNU General", "headers.index('Transport Protocol') except: tp_pos = 2 positions = [sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse()", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "import getopt import csv import re python_version = sys.hexversion >> 16 if python_version", "' + svc_url) body = parse_rows(svc_fd) if len(body) < min_body_size: exit_msg('Not enough parsed", "service, port, proto, comment )) return '\\n'.join(lines) def exit_msg(msg=None, status=1): if msg is", "that it will be useful, # but WITHOUT ANY WARRANTY; without even the", "of the GNU General Public License # along with this program; if not,", "for discovery purposes', 'NOTE Conflict', ] min_body_size = 900000 # Size was ~", "Name and Transport Protocol Port Number Registry # and generates a services(5) file.", "Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. iana_svc_url =", "comment) if re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s # %s'", "program is free software; you can redistribute it and/or # modify it under", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the", "# %s' % ( service, port, proto, comment )) return '\\n'.join(lines) def exit_msg(msg=None,", "arg in opts: if opt in (\"-h\", \"--help\"): exit_msg(None, 0) if (len(argv) >", "'\\n'.join(lines) def exit_msg(msg=None, status=1): if msg is not None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__", "# The original file can be found at: # %s # %s '''", "at: # %s # %s ''' % (iana_svc_url, body)) if __name__ == \"__main__\":", "it and/or # modify it under the terms of the GNU General Public", "= headers.index('Service Name') except: sn_pos = 0 try: pn_pos = headers.index('Port Number') except:", "row[pn_pos] proto = row[tp_pos] if len(service) < 1 or len(port) < 1 or", "the hope that it will be useful, # but WITHOUT ANY WARRANTY; without", "Fifth Floor, Boston, MA 02110-1301 USA. iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage:", "License # along with this program; if not, write to the Free Software", "parse_rows(svc_fd) if len(body) < min_body_size: exit_msg('Not enough parsed data') out = open(services_file, 'w')", "positions.reverse() for row in port_reader: service = row[sn_pos] port = row[pn_pos] proto =", "(len(argv) > 0): svc_url = argv[0] else: svc_url = iana_svc_url try: if python_version", "Network traffic analyzer # By <NAME> <<EMAIL>> # Copyright 2013 <NAME> # #", "'^shilp', ] exclude_comments = [ 'should not be used for discovery purposes', 'NOTE", "def parse_rows(svc_fd): lines = [] port_reader = csv.reader(svc_fd) # Header positions as of", "# You should have received a copy of the GNU General Public License", "opts: if opt in (\"-h\", \"--help\"): exit_msg(None, 0) if (len(argv) > 0): svc_url", "subject to copyright and being used with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # #", "headers.index('Service Name') except: sn_pos = 0 try: pn_pos = headers.index('Port Number') except: pn_pos", "args = 
getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError: exit_msg() for opt, arg in opts:", "IANA port-numbers file. # # Wireshark uses it to resolve port numbers into", "http. # # It is subject to copyright and being used with IANA's", "version. # # This program is distributed in the hope that it will", "except getopt.GetoptError: exit_msg() for opt, arg in opts: if opt in (\"-h\", \"--help\"):", "import sys import getopt import csv import re python_version = sys.hexversion >> 16", "version of the IANA Service Name and Transport Protocol Port Number Registry #", "GNU General Public License # as published by the Free Software Foundation; either", "= open(services_file, 'w') out.write('''\\ # This is a local copy of the IANA", "TCP port 80 -> http. # # It is subject to copyright and", "[\"help\"]) except getopt.GetoptError: exit_msg() for opt, arg in opts: if opt in (\"-h\",", "numbers into human readable # service names, e.g. TCP port 80 -> http.", "A PARTICULAR PURPOSE. See the # GNU General Public License for more details.", "open(services_file, 'w') out.write('''\\ # This is a local copy of the IANA port-numbers", "Software Foundation; either version 2 # of the License, or (at your option)", "the terms of the GNU General Public License # as published by the", "under the terms of the GNU General Public License # as published by", "= re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s", "+ svc_url) body = parse_rows(svc_fd) if len(body) < min_body_size: exit_msg('Not enough parsed data')", "< 0x206: headers = port_reader.next() else: headers = next(port_reader) try: sn_pos = headers.index('Service", "= ' '.join(row) comment = re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services), service): continue if", "del row[pos] row = filter(None, row) comment = ' '.join(row) comment = re.sub('[\\n]',", "''' % (iana_svc_url) import sys 
import getopt import csv import re python_version =", "Number Registry # and generates a services(5) file. # # Wireshark - Network", "02110-1301 USA. iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage: make-services.py [url] url defaults", "lines.append('%-15s %5s/%s # %s' % ( service, port, proto, comment )) return '\\n'.join(lines)", "software; you can redistribute it and/or # modify it under the terms of", "the CSV version of the IANA Service Name and Transport Protocol Port Number", "in port_reader: service = row[sn_pos] port = row[pn_pos] proto = row[tp_pos] if len(service)", "if re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s # %s' %", "of 2013-08-06 if python_version < 0x206: headers = port_reader.next() else: headers = next(port_reader)", "option) any later version. # # This program is distributed in the hope", "# GNU General Public License for more details. # # You should have", "922000 on 2013-08-06 def parse_rows(svc_fd): lines = [] port_reader = csv.reader(svc_fd) # Header", "service = row[sn_pos] port = row[pn_pos] proto = row[tp_pos] if len(service) < 1", "the GNU General Public License # as published by the Free Software Foundation;", "row[sn_pos] port = row[pn_pos] proto = row[tp_pos] if len(service) < 1 or len(port)", "try: if python_version < 0x300: svc_fd = urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url) svc_fd", "sys.stderr.write(__doc__ + '\\n') sys.exit(status) def main(argv): try: opts, args = getopt.getopt(argv, \"h\", [\"help\"])", "try: sn_pos = headers.index('Service Name') except: sn_pos = 0 try: pn_pos = headers.index('Port", "status=1): if msg is not None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status)", "# This is a local copy of the IANA port-numbers file. 
# #", "as of 2013-08-06 if python_version < 0x206: headers = port_reader.next() else: headers =", "= codecs.getreader('utf8')(req) except: exit_msg('Error opening ' + svc_url) body = parse_rows(svc_fd) if len(body)", "[ 'should not be used for discovery purposes', 'NOTE Conflict', ] min_body_size =", "= next(port_reader) try: sn_pos = headers.index('Service Name') except: sn_pos = 0 try: pn_pos", "(iana_svc_url) import sys import getopt import csv import re python_version = sys.hexversion >>", "services_file = 'services' exclude_services = [ '^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments = [", "svc_url = argv[0] else: svc_url = iana_svc_url try: if python_version < 0x300: svc_fd", "= headers.index('Port Number') except: pn_pos = 1 try: tp_pos = headers.index('Transport Protocol') except:", "with this program; if not, write to the Free Software # Foundation, Inc.,", "pn_pos, tp_pos] positions.sort() positions.reverse() for row in port_reader: service = row[sn_pos] port =", "copy of the IANA port-numbers file. # # Wireshark uses it to resolve", "and being used with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original file", "Port Number Registry # and generates a services(5) file. 
# # Wireshark -", "exit_msg('Not enough parsed data') out = open(services_file, 'w') out.write('''\\ # This is a", "modify it under the terms of the GNU General Public License # as", "row = filter(None, row) comment = ' '.join(row) comment = re.sub('[\\n]', '', comment)", "# # This program is free software; you can redistribute it and/or #", "'^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments = [ 'should not be used for discovery", "published by the Free Software Foundation; either version 2 # of the License,", "< 1: continue for pos in positions: del row[pos] row = filter(None, row)", "# It is subject to copyright and being used with IANA's permission: #", "IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original file can be found at:", "# modify it under the terms of the GNU General Public License #", "for opt, arg in opts: if opt in (\"-h\", \"--help\"): exit_msg(None, 0) if", "http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original file can be found at: # %s #", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General", "Conflict', ] min_body_size = 900000 # Size was ~ 922000 on 2013-08-06 def", "if (len(argv) > 0): svc_url = argv[0] else: svc_url = iana_svc_url try: if", "enough parsed data') out = open(services_file, 'w') out.write('''\\ # This is a local", "continue for pos in positions: del row[pos] row = filter(None, row) comment =", "0) if (len(argv) > 0): svc_url = argv[0] else: svc_url = iana_svc_url try:", "# service names, e.g. TCP port 80 -> http. # # It is", "make-services.py [url] url defaults to %s ''' % (iana_svc_url) import sys import getopt", "headers.index('Port Number') except: pn_pos = 1 try: tp_pos = headers.index('Transport Protocol') except: tp_pos", "getopt import csv import re python_version = sys.hexversion >> 16 if python_version <", "out.write('''\\ # This is a local copy of the IANA port-numbers file. 
#", "[ '^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments = [ 'should not be used for", "# Copyright 2013 <NAME> # # This program is free software; you can", "and/or # modify it under the terms of the GNU General Public License", "else: import urllib.request, urllib.error, urllib.parse import codecs services_file = 'services' exclude_services = [", "continue lines.append('%-15s %5s/%s # %s' % ( service, port, proto, comment )) return", "csv import re python_version = sys.hexversion >> 16 if python_version < 0x300: import", "= '''\\ Usage: make-services.py [url] url defaults to %s ''' % (iana_svc_url) import", "def main(argv): try: opts, args = getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError: exit_msg() for", "getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError: exit_msg() for opt, arg in opts: if opt", "Public License # as published by the Free Software Foundation; either version 2", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the #", "terms of the GNU General Public License # as published by the Free", "Wireshark uses it to resolve port numbers into human readable # service names,", "python_version < 0x206: headers = port_reader.next() else: headers = next(port_reader) try: sn_pos =", "2013 <NAME> # # This program is free software; you can redistribute it", "pos in positions: del row[pos] row = filter(None, row) comment = ' '.join(row)", "sys.hexversion >> 16 if python_version < 0x300: import urllib else: import urllib.request, urllib.error,", "= urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error opening ' + svc_url) body =", "pn_pos = 1 try: tp_pos = headers.index('Transport Protocol') except: tp_pos = 2 positions", "row in port_reader: service = row[sn_pos] port = row[pn_pos] proto = row[tp_pos] if", "re python_version = sys.hexversion >> 16 if python_version < 0x300: import urllib else:", "= 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage: make-services.py [url] url defaults to %s '''", "positions = [sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse() for row in port_reader: service =", "as published by the Free Software Foundation; either version 2 # of the", "port, proto, comment )) return '\\n'.join(lines) def exit_msg(msg=None, status=1): if msg is not", "+ '\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status) def main(argv): try: opts, args = getopt.getopt(argv,", "comment): continue lines.append('%-15s %5s/%s # %s' % ( service, port, proto, comment ))", "The original file can be found at: # %s # %s ''' %", "min_body_size: exit_msg('Not enough parsed data') out = open(services_file, 'w') out.write('''\\ # This is", "sn_pos = 0 try: pn_pos = headers.index('Port Number') except: pn_pos = 1 try:", "Protocol Port Number Registry # and generates a services(5) file. 
# # Wireshark", "python_version < 0x300: svc_fd = urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req)", "data') out = open(services_file, 'w') out.write('''\\ # This is a local copy of", "Wireshark - Network traffic analyzer # By <NAME> <<EMAIL>> # Copyright 2013 <NAME>", "len(service) < 1 or len(port) < 1 or len(proto) < 1: continue for", "it will be useful, # but WITHOUT ANY WARRANTY; without even the implied", "# of the License, or (at your option) any later version. # #", "this program; if not, write to the Free Software # Foundation, Inc., 51", "url defaults to %s ''' % (iana_svc_url) import sys import getopt import csv", "version 2 # of the License, or (at your option) any later version.", "parsed data') out = open(services_file, 'w') out.write('''\\ # This is a local copy", "if msg is not None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status) def", "General Public License for more details. # # You should have received a", "csv.reader(svc_fd) # Header positions as of 2013-08-06 if python_version < 0x206: headers =", "0 try: pn_pos = headers.index('Port Number') except: pn_pos = 1 try: tp_pos =", "sys import getopt import csv import re python_version = sys.hexversion >> 16 if", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See", "if len(service) < 1 or len(port) < 1 or len(proto) < 1: continue", "discovery purposes', 'NOTE Conflict', ] min_body_size = 900000 # Size was ~ 922000", "port_reader: service = row[sn_pos] port = row[pn_pos] proto = row[tp_pos] if len(service) <", "port_reader.next() else: headers = next(port_reader) try: sn_pos = headers.index('Service Name') except: sn_pos =", "# Parses the CSV version of the IANA Service Name and Transport Protocol", "opt, arg in opts: if opt in (\"-h\", \"--help\"): exit_msg(None, 0) if (len(argv)", "svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error opening ' + svc_url) body = parse_rows(svc_fd) if", "Header positions as of 2013-08-06 if python_version < 0x206: headers = port_reader.next() else:", "min_body_size = 900000 # Size was ~ 922000 on 2013-08-06 def parse_rows(svc_fd): lines", "to %s ''' % (iana_svc_url) import sys import getopt import csv import re", "along with this program; if not, write to the Free Software # Foundation,", "the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA", "= 0 try: pn_pos = headers.index('Port Number') except: pn_pos = 1 try: tp_pos", "return '\\n'.join(lines) def exit_msg(msg=None, status=1): if msg is not None: sys.stderr.write(msg + '\\n\\n')", "positions as of 2013-08-06 if python_version < 0x206: headers = port_reader.next() else: headers", "Boston, MA 02110-1301 USA. iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage: make-services.py [url]", "'\\n') sys.exit(status) def main(argv): try: opts, args = getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError:", "< min_body_size: exit_msg('Not enough parsed data') out = open(services_file, 'w') out.write('''\\ # This", "-> http. # # It is subject to copyright and being used with", "the # GNU General Public License for more details. 
# # You should", "proto, comment )) return '\\n'.join(lines) def exit_msg(msg=None, status=1): if msg is not None:", "'services' exclude_services = [ '^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments = [ 'should not", "if python_version < 0x300: svc_fd = urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url) svc_fd =", "or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License", "for more details. # # You should have received a copy of the", "len(body) < min_body_size: exit_msg('Not enough parsed data') out = open(services_file, 'w') out.write('''\\ #", "80 -> http. # # It is subject to copyright and being used", "or (at your option) any later version. # # This program is distributed", "positions.sort() positions.reverse() for row in port_reader: service = row[sn_pos] port = row[pn_pos] proto", "port = row[pn_pos] proto = row[tp_pos] if len(service) < 1 or len(port) <", "urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error opening ' +", "proto = row[tp_pos] if len(service) < 1 or len(port) < 1 or len(proto)", "# Wireshark uses it to resolve port numbers into human readable # service", "Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ =", "except: pn_pos = 1 try: tp_pos = headers.index('Transport Protocol') except: tp_pos = 2", "more details. # # You should have received a copy of the GNU", "16 if python_version < 0x300: import urllib else: import urllib.request, urllib.error, urllib.parse import", "tp_pos = headers.index('Transport Protocol') except: tp_pos = 2 positions = [sn_pos, pn_pos, tp_pos]", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU", "GNU General Public License for more details. 
# # You should have received", "in positions: del row[pos] row = filter(None, row) comment = ' '.join(row) comment", "1 try: tp_pos = headers.index('Transport Protocol') except: tp_pos = 2 positions = [sn_pos,", "# # The original file can be found at: # %s # %s", "headers = next(port_reader) try: sn_pos = headers.index('Service Name') except: sn_pos = 0 try:", "iana_svc_url try: if python_version < 0x300: svc_fd = urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url)", "program is distributed in the hope that it will be useful, # but", "the IANA port-numbers file. # # Wireshark uses it to resolve port numbers", "distributed in the hope that it will be useful, # but WITHOUT ANY", "import urllib.request, urllib.error, urllib.parse import codecs services_file = 'services' exclude_services = [ '^spr-itunes',", "import codecs services_file = 'services' exclude_services = [ '^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments", "2013-08-06 def parse_rows(svc_fd): lines = [] port_reader = csv.reader(svc_fd) # Header positions as", "# Header positions as of 2013-08-06 if python_version < 0x206: headers = port_reader.next()", "sn_pos = headers.index('Service Name') except: sn_pos = 0 try: pn_pos = headers.index('Port Number')", "getopt.GetoptError: exit_msg() for opt, arg in opts: if opt in (\"-h\", \"--help\"): exit_msg(None,", "import csv import re python_version = sys.hexversion >> 16 if python_version < 0x300:", "except: tp_pos = 2 positions = [sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse() for row", "any later version. 
# # This program is distributed in the hope that", "for pos in positions: del row[pos] row = filter(None, row) comment = '", "is not None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status) def main(argv): try:", "# By <NAME> <<EMAIL>> # Copyright 2013 <NAME> # # This program is", "was ~ 922000 on 2013-08-06 def parse_rows(svc_fd): lines = [] port_reader = csv.reader(svc_fd)", "of the License, or (at your option) any later version. # # This", "it to resolve port numbers into human readable # service names, e.g. TCP", "be found at: # %s # %s ''' % (iana_svc_url, body)) if __name__", "used with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original file can be", "services(5) file. # # Wireshark - Network traffic analyzer # By <NAME> <<EMAIL>>", "python_version < 0x300: import urllib else: import urllib.request, urllib.error, urllib.parse import codecs services_file", "This program is free software; you can redistribute it and/or # modify it", "Street, Fifth Floor, Boston, MA 02110-1301 USA. iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\", "PARTICULAR PURPOSE. See the # GNU General Public License for more details. #", "Protocol') except: tp_pos = 2 positions = [sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse() for", "Public License # along with this program; if not, write to the Free", "> 0): svc_url = argv[0] else: svc_url = iana_svc_url try: if python_version <", "= getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError: exit_msg() for opt, arg in opts: if", "row[pos] row = filter(None, row) comment = ' '.join(row) comment = re.sub('[\\n]', '',", "= iana_svc_url try: if python_version < 0x300: svc_fd = urllib.urlopen(svc_url) else: req =", "readable # service names, e.g. TCP port 80 -> http. 
# # It", "hope that it will be useful, # but WITHOUT ANY WARRANTY; without even", "900000 # Size was ~ 922000 on 2013-08-06 def parse_rows(svc_fd): lines = []", "# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. iana_svc_url", "body = parse_rows(svc_fd) if len(body) < min_body_size: exit_msg('Not enough parsed data') out =", ">> 16 if python_version < 0x300: import urllib else: import urllib.request, urllib.error, urllib.parse", "Size was ~ 922000 on 2013-08-06 def parse_rows(svc_fd): lines = [] port_reader =", "on 2013-08-06 def parse_rows(svc_fd): lines = [] port_reader = csv.reader(svc_fd) # Header positions", "It is subject to copyright and being used with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html", "e.g. TCP port 80 -> http. # # It is subject to copyright", "permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original file can be found at: #", "= [ '^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments = [ 'should not be used", "] exclude_comments = [ 'should not be used for discovery purposes', 'NOTE Conflict',", "(at your option) any later version. # # This program is distributed in", "This program is distributed in the hope that it will be useful, #", "useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of #", "except: exit_msg('Error opening ' + svc_url) body = parse_rows(svc_fd) if len(body) < min_body_size:", "GNU General Public License # along with this program; if not, write to", "write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor,", "exclude_services = [ '^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments = [ 'should not be", "USA. 
iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage: make-services.py [url] url defaults to", "% ( service, port, proto, comment )) return '\\n'.join(lines) def exit_msg(msg=None, status=1): if", "received a copy of the GNU General Public License # along with this", "python_version = sys.hexversion >> 16 if python_version < 0x300: import urllib else: import", "re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s", "msg is not None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status) def main(argv):", "can be found at: # %s # %s ''' % (iana_svc_url, body)) if", "to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,", "# %s # %s ''' % (iana_svc_url, body)) if __name__ == \"__main__\": sys.exit(main(sys.argv[1:]))", "<NAME> # # This program is free software; you can redistribute it and/or", "even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "comment )) return '\\n'.join(lines) def exit_msg(msg=None, status=1): if msg is not None: sys.stderr.write(msg", "Floor, Boston, MA 02110-1301 USA. 
iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage: make-services.py", "1: continue for pos in positions: del row[pos] row = filter(None, row) comment", "or len(port) < 1 or len(proto) < 1: continue for pos in positions:", "None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status) def main(argv): try: opts, args", "[] port_reader = csv.reader(svc_fd) # Header positions as of 2013-08-06 if python_version <", "to copyright and being used with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The", "exclude_comments = [ 'should not be used for discovery purposes', 'NOTE Conflict', ]", "being used with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original file can", "# # It is subject to copyright and being used with IANA's permission:", "# # This program is distributed in the hope that it will be", "if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s # %s' % ( service, port, proto,", "Transport Protocol Port Number Registry # and generates a services(5) file. # #", "in opts: if opt in (\"-h\", \"--help\"): exit_msg(None, 0) if (len(argv) > 0):", "into human readable # service names, e.g. TCP port 80 -> http. 
#", ")) return '\\n'.join(lines) def exit_msg(msg=None, status=1): if msg is not None: sys.stderr.write(msg +", "exit_msg(None, 0) if (len(argv) > 0): svc_url = argv[0] else: svc_url = iana_svc_url", "import urllib else: import urllib.request, urllib.error, urllib.parse import codecs services_file = 'services' exclude_services", "copyright and being used with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original", "Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.", "By <NAME> <<EMAIL>> # Copyright 2013 <NAME> # # This program is free", "not None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status) def main(argv): try: opts,", "be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of", "= 900000 # Size was ~ 922000 on 2013-08-06 def parse_rows(svc_fd): lines =", "comment = ' '.join(row) comment = re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services), service): continue", "can redistribute it and/or # modify it under the terms of the GNU", "program; if not, write to the Free Software # Foundation, Inc., 51 Franklin", "= row[pn_pos] proto = row[tp_pos] if len(service) < 1 or len(port) < 1", "service names, e.g. TCP port 80 -> http. # # It is subject", "not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth", "it under the terms of the GNU General Public License # as published", "is distributed in the hope that it will be useful, # but WITHOUT", "urllib.request, urllib.error, urllib.parse import codecs services_file = 'services' exclude_services = [ '^spr-itunes', '^spl-itunes',", "0x300: svc_fd = urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public", "urllib.parse import codecs services_file = 'services' exclude_services = [ '^spr-itunes', '^spl-itunes', '^shilp', ]", "in the hope that it will be useful, # but WITHOUT ANY WARRANTY;", "- Network traffic analyzer # By <NAME> <<EMAIL>> # Copyright 2013 <NAME> #", "pn_pos = headers.index('Port Number') except: pn_pos = 1 try: tp_pos = headers.index('Transport Protocol')", "= 2 positions = [sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse() for row in port_reader:", "FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for", "human readable # service names, e.g. TCP port 80 -> http. # #", "# # Wireshark - Network traffic analyzer # By <NAME> <<EMAIL>> # Copyright", "= headers.index('Transport Protocol') except: tp_pos = 2 positions = [sn_pos, pn_pos, tp_pos] positions.sort()", "# Size was ~ 922000 on 2013-08-06 def parse_rows(svc_fd): lines = [] port_reader", "out = open(services_file, 'w') out.write('''\\ # This is a local copy of the", "Name') except: sn_pos = 0 try: pn_pos = headers.index('Port Number') except: pn_pos =", "the GNU General Public License # along with this program; if not, write", "local copy of the IANA port-numbers file. 
# # Wireshark uses it to", "+ '\\n') sys.exit(status) def main(argv): try: opts, args = getopt.getopt(argv, \"h\", [\"help\"]) except", "# along with this program; if not, write to the Free Software #", "= csv.reader(svc_fd) # Header positions as of 2013-08-06 if python_version < 0x206: headers", "# This program is free software; you can redistribute it and/or # modify", "= 1 try: tp_pos = headers.index('Transport Protocol') except: tp_pos = 2 positions =", "< 0x300: import urllib else: import urllib.request, urllib.error, urllib.parse import codecs services_file =", "Service Name and Transport Protocol Port Number Registry # and generates a services(5)", "re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s # %s' % ( service, port, proto, comment", "original file can be found at: # %s # %s ''' % (iana_svc_url,", "sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status) def main(argv): try: opts, args =", "redistribute it and/or # modify it under the terms of the GNU General", "# This program is distributed in the hope that it will be useful,", "but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or", "urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error opening ' + svc_url) body = parse_rows(svc_fd)", "# # Parses the CSV version of the IANA Service Name and Transport", "svc_url) body = parse_rows(svc_fd) if len(body) < min_body_size: exit_msg('Not enough parsed data') out", "comment = re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments), comment): continue", "by the Free Software Foundation; either version 2 # of the License, or", "python # # Parses the CSV version of the IANA Service Name and", "the Free Software Foundation; either version 2 # of the License, or (at", "[sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse() for row in port_reader: service 
= row[sn_pos] port", "names, e.g. TCP port 80 -> http. # # It is subject to", "a services(5) file. # # Wireshark - Network traffic analyzer # By <NAME>", "row[tp_pos] if len(service) < 1 or len(port) < 1 or len(proto) < 1:", "2 positions = [sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse() for row in port_reader: service", "uses it to resolve port numbers into human readable # service names, e.g.", "of the GNU General Public License # as published by the Free Software", "is subject to copyright and being used with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html #", "except: sn_pos = 0 try: pn_pos = headers.index('Port Number') except: pn_pos = 1", "2 # of the License, or (at your option) any later version. #", "'w') out.write('''\\ # This is a local copy of the IANA port-numbers file.", "if opt in (\"-h\", \"--help\"): exit_msg(None, 0) if (len(argv) > 0): svc_url =", "Usage: make-services.py [url] url defaults to %s ''' % (iana_svc_url) import sys import", "General Public License # as published by the Free Software Foundation; either version", "Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301", "License for more details. # # You should have received a copy of", "exit_msg() for opt, arg in opts: if opt in (\"-h\", \"--help\"): exit_msg(None, 0)", "< 1 or len(port) < 1 or len(proto) < 1: continue for pos", "Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv'", "% (iana_svc_url) import sys import getopt import csv import re python_version = sys.hexversion", "be used for discovery purposes', 'NOTE Conflict', ] min_body_size = 900000 # Size", "'\\n\\n') sys.stderr.write(__doc__ + '\\n') sys.exit(status) def main(argv): try: opts, args = getopt.getopt(argv, \"h\",", "= filter(None, row) comment = ' '.join(row) comment = re.sub('[\\n]', '', comment) if", "generates a services(5) file. # # Wireshark - Network traffic analyzer # By", "%s ''' % (iana_svc_url) import sys import getopt import csv import re python_version", "exit_msg('Error opening ' + svc_url) body = parse_rows(svc_fd) if len(body) < min_body_size: exit_msg('Not", "iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage: make-services.py [url] url defaults to %s", "len(proto) < 1: continue for pos in positions: del row[pos] row = filter(None,", "opt in (\"-h\", \"--help\"): exit_msg(None, 0) if (len(argv) > 0): svc_url = argv[0]", "'should not be used for discovery purposes', 'NOTE Conflict', ] min_body_size = 900000", "if python_version < 0x206: headers = port_reader.next() else: headers = next(port_reader) try: sn_pos", "__doc__ = '''\\ Usage: make-services.py [url] url defaults to %s ''' % (iana_svc_url)", "to resolve port numbers into human readable # service names, e.g. 
TCP port", "if python_version < 0x300: import urllib else: import urllib.request, urllib.error, urllib.parse import codecs", "try: pn_pos = headers.index('Port Number') except: pn_pos = 1 try: tp_pos = headers.index('Transport", "re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s # %s' % (", "< 0x300: svc_fd = urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except:", "is free software; you can redistribute it and/or # modify it under the", "%5s/%s # %s' % ( service, port, proto, comment )) return '\\n'.join(lines) def", "file. # # Wireshark uses it to resolve port numbers into human readable", "<reponame>mahrukhfida/mi<filename>wireshark-2.0.13/tools/make-services.py #!/usr/bin/env python # # Parses the CSV version of the IANA Service", "MA 02110-1301 USA. iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage: make-services.py [url] url", "License # as published by the Free Software Foundation; either version 2 #", "of the IANA Service Name and Transport Protocol Port Number Registry # and", "used for discovery purposes', 'NOTE Conflict', ] min_body_size = 900000 # Size was", "See the # GNU General Public License for more details. 
# # You", "Free Software Foundation; either version 2 # of the License, or (at your", "( service, port, proto, comment )) return '\\n'.join(lines) def exit_msg(msg=None, status=1): if msg", "exit_msg(msg=None, status=1): if msg is not None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ + '\\n')", "or len(proto) < 1: continue for pos in positions: del row[pos] row =", "opts, args = getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError: exit_msg() for opt, arg in", "if len(body) < min_body_size: exit_msg('Not enough parsed data') out = open(services_file, 'w') out.write('''\\", "resolve port numbers into human readable # service names, e.g. TCP port 80", "<<EMAIL>> # Copyright 2013 <NAME> # # This program is free software; you", "your option) any later version. # # This program is distributed in the", "service): continue if re.search('|'.join(exclude_comments), comment): continue lines.append('%-15s %5s/%s # %s' % ( service,", "else: req = urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error opening ' + svc_url)", "next(port_reader) try: sn_pos = headers.index('Service Name') except: sn_pos = 0 try: pn_pos =", "<NAME> <<EMAIL>> # Copyright 2013 <NAME> # # This program is free software;", "%s' % ( service, port, proto, comment )) return '\\n'.join(lines) def exit_msg(msg=None, status=1):", "< 1 or len(proto) < 1: continue for pos in positions: del row[pos]", "# http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original file can be found at: # %s", "= 'services' exclude_services = [ '^spr-itunes', '^spl-itunes', '^shilp', ] exclude_comments = [ 'should", "= urllib.urlopen(svc_url) else: req = urllib.request.urlopen(svc_url) svc_fd = codecs.getreader('utf8')(req) except: exit_msg('Error opening '", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "later version. 
# # This program is distributed in the hope that it", "you can redistribute it and/or # modify it under the terms of the", "\"h\", [\"help\"]) except getopt.GetoptError: exit_msg() for opt, arg in opts: if opt in", "This is a local copy of the IANA port-numbers file. # # Wireshark", "parse_rows(svc_fd): lines = [] port_reader = csv.reader(svc_fd) # Header positions as of 2013-08-06", "WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS", "Number') except: pn_pos = 1 try: tp_pos = headers.index('Transport Protocol') except: tp_pos =", "tp_pos = 2 positions = [sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse() for row in", "' '.join(row) comment = re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services), service): continue if re.search('|'.join(exclude_comments),", "sys.exit(status) def main(argv): try: opts, args = getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError: exit_msg()", "= argv[0] else: svc_url = iana_svc_url try: if python_version < 0x300: svc_fd =", "traffic analyzer # By <NAME> <<EMAIL>> # Copyright 2013 <NAME> # # This", "# # You should have received a copy of the GNU General Public", "'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\\ Usage: make-services.py [url] url defaults to %s ''' %", "= parse_rows(svc_fd) if len(body) < min_body_size: exit_msg('Not enough parsed data') out = open(services_file,", "'''\\ Usage: make-services.py [url] url defaults to %s ''' % (iana_svc_url) import sys", "port 80 -> http. # # It is subject to copyright and being", "FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more", "port-numbers file. 
# # Wireshark uses it to resolve port numbers into human", "have received a copy of the GNU General Public License # along with", "not be used for discovery purposes', 'NOTE Conflict', ] min_body_size = 900000 #", "Copyright 2013 <NAME> # # This program is free software; you can redistribute", "1 or len(port) < 1 or len(proto) < 1: continue for pos in", "= row[tp_pos] if len(service) < 1 or len(port) < 1 or len(proto) <", "Public License for more details. # # You should have received a copy", "urllib.error, urllib.parse import codecs services_file = 'services' exclude_services = [ '^spr-itunes', '^spl-itunes', '^shilp',", "License, or (at your option) any later version. # # This program is", "= [] port_reader = csv.reader(svc_fd) # Header positions as of 2013-08-06 if python_version", "tp_pos] positions.sort() positions.reverse() for row in port_reader: service = row[sn_pos] port = row[pn_pos]", "in (\"-h\", \"--help\"): exit_msg(None, 0) if (len(argv) > 0): svc_url = argv[0] else:", "of the IANA port-numbers file. # # Wireshark uses it to resolve port", "file can be found at: # %s # %s ''' % (iana_svc_url, body))", "port_reader = csv.reader(svc_fd) # Header positions as of 2013-08-06 if python_version < 0x206:", "urllib else: import urllib.request, urllib.error, urllib.parse import codecs services_file = 'services' exclude_services =", "copy of the GNU General Public License # along with this program; if", "General Public License # along with this program; if not, write to the", "positions: del row[pos] row = filter(None, row) comment = ' '.join(row) comment =", "for row in port_reader: service = row[sn_pos] port = row[pn_pos] proto = row[tp_pos]", "port numbers into human readable # service names, e.g. TCP port 80 ->", "and generates a services(5) file. 
# # Wireshark - Network traffic analyzer #", "def exit_msg(msg=None, status=1): if msg is not None: sys.stderr.write(msg + '\\n\\n') sys.stderr.write(__doc__ +", "svc_url = iana_svc_url try: if python_version < 0x300: svc_fd = urllib.urlopen(svc_url) else: req", "= [sn_pos, pn_pos, tp_pos] positions.sort() positions.reverse() for row in port_reader: service = row[sn_pos]", "row) comment = ' '.join(row) comment = re.sub('[\\n]', '', comment) if re.search('|'.join(exclude_services), service):", "try: opts, args = getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError: exit_msg() for opt, arg", "will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty", "# # Wireshark uses it to resolve port numbers into human readable #", "main(argv): try: opts, args = getopt.getopt(argv, \"h\", [\"help\"]) except getopt.GetoptError: exit_msg() for opt,", "if not, write to the Free Software # Foundation, Inc., 51 Franklin Street,", "analyzer # By <NAME> <<EMAIL>> # Copyright 2013 <NAME> # # This program", "try: tp_pos = headers.index('Transport Protocol') except: tp_pos = 2 positions = [sn_pos, pn_pos,", "2013-08-06 if python_version < 0x206: headers = port_reader.next() else: headers = next(port_reader) try:", "Parses the CSV version of the IANA Service Name and Transport Protocol Port", "# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "~ 922000 on 2013-08-06 def parse_rows(svc_fd): lines = [] port_reader = csv.reader(svc_fd) #", "and Transport Protocol Port Number Registry # and generates a services(5) file. #", "with IANA's permission: # http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html # # The original file can be found", "] min_body_size = 900000 # Size was ~ 922000 on 2013-08-06 def parse_rows(svc_fd):" ]
[ "from string import Template TEMPLATE=''' { \"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\",", "\"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\": { \"id\": { \"S\": \"$id\" } }, \"NewImage\":", "f.read() tx = json.loads(data) txId = tx['hash'] print Template(TEMPLATE).substitute( id=txId, coin=coin, tx=data.replace('\"', '\\\\\"').replace('\\n',", "{ \"id\": { \"S\": \"$id\" }, \"currency\": { \"S\": \"$coin\" }, \"tx\": {", "'r') as f: data = f.read() tx = json.loads(data) txId = tx['hash'] print", "python webhook/event.py COIN tx.json # where coin is BTC or ETH import json", "= f.read() tx = json.loads(data) txId = tx['hash'] print Template(TEMPLATE).substitute( id=txId, coin=coin, tx=data.replace('\"',", "\"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } ''' coin =", "} }, \"NewImage\": { \"id\": { \"S\": \"$id\" }, \"currency\": { \"S\": \"$coin\"", "{ \"Keys\": { \"id\": { \"S\": \"$id\" } }, \"NewImage\": { \"id\": {", "import Template TEMPLATE=''' { \"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\",", "\"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\": { \"id\": { \"S\":", "\"NewImage\": { \"id\": { \"S\": \"$id\" }, \"currency\": { \"S\": \"$coin\" }, \"tx\":", "}, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } ''' coin = sys.argv[1] txFile = sys.argv[2]", "\"S\": \"$coin\" }, \"tx\": { \"S\": \"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\",", "\"currency\": { \"S\": \"$coin\" }, \"tx\": { \"S\": \"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600,", "import json import sys from string import Template TEMPLATE=''' { 
\"Records\": [ {", "\"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } '''", "\"us-west-2\", \"dynamodb\": { \"Keys\": { \"id\": { \"S\": \"$id\" } }, \"NewImage\": {", "\"S\": \"$id\" }, \"currency\": { \"S\": \"$coin\" }, \"tx\": { \"S\": \"$tx\" }", "f: data = f.read() tx = json.loads(data) txId = tx['hash'] print Template(TEMPLATE).substitute( id=txId,", "\"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } ''' coin = sys.argv[1] txFile = sys.argv[2] with open(txFile,", "''' coin = sys.argv[1] txFile = sys.argv[2] with open(txFile, 'r') as f: data", "\"Keys\": { \"id\": { \"S\": \"$id\" } }, \"NewImage\": { \"id\": { \"S\":", "sys.argv[1] txFile = sys.argv[2] with open(txFile, 'r') as f: data = f.read() tx", "dynamo event for local testing. # Usage: python webhook/event.py COIN tx.json # where", "json import sys from string import Template TEMPLATE=''' { \"Records\": [ { \"eventID\":", "{ \"id\": { \"S\": \"$id\" } }, \"NewImage\": { \"id\": { \"S\": \"$id\"", "\"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\": { \"id\": { \"S\": \"$id\" } },", "\"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\":", "TEMPLATE=''' { \"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\",", "{ \"S\": \"$id\" }, \"currency\": { \"S\": \"$coin\" }, \"tx\": { \"S\": \"$tx\"", "data = f.read() tx = json.loads(data) txId = tx['hash'] print Template(TEMPLATE).substitute( id=txId, coin=coin,", "coin = sys.argv[1] txFile = sys.argv[2] with open(txFile, 'r') as f: data =", "}, \"currency\": { \"S\": \"$coin\" }, \"tx\": { 
\"S\": \"$tx\" } }, \"ApproximateCreationDateTime\":", "ETH import json import sys from string import Template TEMPLATE=''' { \"Records\": [", "\"id\": { \"S\": \"$id\" } }, \"NewImage\": { \"id\": { \"S\": \"$id\" },", "{ \"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\":", "\"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } ''' coin = sys.argv[1] txFile = sys.argv[2] with", "} ] } ''' coin = sys.argv[1] txFile = sys.argv[2] with open(txFile, 'r')", "= sys.argv[1] txFile = sys.argv[2] with open(txFile, 'r') as f: data = f.read()", "import sys from string import Template TEMPLATE=''' { \"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\",", "\"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\": { \"id\":", "\"S\": \"$id\" } }, \"NewImage\": { \"id\": { \"S\": \"$id\" }, \"currency\": {", "}, \"NewImage\": { \"id\": { \"S\": \"$id\" }, \"currency\": { \"S\": \"$coin\" },", "testing. # Usage: python webhook/event.py COIN tx.json # where coin is BTC or", "where coin is BTC or ETH import json import sys from string import", "= json.loads(data) txId = tx['hash'] print Template(TEMPLATE).substitute( id=txId, coin=coin, tx=data.replace('\"', '\\\\\"').replace('\\n', '\\\\n') )", "\"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } ''' coin", "# Generate a dynamo event for local testing. 
# Usage: python webhook/event.py COIN", "{ \"S\": \"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\"", "\"S\": \"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" },", "{ \"S\": \"$id\" } }, \"NewImage\": { \"id\": { \"S\": \"$id\" }, \"currency\":", "coin is BTC or ETH import json import sys from string import Template", "Template TEMPLATE=''' { \"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\":", "\"$id\" }, \"currency\": { \"S\": \"$coin\" }, \"tx\": { \"S\": \"$tx\" } },", "Usage: python webhook/event.py COIN tx.json # where coin is BTC or ETH import", "\"id\": { \"S\": \"$id\" }, \"currency\": { \"S\": \"$coin\" }, \"tx\": { \"S\":", "1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] }", "\"tx\": { \"S\": \"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\":", "\"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\":", "\"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\",", "sys from string import Template TEMPLATE=''' { \"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\":", "\"dynamodb\": { \"Keys\": { \"id\": { \"S\": \"$id\" } }, \"NewImage\": { \"id\":", "webhook/event.py COIN 
tx.json # where coin is BTC or ETH import json import", "}, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" }", "txFile = sys.argv[2] with open(txFile, 'r') as f: data = f.read() tx =", "= sys.argv[2] with open(txFile, 'r') as f: data = f.read() tx = json.loads(data)", "COIN tx.json # where coin is BTC or ETH import json import sys", "tx = json.loads(data) txId = tx['hash'] print Template(TEMPLATE).substitute( id=txId, coin=coin, tx=data.replace('\"', '\\\\\"').replace('\\n', '\\\\n')", "\"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } ''' coin = sys.argv[1] txFile", "{ \"S\": \"$coin\" }, \"tx\": { \"S\": \"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\":", "}, \"tx\": { \"S\": \"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0,", "} }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\"", "# where coin is BTC or ETH import json import sys from string", "[ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\":", "{ \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": {", "a dynamo event for local testing. 
# Usage: python webhook/event.py COIN tx.json #", "\"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\": { \"id\": { \"S\": \"$id\" }", "# Usage: python webhook/event.py COIN tx.json # where coin is BTC or ETH", "\"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\": { \"id\": { \"S\": \"$id\"", "for local testing. # Usage: python webhook/event.py COIN tx.json # where coin is", "} ''' coin = sys.argv[1] txFile = sys.argv[2] with open(txFile, 'r') as f:", "0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } ''' coin = sys.argv[1]", "tx.json # where coin is BTC or ETH import json import sys from", "\"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\": {", "open(txFile, 'r') as f: data = f.read() tx = json.loads(data) txId = tx['hash']", "is BTC or ETH import json import sys from string import Template TEMPLATE='''", "event for local testing. # Usage: python webhook/event.py COIN tx.json # where coin", "BTC or ETH import json import sys from string import Template TEMPLATE=''' {", "or ETH import json import sys from string import Template TEMPLATE=''' { \"Records\":", "] } ''' coin = sys.argv[1] txFile = sys.argv[2] with open(txFile, 'r') as", "local testing. 
# Usage: python webhook/event.py COIN tx.json # where coin is BTC", "string import Template TEMPLATE=''' { \"Records\": [ { \"eventID\": \"c4ca4238a0b923820dcc509a6f75849b\", \"eventName\": \"INSERT\", \"eventVersion\":", "\"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\": 0, \"StreamViewType\": \"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ]", "sys.argv[2] with open(txFile, 'r') as f: data = f.read() tx = json.loads(data) txId", "Generate a dynamo event for local testing. # Usage: python webhook/event.py COIN tx.json", "as f: data = f.read() tx = json.loads(data) txId = tx['hash'] print Template(TEMPLATE).substitute(", "\"$id\" } }, \"NewImage\": { \"id\": { \"S\": \"$id\" }, \"currency\": { \"S\":", "with open(txFile, 'r') as f: data = f.read() tx = json.loads(data) txId =", "\"$coin\" }, \"tx\": { \"S\": \"$tx\" } }, \"ApproximateCreationDateTime\": 1428537600, \"SequenceNumber\": \"4421584500000000017450439091\", \"SizeBytes\":", "\"INSERT\", \"eventVersion\": \"1.1\", \"eventSource\": \"aws:dynamodb\", \"awsRegion\": \"us-west-2\", \"dynamodb\": { \"Keys\": { \"id\": {", "<filename>webhook/event.py # Generate a dynamo event for local testing. # Usage: python webhook/event.py", "\"NEW_IMAGE\" }, \"eventSourceARN\": \"arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899\" } ] } ''' coin = sys.argv[1] txFile =" ]
[ "f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description = f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed =", "Крюгером \"\"\" description = f\"Состязание между стримерами **Stalk** и **CRYGER**, проходящее на *Trovo*\\n\"", "= utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if emoji is not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\")", "*message): \"\"\" — Выложить арт в исскуство \"\"\" like, dislike = library.get.likes(ctx) if", "@commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686)) async def art(self,", "проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\", "ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner()", "ctx.guild.id == 754063467610374224: # RU HS art_id = 766035868321710081 else: art_id = 845658540341592099", "Сталком и Крюгером \"\"\" description = f\"Состязание между стримерами **Stalk** и **CRYGER**, проходящее", "и **CRYGER**, проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал", "if emoji is not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466),", "= f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed = Embed( title=\"Новый арт!\", description=description, color=config.info )", "f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" 
embed = Embed( title=\"Heroes Race Challenge\", description=description )", "\"\"\" @commands.command(name=\"hrc\") async def hrc(self, ctx): \"\"\" - Правила челленджа между Сталком и", "color=config.info ) url = ctx.message.attachments[0].url embed.set_image(url=url) msg = await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await", "ctx, emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if emoji is not None:", "@commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686)) async def art(self, ctx,", "забыли добавить изображение\") @test_art.error @art.error async def ruhots_handler(self, ctx, error): if isinstance(error, commands.errors.MissingRole):", "Embed, utils from discord.ext import commands from utils import library from utils.classes.Const import", "= 708678722127134810 elif ctx.guild.id == 754063467610374224: # RU HS art_id = 766035868321710081 else:", "между стримерами **Stalk** и **CRYGER**, проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со", "msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли добавить изображение\") @test_art.error @art.error async def", "пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self,", "CRYGER](https://trovo.live/CRYGER)\" embed = Embed( title=\"Heroes Race Challenge\", description=description ) await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791),", "Heroes of the Storm \"\"\" from discord import Embed, utils from discord.ext import", "Ruhots(commands.Cog): \"\"\" — Команды для отдельных серверов \"\"\" @commands.command(name=\"hrc\") async def hrc(self, ctx):", 
"title=\"Heroes Race Challenge\", description=description ) await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466)", "await ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\")", "discord.ext import commands from utils import library from utils.classes.Const import config clear =", "@art.error async def ruhots_handler(self, ctx, error): if isinstance(error, commands.errors.MissingRole): await ctx.send(\"Требуется Роль 'Художник'\")", "https://github.com/fennr/Samuro-HotsBot Бот для сообществ по игре Heroes of the Storm \"\"\" from discord", "ctx.guild.id == 642852514865217578: # RU hots art_id = 708678722127134810 elif ctx.guild.id == 754063467610374224:", "= 845658540341592099 art_channel = utils.get(ctx.guild.channels, id=art_id) if len(message) > 0: description = f\"**Автор:**", "await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466) # ru hs ) async", ") await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466) # ru hs )", "is not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466), # ru", "def art(self, ctx, *message): \"\"\" — Выложить арт в исскуство \"\"\" like, dislike", "{' '.join(message)}\" else: description = f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed = Embed( title=\"Новый", "if ctx.message.attachments: embed = Embed( title=\"Новый арт!\", description=description, color=config.info ) url = ctx.message.attachments[0].url", "await 
ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686)) async", "роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async", "art_id = 845658540341592099 art_channel = utils.get(ctx.guild.channels, id=art_id) if len(message) > 0: description =", "- Правила челленджа между Сталком и Крюгером \"\"\" description = f\"Состязание между стримерами", "async def ruhots_handler(self, ctx, error): if isinstance(error, commands.errors.MissingRole): await ctx.send(\"Требуется Роль 'Художник'\") def", "ruhots_handler(self, ctx, error): if isinstance(error, commands.errors.MissingRole): await ctx.send(\"Требуется Роль 'Художник'\") def setup(bot): bot.add_cog(Ruhots(bot))", "642852514865217578: # RU hots art_id = 708678722127134810 elif ctx.guild.id == 754063467610374224: # RU", "отдельных серверов \"\"\" @commands.command(name=\"hrc\") async def hrc(self, ctx): \"\"\" - Правила челленджа между", "hs commands.has_role(880865537058545686)) async def art(self, ctx, *message): \"\"\" — Выложить арт в исскуство", "# RU HS art_id = 766035868321710081 else: art_id = 845658540341592099 art_channel = utils.get(ctx.guild.channels,", "type(emoji)) if emoji is not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots", "from utils import library from utils.classes.Const import config clear = '\\u200b' class Ruhots(commands.Cog):", "commands.has_role(830972263749779466) # ru hs ) async def test_art(self, ctx): await ctx.send(\"Проверка роли художник", "not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots 
commands.has_role(830972263749779466), # ru hs", "статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed = Embed( title=\"Heroes Race Challenge\",", "\"\"\" - Правила челленджа между Сталком и Крюгером \"\"\" description = f\"Состязание между", "арт в исскуство \"\"\" like, dislike = library.get.likes(ctx) if ctx.guild.id == 642852514865217578: #", "hots art_id = 708678722127134810 elif ctx.guild.id == 754063467610374224: # RU HS art_id =", "art_channel = utils.get(ctx.guild.channels, id=art_id) if len(message) > 0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {'", "и Крюгером \"\"\" description = f\"Состязание между стримерами **Stalk** и **CRYGER**, проходящее на", "\\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed = Embed( title=\"Heroes Race Challenge\", description=description ) await ctx.send(embed=embed)", "Embed( title=\"Новый арт!\", description=description, color=config.info ) url = ctx.message.attachments[0].url embed.set_image(url=url) msg = await", "— Выложить арт в исскуство \"\"\" like, dislike = library.get.likes(ctx) if ctx.guild.id ==", "config clear = '\\u200b' class Ruhots(commands.Cog): \"\"\" — Команды для отдельных серверов \"\"\"", "utils from discord.ext import commands from utils import library from utils.classes.Const import config", "discord import Embed, utils from discord.ext import commands from utils import library from", "import library from utils.classes.Const import config clear = '\\u200b' class Ruhots(commands.Cog): \"\"\" —", "'\\u200b' class Ruhots(commands.Cog): \"\"\" — Команды для отдельных серверов \"\"\" @commands.command(name=\"hrc\") async def", "async def hrc(self, ctx): \"\"\" - Правила челленджа между Сталком и Крюгером \"\"\"", "description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description = f\"**Автор:** {ctx.author.mention}\" if 
ctx.message.attachments:", ") url = ctx.message.attachments[0].url embed.set_image(url=url) msg = await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike)", "*fennr* github: https://github.com/fennr/Samuro-HotsBot Бот для сообществ по игре Heroes of the Storm \"\"\"", "commands from utils import library from utils.classes.Const import config clear = '\\u200b' class", "— Команды для отдельных серверов \"\"\" @commands.command(name=\"hrc\") async def hrc(self, ctx): \"\"\" -", "ru hots commands.has_role(830972263749779466) # ru hs ) async def test_art(self, ctx): await ctx.send(\"Проверка", "utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if emoji is not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791),", "emoji is not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466), #", "def ruhots_handler(self, ctx, error): if isinstance(error, commands.errors.MissingRole): await ctx.send(\"Требуется Роль 'Художник'\") def setup(bot):", "'.join(message)}\" else: description = f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed = Embed( title=\"Новый арт!\",", "RU HS art_id = 766035868321710081 else: art_id = 845658540341592099 art_channel = utils.get(ctx.guild.channels, id=art_id)", "title=\"Новый арт!\", description=description, color=config.info ) url = ctx.message.attachments[0].url embed.set_image(url=url) msg = await art_channel.send(embed=embed)", "from utils.classes.Const import config clear = '\\u200b' class Ruhots(commands.Cog): \"\"\" — Команды для", "the Storm \"\"\" from discord import Embed, utils from discord.ext import commands from", "добавить изображение\") @test_art.error @art.error async def ruhots_handler(self, ctx, error): if isinstance(error, 
commands.errors.MissingRole): await", "\\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed", "\\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed = Embed( title=\"Heroes Race Challenge\", description=description", "\"\"\" from discord import Embed, utils from discord.ext import commands from utils import", "utils.classes.Const import config clear = '\\u200b' class Ruhots(commands.Cog): \"\"\" — Команды для отдельных", "Автор: *fennr* github: https://github.com/fennr/Samuro-HotsBot Бот для сообществ по игре Heroes of the Storm", "class Ruhots(commands.Cog): \"\"\" — Команды для отдельных серверов \"\"\" @commands.command(name=\"hrc\") async def hrc(self,", "со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed = Embed( title=\"Heroes Race", "name=emoji_str) print(emoji, type(emoji)) if emoji is not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), #", "commands.has_role(880865537058545686)) async def art(self, ctx, *message): \"\"\" — Выложить арт в исскуство \"\"\"", "description=description, color=config.info ) url = ctx.message.attachments[0].url embed.set_image(url=url) msg = await art_channel.send(embed=embed) await msg.add_reaction(emoji=like)", "elif ctx.guild.id == 754063467610374224: # RU HS art_id = 766035868321710081 else: art_id =", "сообществ по игре Heroes of the Storm \"\"\" from discord import Embed, utils", "utils import library from utils.classes.Const import config clear = '\\u200b' class Ruhots(commands.Cog): \"\"\"", "of the Storm \"\"\" from discord import Embed, utils from discord.ext import commands", "description=description ) await ctx.send(embed=embed) 
@commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466) # ru hs", "def test_art(self, ctx): await ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self,", "def get_emoji(self, ctx, emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if emoji is", "art(self, ctx, *message): \"\"\" — Выложить арт в исскуство \"\"\" like, dislike =", "ctx.message.attachments[0].url embed.set_image(url=url) msg = await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы", "async def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self, ctx, emoji_str): emoji", "None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686))", "челленджа между Сталком и Крюгером \"\"\" description = f\"Состязание между стримерами **Stalk** и", "hots commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686)) async def art(self, ctx, *message): \"\"\" —", "ctx): \"\"\" - Правила челленджа между Сталком и Крюгером \"\"\" description = f\"Состязание", "library from utils.classes.Const import config clear = '\\u200b' class Ruhots(commands.Cog): \"\"\" — Команды", "\\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed = Embed(", "async def test_art(self, ctx): await ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def", "def hrc(self, ctx): \"\"\" - Правила челленджа между Сталком и Крюгером \"\"\" description", "= 
'\\u200b' class Ruhots(commands.Cog): \"\"\" — Команды для отдельных серверов \"\"\" @commands.command(name=\"hrc\") async", "msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли добавить изображение\") @test_art.error @art.error async def ruhots_handler(self, ctx,", "hots commands.has_role(830972263749779466) # ru hs ) async def test_art(self, ctx): await ctx.send(\"Проверка роли", "if ctx.guild.id == 642852514865217578: # RU hots art_id = 708678722127134810 elif ctx.guild.id ==", "ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466) # ru hs ) async def", "Команды для отдельных серверов \"\"\" @commands.command(name=\"hrc\") async def hrc(self, ctx): \"\"\" - Правила", "для сообществ по игре Heroes of the Storm \"\"\" from discord import Embed,", "await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли добавить изображение\") @test_art.error", "# RU hots art_id = 708678722127134810 elif ctx.guild.id == 754063467610374224: # RU HS", "= await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли добавить изображение\")", "на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал", "# ru hots commands.has_role(830972263749779466) # ru hs ) async def test_art(self, ctx): await", "description = f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed = Embed( title=\"Новый арт!\", description=description, color=config.info", "async def art(self, ctx, *message): \"\"\" — Выложить арт в исскуство \"\"\" like,", "= 766035868321710081 else: art_id = 845658540341592099 art_channel = utils.get(ctx.guild.channels, id=art_id) if len(message) 
>", "ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self, ctx, emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str)", "**Stalk** и **CRYGER**, проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\", "художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def", "**CRYGER**, проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\"", "исскуство \"\"\" like, dislike = library.get.likes(ctx) if ctx.guild.id == 642852514865217578: # RU hots", "f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed = Embed( title=\"Новый арт!\", description=description, color=config.info ) url", "msg = await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли добавить", "await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли добавить изображение\") @test_art.error @art.error async def ruhots_handler(self,", "= library.get.likes(ctx) if ctx.guild.id == 642852514865217578: # RU hots art_id = 708678722127134810 elif", "embed = Embed( title=\"Новый арт!\", description=description, color=config.info ) url = ctx.message.attachments[0].url embed.set_image(url=url) msg", "library.get.likes(ctx) if ctx.guild.id == 642852514865217578: # RU hots art_id = 708678722127134810 elif ctx.guild.id", "else: await ctx.send(\"Вы забыли добавить изображение\") @test_art.error @art.error async def ruhots_handler(self, ctx, error):", "# ru hs commands.has_role(880865537058545686)) async def art(self, ctx, *message): \"\"\" — Выложить арт", "else: art_id = 845658540341592099 
art_channel = utils.get(ctx.guild.channels, id=art_id) if len(message) > 0: description", "Samuro Bot Автор: *fennr* github: https://github.com/fennr/Samuro-HotsBot Бот для сообществ по игре Heroes of", "embed.set_image(url=url) msg = await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли", "= f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description = f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed", "from discord import Embed, utils from discord.ext import commands from utils import library", "{ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description = f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed = Embed(", "commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686)) async def art(self, ctx, *message): \"\"\" — Выложить", "ru hs commands.has_role(880865537058545686)) async def art(self, ctx, *message): \"\"\" — Выложить арт в", "@commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466) # ru hs ) async def test_art(self,", "get_emoji(self, ctx, emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if emoji is not", "f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed = Embed( title=\"Heroes Race Challenge\", description=description ) await ctx.send(embed=embed) @commands.command(name=\"test_art\")", "github: https://github.com/fennr/Samuro-HotsBot Бот для сообществ по игре Heroes of the Storm \"\"\" from", "\"\"\" description = f\"Состязание между стримерами **Stalk** и **CRYGER**, проходящее на *Trovo*\\n\" \\", "= ctx.message.attachments[0].url embed.set_image(url=url) msg = await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await", "Challenge\", 
description=description ) await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466) # ru", "> 0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description = f\"**Автор:** {ctx.author.mention}\"", "@test_art.error @art.error async def ruhots_handler(self, ctx, error): if isinstance(error, commands.errors.MissingRole): await ctx.send(\"Требуется Роль", "@commands.is_owner() async def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self, ctx, emoji_str):", "test_art(self, ctx): await ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self, ctx):", "description = f\"Состязание между стримерами **Stalk** и **CRYGER**, проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\"", "изображение\") @test_art.error @art.error async def ruhots_handler(self, ctx, error): if isinstance(error, commands.errors.MissingRole): await ctx.send(\"Требуется", "else: description = f\"**Автор:** {ctx.author.mention}\" if ctx.message.attachments: embed = Embed( title=\"Новый арт!\", description=description,", "from discord.ext import commands from utils import library from utils.classes.Const import config clear", "emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self, ctx, emoji_str): emoji = utils.get(ctx.guild.emojis,", "Storm \"\"\" from discord import Embed, utils from discord.ext import commands from utils", "# ru hots commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686)) async def art(self, ctx, *message):", "Выложить арт в исскуство \"\"\" like, dislike = library.get.likes(ctx) if ctx.guild.id == 642852514865217578:", "\"\"\" — Команды для отдельных серверов 
\"\"\" @commands.command(name=\"hrc\") async def hrc(self, ctx): \"\"\"", "emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if emoji is not None: await", "ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686)) async def", "по игре Heroes of the Storm \"\"\" from discord import Embed, utils from", "Bot Автор: *fennr* github: https://github.com/fennr/Samuro-HotsBot Бот для сообществ по игре Heroes of the", "*Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\"", "@commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self, ctx,", "print(emoji, type(emoji)) if emoji is not None: await ctx.send(f\"{emoji}\") @commands.command(name=\"art\") @commands.check_any(commands.has_role(825399436863733791), # ru", "Бот для сообществ по игре Heroes of the Storm \"\"\" from discord import", "@commands.command(name=\"hrc\") async def hrc(self, ctx): \"\"\" - Правила челленджа между Сталком и Крюгером", "like, dislike = library.get.likes(ctx) if ctx.guild.id == 642852514865217578: # RU hots art_id =", "dislike = library.get.likes(ctx) if ctx.guild.id == 642852514865217578: # RU hots art_id = 708678722127134810", "754063467610374224: # RU HS art_id = 766035868321710081 else: art_id = 845658540341592099 art_channel =", "f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed = Embed( title=\"Heroes", "Правила челленджа между Сталком и Крюгером \"\"\" description = f\"Состязание между 
стримерами **Stalk**", "766035868321710081 else: art_id = 845658540341592099 art_channel = utils.get(ctx.guild.channels, id=art_id) if len(message) > 0:", ") async def test_art(self, ctx): await ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async", "ctx): await ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner() async def emoji(self, ctx): print(ctx.guild.emojis)", "\"\"\" — Выложить арт в исскуство \"\"\" like, dislike = library.get.likes(ctx) if ctx.guild.id", "@commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self, ctx, emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji))", "f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\" \\ f\"[Канал Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed =", "в исскуство \"\"\" like, dislike = library.get.likes(ctx) if ctx.guild.id == 642852514865217578: # RU", "{ctx.author.mention}\" if ctx.message.attachments: embed = Embed( title=\"Новый арт!\", description=description, color=config.info ) url =", "HS art_id = 766035868321710081 else: art_id = 845658540341592099 art_channel = utils.get(ctx.guild.channels, id=art_id) if", "# ru hs ) async def test_art(self, ctx): await ctx.send(\"Проверка роли художник пройдена\")", "def emoji(self, ctx): print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self, ctx, emoji_str): emoji =", "@commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466) # ru hs ) async def test_art(self, ctx):", "= Embed( title=\"Новый арт!\", description=description, color=config.info ) url = ctx.message.attachments[0].url embed.set_image(url=url) msg =", "для отдельных серверов \"\"\" @commands.command(name=\"hrc\") async def hrc(self, ctx): \"\"\" - 
Правила челленджа", "Race Challenge\", description=description ) await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots commands.has_role(830972263749779466) #", "emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if emoji is not None: await ctx.send(f\"{emoji}\")", "ctx.message.attachments: embed = Embed( title=\"Новый арт!\", description=description, color=config.info ) url = ctx.message.attachments[0].url embed.set_image(url=url)", "между Сталком и Крюгером \"\"\" description = f\"Состязание между стримерами **Stalk** и **CRYGER**,", "len(message) > 0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description = f\"**Автор:**", "url = ctx.message.attachments[0].url embed.set_image(url=url) msg = await art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else:", "\"\"\"\" Samuro Bot Автор: *fennr* github: https://github.com/fennr/Samuro-HotsBot Бот для сообществ по игре Heroes", "ctx.send(\"Вы забыли добавить изображение\") @test_art.error @art.error async def ruhots_handler(self, ctx, error): if isinstance(error,", "@commands.is_owner() async def get_emoji(self, ctx, emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if", "= utils.get(ctx.guild.channels, id=art_id) if len(message) > 0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\"", "ru hs ) async def test_art(self, ctx): await ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\")", "clear = '\\u200b' class Ruhots(commands.Cog): \"\"\" — Команды для отдельных серверов \"\"\" @commands.command(name=\"hrc\")", "hrc(self, ctx): \"\"\" - Правила челленджа между Сталком и Крюгером \"\"\" description =", "art_id = 766035868321710081 else: art_id = 845658540341592099 art_channel = utils.get(ctx.guild.channels, 
id=art_id) if len(message)", "= Embed( title=\"Heroes Race Challenge\", description=description ) await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru", "await ctx.send(\"Вы забыли добавить изображение\") @test_art.error @art.error async def ruhots_handler(self, ctx, error): if", "== 754063467610374224: # RU HS art_id = 766035868321710081 else: art_id = 845658540341592099 art_channel", "f\"Состязание между стримерами **Stalk** и **CRYGER**, проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица", "845658540341592099 art_channel = utils.get(ctx.guild.channels, id=art_id) if len(message) > 0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:**", "await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли добавить изображение\") @test_art.error @art.error async", "стримерами **Stalk** и **CRYGER**, проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\ f\"[Таблица со статистикой](https://bit.ly/HeroesRC)\\n\"", "ru hots commands.has_role(830972263749779466), # ru hs commands.has_role(880865537058545686)) async def art(self, ctx, *message): \"\"\"", "Stalk](https://trovo.live/stlk)\\n\" \\ f\"[Канал CRYGER](https://trovo.live/CRYGER)\" embed = Embed( title=\"Heroes Race Challenge\", description=description ) await", "import commands from utils import library from utils.classes.Const import config clear = '\\u200b'", "hs ) async def test_art(self, ctx): await ctx.send(\"Проверка роли художник пройдена\") @commands.command(name=\"emoji\") @commands.is_owner()", "708678722127134810 elif ctx.guild.id == 754063467610374224: # RU HS art_id = 766035868321710081 else: art_id", "RU hots art_id = 708678722127134810 elif ctx.guild.id == 754063467610374224: # RU HS art_id", "серверов \"\"\" @commands.command(name=\"hrc\") async def hrc(self, ctx): \"\"\" - Правила челленджа между 
Сталком", "print(ctx.guild.emojis) @commands.command(name=\"get_emoji\") @commands.is_owner() async def get_emoji(self, ctx, emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji,", "= f\"Состязание между стримерами **Stalk** и **CRYGER**, проходящее на *Trovo*\\n\" \\ f\"[Правила](https://discord.gg/jRrxwSWBQY)\\n\" \\", "<filename>cogs/guilds.py \"\"\"\" Samuro Bot Автор: *fennr* github: https://github.com/fennr/Samuro-HotsBot Бот для сообществ по игре", "async def get_emoji(self, ctx, emoji_str): emoji = utils.get(ctx.guild.emojis, name=emoji_str) print(emoji, type(emoji)) if emoji", "Embed( title=\"Heroes Race Challenge\", description=description ) await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), # ru hots", "\"\"\" like, dislike = library.get.likes(ctx) if ctx.guild.id == 642852514865217578: # RU hots art_id", "== 642852514865217578: # RU hots art_id = 708678722127134810 elif ctx.guild.id == 754063467610374224: #", "id=art_id) if len(message) > 0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description", "ctx, *message): \"\"\" — Выложить арт в исскуство \"\"\" like, dislike = library.get.likes(ctx)", "арт!\", description=description, color=config.info ) url = ctx.message.attachments[0].url embed.set_image(url=url) msg = await art_channel.send(embed=embed) await", "art_channel.send(embed=embed) await msg.add_reaction(emoji=like) await msg.add_reaction(emoji=dislike) else: await ctx.send(\"Вы забыли добавить изображение\") @test_art.error @art.error", "игре Heroes of the Storm \"\"\" from discord import Embed, utils from discord.ext", "art_id = 708678722127134810 elif ctx.guild.id == 754063467610374224: # RU HS art_id = 766035868321710081", "0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description = f\"**Автор:** {ctx.author.mention}\" if", 
"utils.get(ctx.guild.channels, id=art_id) if len(message) > 0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else:", "if len(message) > 0: description = f\"**Автор:** {ctx.author.mention}\\n**Комментарий:** {' '.join(message)}\" else: description =", "import Embed, utils from discord.ext import commands from utils import library from utils.classes.Const", "import config clear = '\\u200b' class Ruhots(commands.Cog): \"\"\" — Команды для отдельных серверов", "embed = Embed( title=\"Heroes Race Challenge\", description=description ) await ctx.send(embed=embed) @commands.command(name=\"test_art\") @commands.check_any(commands.has_role(825399436863733791), #" ]
[ "clinical_significance: print \"varaint\" + id_names + \"is a\" + consequence_new + \",\" +", "== \"gene\": gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name == gene_name_matches[0]: gene_id_matches =", "i['id'] consequence_type = i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \") clinical_significance = i['clinical_significance'] if clinical_significance:", "+ consequence_new + \",\" + \" and is clinically \" + clinical_significance[0].upper() else:", "\" + clinical_significance[0].upper() else: print \"varaint\" + id_names + \"is a\" + consequence_new", "if user_gene_name == gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0]", "i in json_obj: id_names = i['id'] consequence_type = i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \")", "the gene name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line)", "+ \",\" + \" and is clinically \" + clinical_significance[0].upper() else: print \"varaint\"", "i['clinical_significance'] if clinical_significance: print \"varaint\" + id_names + \"is a\" + consequence_new +", "import urllib user_gene_name = raw_input('Enter the gene name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if", "id_names + \"is a\" + consequence_new + \",\" + \" and is clinically", "if clinical_significance: print \"varaint\" + id_names + \"is a\" + consequence_new + \",\"", "for i in json_obj: id_names = i['id'] consequence_type = i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\"", "= i['clinical_significance'] if clinical_significance: print \"varaint\" + id_names + \"is a\" + consequence_new", "clinical_significance[0].upper() else: print \"varaint\" + id_names + \"is a\" + consequence_new + \",\"", "= 
urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data) for i in json_obj: id_names =", "= re.findall('gene_id \\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data) for", "clinical_significance = i['clinical_significance'] if clinical_significance: print \"varaint\" + id_names + \"is a\" +", "= json.load(data) for i in json_obj: id_names = i['id'] consequence_type = i['consequence_type'] consequence_new", "is clinically \" + clinical_significance[0].upper() else: print \"varaint\" + id_names + \"is a\"", "user_gene_name = raw_input('Enter the gene name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line):", "fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2] == \"gene\":", "+ \"is a\" + consequence_new + \",\" + \" and is clinically \"", "import re import sys import fileinput import json import urllib user_gene_name = raw_input('Enter", "= raw_input('Enter the gene name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column", "gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name == gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";',", "+ \" and is clinically \" + clinical_significance[0].upper() else: print \"varaint\" + id_names", "= i['id'] consequence_type = i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \") clinical_significance = i['clinical_significance'] if", "+ clinical_significance[0].upper() else: print \"varaint\" + id_names + \"is a\" + consequence_new +", "i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \") 
clinical_significance = i['clinical_significance'] if clinical_significance: print \"varaint\" +", "line) if user_gene_name == gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+", "in json_obj: id_names = i['id'] consequence_type = i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \") clinical_significance", "urllib user_gene_name = raw_input('Enter the gene name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t',", "= re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2] == \"gene\": gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line)", "gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data)", "clinically \" + clinical_significance[0].upper() else: print \"varaint\" + id_names + \"is a\" +", "if text_in_column[2] == \"gene\": gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name == gene_name_matches[0]:", "re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2] == \"gene\": gene_name_matches =", "text_in_column[2] == \"gene\": gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name == gene_name_matches[0]: gene_id_matches", "json_obj = json.load(data) for i in json_obj: id_names = i['id'] consequence_type = i['consequence_type']", "#!/usr/bin/python import re import sys import fileinput import json import urllib user_gene_name =", "+\".json?feature=variation\") json_obj = json.load(data) for i in json_obj: id_names = i['id'] consequence_type =", "\" and is clinically \" + clinical_significance[0].upper() else: print \"varaint\" + id_names +", "for line in 
fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line) if len(text_in_column)>3: if", "id_names = i['id'] consequence_type = i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \") clinical_significance = i['clinical_significance']", "re import sys import fileinput import json import urllib user_gene_name = raw_input('Enter the", "consequence_new = consequence_type.replace(\"_\",\" \") clinical_significance = i['clinical_significance'] if clinical_significance: print \"varaint\" + id_names", "print \"varaint\" + id_names + \"is a\" + consequence_new + \",\" + \"", "len(text_in_column)>3: if text_in_column[2] == \"gene\": gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name ==", "import sys import fileinput import json import urllib user_gene_name = raw_input('Enter the gene", "\"is a\" + consequence_new + \",\" + \" and is clinically \" +", "line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data) for i in json_obj:", "= re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name == gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line)", "+ id_names + \"is a\" + consequence_new + \",\" + \" and is", "\"gene\": gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name == gene_name_matches[0]: gene_id_matches = re.findall('gene_id", "consequence_new + \",\" + \" and is clinically \" + clinical_significance[0].upper() else: print", "gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data) for i in json_obj: id_names = i['id'] consequence_type", "urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data) for i in json_obj: id_names = i['id']", "if len(text_in_column)>3: if text_in_column[2] == \"gene\": 
gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name", "gene name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line) if", "if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2] == \"gene\": gene_name_matches", "in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2] ==", "= consequence_type.replace(\"_\",\" \") clinical_significance = i['clinical_significance'] if clinical_significance: print \"varaint\" + id_names +", "fileinput import json import urllib user_gene_name = raw_input('Enter the gene name') for line", "json_obj: id_names = i['id'] consequence_type = i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \") clinical_significance =", "\") clinical_significance = i['clinical_significance'] if clinical_significance: print \"varaint\" + id_names + \"is a\"", "gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj =", "re.findall('gene_name \\\"(.*?)\\\";', line) if user_gene_name == gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line) data", "== gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj", "line): text_in_column = re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2] == \"gene\": gene_name_matches = re.findall('gene_name", "\"varaint\" + id_names + \"is a\" + consequence_new + \",\" + \" and", "consequence_type.replace(\"_\",\" \") clinical_significance = 
i['clinical_significance'] if clinical_significance: print \"varaint\" + id_names + \"is", "line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2]", "\\\"(.*?)\\\";', line) if user_gene_name == gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line) data =", "data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data) for i in json_obj: id_names", "\",\" + \" and is clinically \" + clinical_significance[0].upper() else: print \"varaint\" +", "a\" + consequence_new + \",\" + \" and is clinically \" + clinical_significance[0].upper()", "user_gene_name == gene_name_matches[0]: gene_id_matches = re.findall('gene_id \\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\")", "and is clinically \" + clinical_significance[0].upper() else: print \"varaint\" + id_names + \"is", "consequence_type = i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \") clinical_significance = i['clinical_significance'] if clinical_significance: print", "\\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data) for i in", "re.findall('gene_id \\\"(.*?)\\\";', line) data = urllib.urlopen(\"http://rest.ensembl.org/overlap/id/\"+ gene_id_matches[0] +\".json?feature=variation\") json_obj = json.load(data) for i", "text_in_column = re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2] == \"gene\": gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";',", "name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column = re.split('\\t',line) if len(text_in_column)>3:", "json import 
urllib user_gene_name = raw_input('Enter the gene name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']):", "raw_input('Enter the gene name') for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']): if re.match(r'.*\\t.*\\tgene\\t', line): text_in_column =", "sys import fileinput import json import urllib user_gene_name = raw_input('Enter the gene name')", "import json import urllib user_gene_name = raw_input('Enter the gene name') for line in", "json.load(data) for i in json_obj: id_names = i['id'] consequence_type = i['consequence_type'] consequence_new =", "= i['consequence_type'] consequence_new = consequence_type.replace(\"_\",\" \") clinical_significance = i['clinical_significance'] if clinical_significance: print \"varaint\"", "import fileinput import json import urllib user_gene_name = raw_input('Enter the gene name') for", "re.split('\\t',line) if len(text_in_column)>3: if text_in_column[2] == \"gene\": gene_name_matches = re.findall('gene_name \\\"(.*?)\\\";', line) if" ]
[ "class=\"tit_post tit_ellip\"><a href=\"' current = content.find(link_start, current) if current == -1: return current", "content.find(link_start, current) if current == -1: return current = current + len(link_start) link_end", "= content.find(\"</span>\", current + 1) publish_date = content[current:publish_date_end] current = publish_date_end + 1", "= content[current:link_end] link = int(link[link.rfind('/') + 1:]) print(\"Link : \", link) current =", "1 publish_date = publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date) for file in os.listdir('./tistory'): if", "content.insert(i, \"publish_date : \" + publish_date) break content = '\\n'.join(content) with open('./md/dump_' +", "in os.listdir('./tistory'): if file.endswith('.htm'): with open(os.path.join('./tistory', file)) as f: content = f.read() parse_content(content)", "f: content = f.read() content = content.split('\\n') for i in range(2, len(content)): if", "= current + len(publish_date_start) publish_date_end = content.find(\"</span>\", current + 1) publish_date = content[current:publish_date_end]", "')] print(publish_date) add_date_to_md(link, publish_date) for file in os.listdir('./tistory'): if file.endswith('.htm'): with open(os.path.join('./tistory', file))", "parse_content(content): current = 0 while True: link_start = '<strong class=\"tit_post tit_ellip\"><a href=\"' current", "print(publish_date) add_date_to_md(link, publish_date) for file in os.listdir('./tistory'): if file.endswith('.htm'): with open(os.path.join('./tistory', file)) as", "\"w\") as fw: fw.write(content) def parse_content(content): current = 0 while True: link_start =", "while True: link_start = '<strong class=\"tit_post tit_ellip\"><a href=\"' current = content.find(link_start, current) if", "content[current:link_end] link = int(link[link.rfind('/') + 1:]) print(\"Link : \", link) current = link_end", "= content[current:publish_date_end] current = publish_date_end + 1 publish_date = 
publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link,", "+ 1) publish_date = content[current:publish_date_end] current = publish_date_end + 1 publish_date = publish_date[:publish_date.find('", "current == -1: return current = current + len(link_start) link_end = content.find('\"', current", "publish_date_start = '<span class=\"txt_info\">' current = content.find(publish_date_start, current) if current == -1: return", "= current + len(link_start) link_end = content.find('\"', current + 1) link = content[current:link_end]", "link = content[current:link_end] link = int(link[link.rfind('/') + 1:]) print(\"Link : \", link) current", "as f: content = f.read() content = content.split('\\n') for i in range(2, len(content)):", "add_date_to_md(link, publish_date) for file in os.listdir('./tistory'): if file.endswith('.htm'): with open(os.path.join('./tistory', file)) as f:", "current = 0 while True: link_start = '<strong class=\"tit_post tit_ellip\"><a href=\"' current =", "current = content.find(link_start, current) if current == -1: return current = current +", "\" + publish_date) break content = '\\n'.join(content) with open('./md/dump_' + str(link) + '.md',", "publish_date) for file in os.listdir('./tistory'): if file.endswith('.htm'): with open(os.path.join('./tistory', file)) as f: content", "fw.write(content) def parse_content(content): current = 0 while True: link_start = '<strong class=\"tit_post tit_ellip\"><a", "class=\"txt_info\">' current = content.find(publish_date_start, current) if current == -1: return current = current", "os.path.exists('./md/dump_' + str(link) + '.md'): with open('./md/dump_' + str(link) + '.md') as f:", "in range(2, len(content)): if content[i].find('------------') == 0: content.insert(i, \"publish_date : \" + publish_date)", "content.find('\"', current + 1) link = content[current:link_end] link = int(link[link.rfind('/') + 1:]) print(\"Link", "href=\"' current = content.find(link_start, current) if current == -1: 
return current = current", "link_end = content.find('\"', current + 1) link = content[current:link_end] link = int(link[link.rfind('/') +", "content = content.split('\\n') for i in range(2, len(content)): if content[i].find('------------') == 0: content.insert(i,", "open('./md/dump_' + str(link) + '.md', \"w\") as fw: fw.write(content) def parse_content(content): current =", "publish_date = publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date) for file in os.listdir('./tistory'): if file.endswith('.htm'):", "1) publish_date = content[current:publish_date_end] current = publish_date_end + 1 publish_date = publish_date[:publish_date.find(' ')]", "add_date_to_md(link, publish_date): if os.path.exists('./md/dump_' + str(link) + '.md'): with open('./md/dump_' + str(link) +", "== -1: return current = current + len(publish_date_start) publish_date_end = content.find(\"</span>\", current +", "'\\n'.join(content) with open('./md/dump_' + str(link) + '.md', \"w\") as fw: fw.write(content) def parse_content(content):", "= '<span class=\"txt_info\">' current = content.find(publish_date_start, current) if current == -1: return current", "+ str(link) + '.md'): with open('./md/dump_' + str(link) + '.md') as f: content", "= 0 while True: link_start = '<strong class=\"tit_post tit_ellip\"><a href=\"' current = content.find(link_start,", "1) link = content[current:link_end] link = int(link[link.rfind('/') + 1:]) print(\"Link : \", link)", "== -1: return current = current + len(link_start) link_end = content.find('\"', current +", "with open('./md/dump_' + str(link) + '.md', \"w\") as fw: fw.write(content) def parse_content(content): current", "== 0: content.insert(i, \"publish_date : \" + publish_date) break content = '\\n'.join(content) with", ": \", link) current = link_end + 1 publish_date_start = '<span class=\"txt_info\">' current", "\", link) current = link_end + 1 publish_date_start = '<span class=\"txt_info\">' current =", "import os 
def add_date_to_md(link, publish_date): if os.path.exists('./md/dump_' + str(link) + '.md'): with open('./md/dump_'", "'<span class=\"txt_info\">' current = content.find(publish_date_start, current) if current == -1: return current =", "current = current + len(publish_date_start) publish_date_end = content.find(\"</span>\", current + 1) publish_date =", "return current = current + len(publish_date_start) publish_date_end = content.find(\"</span>\", current + 1) publish_date", "open('./md/dump_' + str(link) + '.md') as f: content = f.read() content = content.split('\\n')", "= link_end + 1 publish_date_start = '<span class=\"txt_info\">' current = content.find(publish_date_start, current) if", "current) if current == -1: return current = current + len(publish_date_start) publish_date_end =", "1 publish_date_start = '<span class=\"txt_info\">' current = content.find(publish_date_start, current) if current == -1:", "content = f.read() content = content.split('\\n') for i in range(2, len(content)): if content[i].find('------------')", "len(content)): if content[i].find('------------') == 0: content.insert(i, \"publish_date : \" + publish_date) break content", "if content[i].find('------------') == 0: content.insert(i, \"publish_date : \" + publish_date) break content =", "os def add_date_to_md(link, publish_date): if os.path.exists('./md/dump_' + str(link) + '.md'): with open('./md/dump_' +", "True: link_start = '<strong class=\"tit_post tit_ellip\"><a href=\"' current = content.find(link_start, current) if current", "= publish_date_end + 1 publish_date = publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date) for file", "= content.find(publish_date_start, current) if current == -1: return current = current + len(publish_date_start)", "return current = current + len(link_start) link_end = content.find('\"', current + 1) link", "-1: return current = current + len(link_start) link_end = content.find('\"', current + 1)", "link = 
int(link[link.rfind('/') + 1:]) print(\"Link : \", link) current = link_end +", "+ 1 publish_date_start = '<span class=\"txt_info\">' current = content.find(publish_date_start, current) if current ==", "f.read() content = content.split('\\n') for i in range(2, len(content)): if content[i].find('------------') == 0:", "+ 1) link = content[current:link_end] link = int(link[link.rfind('/') + 1:]) print(\"Link : \",", "def add_date_to_md(link, publish_date): if os.path.exists('./md/dump_' + str(link) + '.md'): with open('./md/dump_' + str(link)", "current = link_end + 1 publish_date_start = '<span class=\"txt_info\">' current = content.find(publish_date_start, current)", "tit_ellip\"><a href=\"' current = content.find(link_start, current) if current == -1: return current =", "content[current:publish_date_end] current = publish_date_end + 1 publish_date = publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date)", "file in os.listdir('./tistory'): if file.endswith('.htm'): with open(os.path.join('./tistory', file)) as f: content = f.read()", "if os.path.exists('./md/dump_' + str(link) + '.md'): with open('./md/dump_' + str(link) + '.md') as", "with open('./md/dump_' + str(link) + '.md') as f: content = f.read() content =", "1:]) print(\"Link : \", link) current = link_end + 1 publish_date_start = '<span", "print(\"Link : \", link) current = link_end + 1 publish_date_start = '<span class=\"txt_info\">'", "content = '\\n'.join(content) with open('./md/dump_' + str(link) + '.md', \"w\") as fw: fw.write(content)", "'.md'): with open('./md/dump_' + str(link) + '.md') as f: content = f.read() content", "= content.split('\\n') for i in range(2, len(content)): if content[i].find('------------') == 0: content.insert(i, \"publish_date", "+ len(publish_date_start) publish_date_end = content.find(\"</span>\", current + 1) publish_date = content[current:publish_date_end] current =", "str(link) + '.md'): with open('./md/dump_' + str(link) + '.md') 
as f: content =", "if current == -1: return current = current + len(link_start) link_end = content.find('\"',", "publish_date_end = content.find(\"</span>\", current + 1) publish_date = content[current:publish_date_end] current = publish_date_end +", "publish_date_end + 1 publish_date = publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date) for file in", "current == -1: return current = current + len(publish_date_start) publish_date_end = content.find(\"</span>\", current", "= content.find(link_start, current) if current == -1: return current = current + len(link_start)", "range(2, len(content)): if content[i].find('------------') == 0: content.insert(i, \"publish_date : \" + publish_date) break", "link_end + 1 publish_date_start = '<span class=\"txt_info\">' current = content.find(publish_date_start, current) if current", "publish_date): if os.path.exists('./md/dump_' + str(link) + '.md'): with open('./md/dump_' + str(link) + '.md')", "current = publish_date_end + 1 publish_date = publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date) for", "-1: return current = current + len(publish_date_start) publish_date_end = content.find(\"</span>\", current + 1)", "= '\\n'.join(content) with open('./md/dump_' + str(link) + '.md', \"w\") as fw: fw.write(content) def", "current + len(link_start) link_end = content.find('\"', current + 1) link = content[current:link_end] link", "+ str(link) + '.md', \"w\") as fw: fw.write(content) def parse_content(content): current = 0", "int(link[link.rfind('/') + 1:]) print(\"Link : \", link) current = link_end + 1 publish_date_start", "publish_date) break content = '\\n'.join(content) with open('./md/dump_' + str(link) + '.md', \"w\") as", "'.md') as f: content = f.read() content = content.split('\\n') for i in range(2,", "\"publish_date : \" + publish_date) break content = '\\n'.join(content) with open('./md/dump_' + str(link)", "+ publish_date) break content = 
'\\n'.join(content) with open('./md/dump_' + str(link) + '.md', \"w\")", "0 while True: link_start = '<strong class=\"tit_post tit_ellip\"><a href=\"' current = content.find(link_start, current)", "as fw: fw.write(content) def parse_content(content): current = 0 while True: link_start = '<strong", "current + len(publish_date_start) publish_date_end = content.find(\"</span>\", current + 1) publish_date = content[current:publish_date_end] current", "fw: fw.write(content) def parse_content(content): current = 0 while True: link_start = '<strong class=\"tit_post", "= '<strong class=\"tit_post tit_ellip\"><a href=\"' current = content.find(link_start, current) if current == -1:", "publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date) for file in os.listdir('./tistory'): if file.endswith('.htm'): with open(os.path.join('./tistory',", "= content.find('\"', current + 1) link = content[current:link_end] link = int(link[link.rfind('/') + 1:])", "link) current = link_end + 1 publish_date_start = '<span class=\"txt_info\">' current = content.find(publish_date_start,", "= int(link[link.rfind('/') + 1:]) print(\"Link : \", link) current = link_end + 1", "= publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date) for file in os.listdir('./tistory'): if file.endswith('.htm'): with", "if current == -1: return current = current + len(publish_date_start) publish_date_end = content.find(\"</span>\",", "content.find(\"</span>\", current + 1) publish_date = content[current:publish_date_end] current = publish_date_end + 1 publish_date", "len(link_start) link_end = content.find('\"', current + 1) link = content[current:link_end] link = int(link[link.rfind('/')", "+ 1:]) print(\"Link : \", link) current = link_end + 1 publish_date_start =", "+ '.md', \"w\") as fw: fw.write(content) def parse_content(content): current = 0 while True:", "publish_date = content[current:publish_date_end] current = publish_date_end + 1 
publish_date = publish_date[:publish_date.find(' ')] print(publish_date)", "content[i].find('------------') == 0: content.insert(i, \"publish_date : \" + publish_date) break content = '\\n'.join(content)", "def parse_content(content): current = 0 while True: link_start = '<strong class=\"tit_post tit_ellip\"><a href=\"'", "'.md', \"w\") as fw: fw.write(content) def parse_content(content): current = 0 while True: link_start", "for file in os.listdir('./tistory'): if file.endswith('.htm'): with open(os.path.join('./tistory', file)) as f: content =", "+ len(link_start) link_end = content.find('\"', current + 1) link = content[current:link_end] link =", "current) if current == -1: return current = current + len(link_start) link_end =", "link_start = '<strong class=\"tit_post tit_ellip\"><a href=\"' current = content.find(link_start, current) if current ==", "0: content.insert(i, \"publish_date : \" + publish_date) break content = '\\n'.join(content) with open('./md/dump_'", "current + 1) link = content[current:link_end] link = int(link[link.rfind('/') + 1:]) print(\"Link :", "+ str(link) + '.md') as f: content = f.read() content = content.split('\\n') for", "for i in range(2, len(content)): if content[i].find('------------') == 0: content.insert(i, \"publish_date : \"", "'<strong class=\"tit_post tit_ellip\"><a href=\"' current = content.find(link_start, current) if current == -1: return", "content.find(publish_date_start, current) if current == -1: return current = current + len(publish_date_start) publish_date_end", "+ 1 publish_date = publish_date[:publish_date.find(' ')] print(publish_date) add_date_to_md(link, publish_date) for file in os.listdir('./tistory'):", "content.split('\\n') for i in range(2, len(content)): if content[i].find('------------') == 0: content.insert(i, \"publish_date :", "break content = '\\n'.join(content) with open('./md/dump_' + str(link) + '.md', \"w\") as fw:", "+ '.md'): with open('./md/dump_' + str(link) + '.md') as f: content = 
f.read()", "i in range(2, len(content)): if content[i].find('------------') == 0: content.insert(i, \"publish_date : \" +", "str(link) + '.md', \"w\") as fw: fw.write(content) def parse_content(content): current = 0 while", ": \" + publish_date) break content = '\\n'.join(content) with open('./md/dump_' + str(link) +", "current = current + len(link_start) link_end = content.find('\"', current + 1) link =", "current + 1) publish_date = content[current:publish_date_end] current = publish_date_end + 1 publish_date =", "current = content.find(publish_date_start, current) if current == -1: return current = current +", "= f.read() content = content.split('\\n') for i in range(2, len(content)): if content[i].find('------------') ==", "len(publish_date_start) publish_date_end = content.find(\"</span>\", current + 1) publish_date = content[current:publish_date_end] current = publish_date_end", "+ '.md') as f: content = f.read() content = content.split('\\n') for i in", "str(link) + '.md') as f: content = f.read() content = content.split('\\n') for i" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.],", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "ny=3) solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc(): solver", "5.]]), atol=0.01) def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(),", "laplace import LaplaceSolver import numpy as np def test_top_bc(): solver = LaplaceSolver(nx=4, ny=3)", "solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0.,", "License. # You may obtain a copy of the License at # #", "[7., 2.625, 0.875, 0. ],[7., 0., 0., 0.]]) , atol=0.01) def test_right_bc(): solver", "solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01) if __name__", "governing permissions and # limitations under the License. 
import sys sys.path.append('../src') from laplace", "solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01) if __name__ == \"__main__\": test_top_bc() test_left_bc()", "solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625, 0.875, 0.],", "law or agreed to in writing, software # distributed under the License is", "LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958,", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]),", "x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left',", "np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958, 0., 5.], [0., 0., 5.]]), atol=0.01) def", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01)", "this file except in compliance with the License. 
# You may obtain a", "numpy as np def test_top_bc(): solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y: 10)", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "5.], [0., 0., 5.]]), atol=0.01) def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y:", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. import", "2.625, 0.875, 0. ],[7., 0., 0., 0.]]) , atol=0.01) def test_right_bc(): solver =", "= LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101],", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "0., 0.]]) , atol=0.01) def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5)", "lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4)", "0., 0., 0.]]) , atol=0.01) def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y:", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "ANY KIND, either express or implied. # See the License for the specific", "0. ],[7., 0., 0., 0.]]) , atol=0.01) def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right',", "atol=0.01) def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14.,", "in compliance with the License. 
# You may obtain a copy of the", "0., 5.]]), atol=0.01) def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True)", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "import numpy as np def test_top_bc(): solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y:", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625, 0.875,", "lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958, 0., 5.],", "0.875, 0.], [7., 2.625, 0.875, 0. ],[7., 0., 0., 0.]]) , atol=0.01) def", "not use this file except in compliance with the License. # You may", "solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7.,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "under the License. 
import sys sys.path.append('../src') from laplace import LaplaceSolver import numpy as", "10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda", "5.],[0., 0., 0.30252101], [0.87394958, 0., 5.], [0., 0., 5.]]), atol=0.01) def test_bottom_bc(): solver", "See the License for the specific language governing permissions and # limitations under", "0., 5.],[0., 0., 0.30252101], [0.87394958, 0., 5.], [0., 0., 5.]]), atol=0.01) def test_bottom_bc():", "0.30252101], [0.87394958, 0., 5.], [0., 0., 5.]]), atol=0.01) def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3)", "# limitations under the License. import sys sys.path.append('../src') from laplace import LaplaceSolver import", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "the License. import sys sys.path.append('../src') from laplace import LaplaceSolver import numpy as np", "License, Version 2.0 (the \"License\"); # you may not use this file except", "np def test_top_bc(): solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(),", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625, 0.875, 0.], [7., 2.625, 0.875, 0. 
],[7.,", "sys.path.append('../src') from laplace import LaplaceSolver import numpy as np def test_top_bc(): solver =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01)", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "[0., 0., 5.]]), atol=0.01) def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14)", "def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14.,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0.,", "7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625, 0.875, 0.], [7., 2.625, 0.875,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "0.]]) , atol=0.01) def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True)", "License. import sys sys.path.append('../src') from laplace import LaplaceSolver import numpy as np def", "np.array([[7., 0., 0., 0.],[7., 2.625, 0.875, 0.], [7., 2.625, 0.875, 0. 
],[7., 0.,", "np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01) if __name__ == \"__main__\": test_top_bc() test_left_bc() test_right_bc()", "OF ANY KIND, either express or implied. # See the License for the", "0.],[7., 2.625, 0.875, 0.], [7., 2.625, 0.875, 0. ],[7., 0., 0., 0.]]) ,", "as np def test_top_bc(): solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True)", "2.0 (the \"License\"); # you may not use this file except in compliance", "the specific language governing permissions and # limitations under the License. import sys", "],[7., 0., 0., 0.]]) , atol=0.01) def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda", "def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0.,", "sys sys.path.append('../src') from laplace import LaplaceSolver import numpy as np def test_top_bc(): solver", "# you may not use this file except in compliance with the License.", "permissions and # limitations under the License. 
import sys sys.path.append('../src') from laplace import", "solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625, 0.875, 0.], [7., 2.625, 0.875, 0.", "LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01) if", "agreed to in writing, software # distributed under the License is distributed on", "solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958, 0., 5.], [0., 0., 5.]]),", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "<gh_stars>0 #/usr/bin/env python # # Copyright 2020-2021 <NAME> # # Licensed under the", "2.625, 0.875, 0.], [7., 2.625, 0.875, 0. ],[7., 0., 0., 0.]]) , atol=0.01)", "(the \"License\"); # you may not use this file except in compliance with", "= LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625,", "x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01) if __name__ == \"__main__\":", "# Copyright 2020-2021 <NAME> # # Licensed under the Apache License, Version 2.0", ", atol=0.01) def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(),", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. 
# See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. # You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "[0.87394958, 0., 5.], [0., 0., 5.]]), atol=0.01) def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom',", "either express or implied. # See the License for the specific language governing", "10.]]), atol=0.01) def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(),", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "test_top_bc(): solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]),", "atol=0.01) def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7.,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "2020-2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc(): solver =", "x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958, 0., 5.], [0.,", "x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625, 0.875, 0.], [7., 2.625,", "file except in compliance with the License. # You may obtain a copy", "def test_top_bc(): solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10.,", "5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958, 0., 5.], [0., 0.,", "test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0.,", "= LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def", "#/usr/bin/env python # # Copyright 2020-2021 <NAME> # # Licensed under the Apache", "from laplace import 
LaplaceSolver import numpy as np def test_top_bc(): solver = LaplaceSolver(nx=4,", "0., 0., 0.],[7., 2.625, 0.875, 0.], [7., 2.625, 0.875, 0. ],[7., 0., 0.,", "# # Copyright 2020-2021 <NAME> # # Licensed under the Apache License, Version", "language governing permissions and # limitations under the License. import sys sys.path.append('../src') from", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0.,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda x,y: 10) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc():", "np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7) solver.swig_solve(quiet=True)", "lambda x,y: 14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01) if __name__ ==", "the License. 
# You may obtain a copy of the License at #", "solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y:", "to in writing, software # distributed under the License is distributed on an", "lambda x,y: 7) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625, 0.875, 0.], [7.,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958, 0., 5.], [0., 0., 5.]]), atol=0.01)", "Copyright 2020-2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the", "LaplaceSolver import numpy as np def test_top_bc(): solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top', lambda", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01) if __name__ == \"__main__\": test_top_bc() test_left_bc() test_right_bc() test_bottom_bc()", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "import LaplaceSolver import numpy as np def test_top_bc(): solver = LaplaceSolver(nx=4, ny=3) solver.set_boundary_condtion('top',", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "and # limitations under the License. import sys sys.path.append('../src') from laplace import LaplaceSolver", "applicable law or agreed to in writing, software # distributed under the License", "0.875, 0. 
],[7., 0., 0., 0.]]) , atol=0.01) def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3)", "0., 0.30252101], [0.87394958, 0., 5.], [0., 0., 5.]]), atol=0.01) def test_bottom_bc(): solver =", "python # # Copyright 2020-2021 <NAME> # # Licensed under the Apache License,", "0., 0.],[7., 2.625, 0.875, 0.], [7., 2.625, 0.875, 0. ],[7., 0., 0., 0.]])", "solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958, 0.,", "atol=0.01) def test_right_bc(): solver = LaplaceSolver(nx=4,ny=3) solver.set_boundary_condtion('right', lambda x,y: 5) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[0.,", "limitations under the License. import sys sys.path.append('../src') from laplace import LaplaceSolver import numpy", "0.], [7., 2.625, 0.875, 0. ],[7., 0., 0., 0.]]) , atol=0.01) def test_right_bc():", "or agreed to in writing, software # distributed under the License is distributed", "or implied. # See the License for the specific language governing permissions and", "0., 5.], [0., 0., 5.]]), atol=0.01) def test_bottom_bc(): solver = LaplaceSolver(nx=3,ny=3) solver.set_boundary_condtion('bottom', lambda", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License. import sys sys.path.append('../src')", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "import sys sys.path.append('../src') from laplace import LaplaceSolver import numpy as np def test_top_bc():", "14) solver.swig_solve(quiet=True) np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01) if __name__ == \"__main__\": test_top_bc()", "np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01) def test_left_bc(): solver = LaplaceSolver(nx=4,ny=4) solver.set_boundary_condtion('left', lambda x,y: 7)", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "TestStringMethods(unittest.TestCase): def test_stopwatch(self): w = PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0,", "w = PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b')", "e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w = PyStopwatch() for i in range(5): key", "w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w = PyStopwatch()", "self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w = PyStopwatch() for i in range(5):", "delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w = PyStopwatch() for i in", "<gh_stars>10-100 import time import unittest from pystopwatch2.watch import PyStopwatch class TestStringMethods(unittest.TestCase): def test_stopwatch(self):", "for i in range(5): key = 'key_%d' % i w.start(key) time.sleep(0.1) for i", "time import unittest from pystopwatch2.watch import PyStopwatch class TestStringMethods(unittest.TestCase): def test_stopwatch(self): w =", "w.start('a') time.sleep(1) w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a", "i w.start(key) time.sleep(0.1) for i in range(5): key = 'key_%d' % i e", "time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a') e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b,", "w.get_elapsed('a') e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self):", "key = 'key_%d' % i e = w.get_elapsed(key) self.assertAlmostEqual((5 - i) * 0.1,", "i in range(5): key = 'key_%d' % i e = 
w.get_elapsed(key) self.assertAlmostEqual((5 -", "def test_running_stopwatches(self): w = PyStopwatch() for i in range(5): key = 'key_%d' %", "self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w = PyStopwatch() for", "'key_%d' % i w.start(key) time.sleep(0.1) for i in range(5): key = 'key_%d' %", "in range(5): key = 'key_%d' % i w.start(key) time.sleep(0.1) for i in range(5):", "import time import unittest from pystopwatch2.watch import PyStopwatch class TestStringMethods(unittest.TestCase): def test_stopwatch(self): w", "% i w.start(key) time.sleep(0.1) for i in range(5): key = 'key_%d' % i", "w.start(key) time.sleep(0.1) for i in range(5): key = 'key_%d' % i e =", "= w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w =", "key = 'key_%d' % i w.start(key) time.sleep(0.1) for i in range(5): key =", "= PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5)", "= w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a') e_b =", "time.sleep(1) w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a =", "delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w = PyStopwatch() for i in range(5): key =", "w.get_elapsed(key) self.assertAlmostEqual((5 - i) * 0.1, e, delta=0.03) if __name__ == '__main__': unittest.main()", "range(5): key = 'key_%d' % i w.start(key) time.sleep(0.1) for i in range(5): key", "e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w = PyStopwatch() for i", "pystopwatch2.watch import PyStopwatch class 
TestStringMethods(unittest.TestCase): def test_stopwatch(self): w = PyStopwatch() w.start('a') time.sleep(1) w.pause('a')", "def test_stopwatch(self): w = PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e,", "import unittest from pystopwatch2.watch import PyStopwatch class TestStringMethods(unittest.TestCase): def test_stopwatch(self): w = PyStopwatch()", "PyStopwatch() for i in range(5): key = 'key_%d' % i w.start(key) time.sleep(0.1) for", "time.sleep(0.1) for i in range(5): key = 'key_%d' % i e = w.get_elapsed(key)", "test_running_stopwatches(self): w = PyStopwatch() for i in range(5): key = 'key_%d' % i", "w.pause('b') e_a = w.get_elapsed('a') e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05)", "range(5): key = 'key_%d' % i e = w.get_elapsed(key) self.assertAlmostEqual((5 - i) *", "print(w.__repr__()) def test_running_stopwatches(self): w = PyStopwatch() for i in range(5): key = 'key_%d'", "self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a') e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0,", "'key_%d' % i e = w.get_elapsed(key) self.assertAlmostEqual((5 - i) * 0.1, e, delta=0.03)", "w.start('b') time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a') e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5,", "e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def test_running_stopwatches(self): w", "for i in range(5): key = 'key_%d' % i e = w.get_elapsed(key) self.assertAlmostEqual((5", "i e = w.get_elapsed(key) self.assertAlmostEqual((5 - i) * 0.1, e, delta=0.03) if __name__", "e = w.get_elapsed(key) self.assertAlmostEqual((5 - i) * 0.1, e, delta=0.03) if __name__ ==", "PyStopwatch class TestStringMethods(unittest.TestCase): def test_stopwatch(self): w = 
PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e =", "w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a') e_b = w.get_elapsed('b')", "e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a') e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a,", "e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a') e_b", "in range(5): key = 'key_%d' % i e = w.get_elapsed(key) self.assertAlmostEqual((5 - i)", "i in range(5): key = 'key_%d' % i w.start(key) time.sleep(0.1) for i in", "= 'key_%d' % i w.start(key) time.sleep(0.1) for i in range(5): key = 'key_%d'", "w = PyStopwatch() for i in range(5): key = 'key_%d' % i w.start(key)", "class TestStringMethods(unittest.TestCase): def test_stopwatch(self): w = PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e = w.get_elapsed('a')", "= w.get_elapsed(key) self.assertAlmostEqual((5 - i) * 0.1, e, delta=0.03) if __name__ == '__main__':", "unittest from pystopwatch2.watch import PyStopwatch class TestStringMethods(unittest.TestCase): def test_stopwatch(self): w = PyStopwatch() w.start('a')", "e_a = w.get_elapsed('a') e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__())", "PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b')", "% i e = w.get_elapsed(key) self.assertAlmostEqual((5 - i) * 0.1, e, delta=0.03) if", "w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a')", "= PyStopwatch() for i in range(5): key = 'key_%d' % i w.start(key) time.sleep(0.1)", "= 'key_%d' % i e = w.get_elapsed(key) self.assertAlmostEqual((5 - i) * 0.1, e,", "= w.get_elapsed('a') e_b = 
w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05) self.assertAlmostEqual(0.5, e_b, delta=0.05) print(w.__repr__()) def", "test_stopwatch(self): w = PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e = w.get_elapsed('a') self.assertAlmostEqual(1.0, e, delta=0.05)", "import PyStopwatch class TestStringMethods(unittest.TestCase): def test_stopwatch(self): w = PyStopwatch() w.start('a') time.sleep(1) w.pause('a') e", "delta=0.05) w.start('b') time.sleep(0.5) w.pause('b') e_a = w.get_elapsed('a') e_b = w.get_elapsed('b') self.assertAlmostEqual(1.0, e_a, delta=0.05)", "from pystopwatch2.watch import PyStopwatch class TestStringMethods(unittest.TestCase): def test_stopwatch(self): w = PyStopwatch() w.start('a') time.sleep(1)" ]
[ "logger.debug(\"Add input for node '{0}' with dict '{1}'\".format(elem.tag, node_dict)) # Convert attributes for", "__init__(self): self._input_tag = 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self):", ":return: \"\"\" with open(filename, mode='w+') as f: f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This is", "Quantum Espresso Foundation and SISSA (Scuola # Internazionale Superiore di Studi Avanzati). All", "filename): \"\"\" Write the XML configuration to a Fortran input. :param filename: :return:", "= etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for node '{0}' with dict '{1}'\".format(elem.tag,", "= schema.get_attributes(path) defaults_dict = {} defaults_path_keys = [] try: # Add default values", "path)) path_key = '%s/%s' % (rel_path, attr_name) if path_key not in qe_input: logger.debug(\"Attribute's", "TdInputConverter ) def get_input_path(self): return '.' class SpectrumDocument(QeDocument): \"\"\" Class to manage turbo-spectrum", "Authors: <NAME> # import logging import os.path from .converters import PwInputConverter, PhononInputConverter, NebInputConverter,", "# Add default values for attributes for attr_name, xsd_attribute in xsd_attributes.items(): default_value =", "(rel_path, attr_name) xsd_type = xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except", "try: # Add default values for attributes for attr_name, xsd_attribute in xsd_attributes.items(): default_value", "NEB XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' %", "root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for node '{0}' with dict '{1}'\".format(elem.tag, node_dict)) # Convert", "x: x.startswith(input_path) and self.find(x) is None, schema.elements ): rel_path = path.replace(input_path, '.') tag", "'{0}' with dict '{1}'\".format(elem.tag, node_dict)) # Convert attributes for attr_name, value in elem.attrib.items():", "schema. :param filename: :return: \"\"\" return self def write_qe_input(self, filename): \"\"\" Write the", "to False. :param use_defaults: :return: the input as obtained from its input builder", "TD_spctInConverter from .exceptions import ConfigError from .xsdtypes import etree_node_to_dict, XmlDocument from .xsdtypes.etree import", "to manage turbo-spectrum inputs \"\"\" def __init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file =", "rights reserved. 
# This file is distributed under the terms of the MIT", "% rel_path if xsd_attributes else rel_path xsd_type = schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]]", "subtree for path in filter( lambda x: x.startswith(input_path) and self.find(x) is None, schema.elements", "values for attributes for attr_name, xsd_attribute in xsd_attributes.items(): default_value = xsd_attribute.get_default() if default_value", "return qe_input.get_qe_input() def load_fortran_input(self, filename): if self._document is not None: raise ConfigError(\"Configuration not", "from .xsdtypes import etree_node_to_dict, XmlDocument from .xsdtypes.etree import etree_iter_path logger = logging.getLogger('qespresso') class", "# import logging import os.path from .converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter", "not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element '%s'\" %", "if use_defaults: # Add defaults for elements not included in input XML subtree", "defaults_path_keys.append(path_key) except AttributeError: pass default_value = schema.get_element_default(path) if default_value is not None: path_key", "get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input calling super get_qe_input with use_defaults set to False.", "def get_input_path(self): return '.' class SpectrumDocument(QeDocument): \"\"\" Class to manage turbo-spectrum inputs \"\"\"", "XML schema based configurations. 
\"\"\" def __init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder =", "'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self): return './inputPH' def", "xsd_attribute in xsd_attributes.items(): default_value = xsd_attribute.get_default() if default_value is not None: path_key =", "All rights reserved. # This file is distributed under the terms of the", "default_value = schema.get_element_default(path) if default_value is not None: path_key = '%s/_text' % rel_path", ".exceptions import ConfigError from .xsdtypes import etree_node_to_dict, XmlDocument from .xsdtypes.etree import etree_iter_path logger", "NebDocument(QeDocument): \"\"\" Class to manage NEB XML documents. \"\"\" def __init__(self): self._input_tag =", "super get_qe_input with use_defaults set to False. :param use_defaults: :return: the input as", "is an abstract implementation, use a subclass!\") def get_qe_input(self, use_defaults=True): if self._document is", "node '{0}' with dict '{1}'\".format(elem.tag, node_dict)) # Convert attributes for attr_name, value in", "This file is distributed under the terms of the MIT License. See the", "http://opensource.org/licenses/MIT. 
# Authors: <NAME> # import logging import os.path from .converters import PwInputConverter,", "# # Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola # Internazionale", "2015-2016, Quantum Espresso Foundation and SISSA (Scuola # Internazionale Superiore di Studi Avanzati).", "PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions import ConfigError from .xsdtypes import etree_node_to_dict, XmlDocument", "to manage TDDFPT \"\"\" def __init__(self): self._input_tag = 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' %", "loaded!\") # fortran_input = self.input_builder() return None class PwDocument(QeDocument): \"\"\" Class to manage", "def __init__(self): self._input_tag = 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def", "value = xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except AttributeError: pass default_value = schema.get_element_default(path)", "License. 
See the # file 'LICENSE' in the root directory of the present", "logger = logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract class for XML schema based configurations.", "for node '{0}' with dict '{1}'\".format(elem.tag, node_dict)) # Convert attributes for attr_name, value", "in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults: # Add defaults", "NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions import ConfigError from .xsdtypes import etree_node_to_dict, XmlDocument from", "self._input_tag = 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter ) def", "\"\"\" def __init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file = '%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)), input_builder = TD_spctInConverter", "to XML old parameters to correspondent parameter in XML schema. :param filename: :return:", "'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self): return './input' class", "Write the XML configuration to a Fortran input. 
:param filename: :return: \"\"\" with", "= path.replace(input_path, '.') tag = rel_path.rsplit('/', 1)[-1] xsd_attributes = schema.get_attributes(path) defaults_dict = {}", "\"\"\" def __init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace = self.schema.target_namespace", "See the # file 'LICENSE' in the root directory of the present distribution,", "not in qe_input: logger.debug(\"Element's path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key,", "not in qe_input: logger.debug(\"Attribute's path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key,", "PwDocument(QeDocument): \"\"\" Class to manage PW XML documents. \"\"\" def __init__(self): self._input_tag =", "self._document is None: raise ConfigError(\"Configuration not loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema = self.schema", "continue qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults: # Add defaults for elements not included", "defaults for elements not included in input XML subtree for path in filter(", "get_qe_input calling super get_qe_input with use_defaults set to False. :param use_defaults: :return: the", "'./input' class PhononDocument(QeDocument): \"\"\" Class to manage Phonon XML documents. 
\"\"\" def __init__(self):", "__init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace = self.schema.target_namespace qe_nslist =", "Class to manage TDDFPT \"\"\" def __init__(self): self._input_tag = 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd'", "% (rel_path, attr_name) xsd_type = xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key)", "self def write_qe_input(self, filename): \"\"\" Write the XML configuration to a Fortran input.", "Class to manage turbo-spectrum inputs \"\"\" def __init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file", "in etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path, '.') node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults)", "XML document for elem, path in etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path, '.') node_dict", "XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)),", "not loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema = self.schema input_path = self.get_input_path() input_root =", "defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self, filename): if self._document is not None: raise ConfigError(\"Configuration", "self._input_tag = 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self): return", "x.startswith(input_path) and self.find(x) is None, schema.elements ): rel_path = path.replace(input_path, '.') tag =", "def load_fortran_input(self, filename): if self._document is not None: raise ConfigError(\"Configuration not loaded!\") #", "def get_input_path(self): return './input' class TdDocument(QeDocument): \"\"\" Class to manage TDDFPT \"\"\" def", "path in etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path, '.') node_dict = etree_node_to_dict(elem, schema, root_path=path,", "node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for node '{0}' with dict", "self.schema input_path = self.get_input_path() input_root = self.find(input_path) # Extract values from input's subtree", "values from input's subtree of the XML document for elem, path in etree_iter_path(input_root,", "schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for node '{0}' with dict '{1}'\".format(elem.tag, node_dict)) #", "value defaults_path_keys.append(path_key) except AttributeError: pass default_value = schema.get_element_default(path) if default_value is not None:", "xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for path_key in 
defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return", "= 'input' super(SpectrumDocument,self).__init__( xsd_file = '%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)), input_builder = TD_spctInConverter ) def get_input_path(self): return", "None class PwDocument(QeDocument): \"\"\" Class to manage PW XML documents. \"\"\" def __init__(self):", "self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter not implemented for this schema {}\".format(self.default_namespace) ) def", "this schema {}\".format(self.default_namespace) ) def read_qe_input(self, filename): \"\"\" Map from a Fortran input", "= 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self): return './inputPH'", "= '%s/_text' % rel_path if xsd_attributes else rel_path xsd_type = schema.get_element_type(path) value =", "= schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for path_key in defaults_path_keys:", "= '%s/%s' % (rel_path, attr_name) if path_key not in qe_input: logger.debug(\"Attribute's path '%s'", "input_builder=NebInputConverter ) def get_input_path(self): return './input' class TdDocument(QeDocument): \"\"\" Class to manage TDDFPT", "etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path, '.') node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add", "configurations. 
\"\"\" def __init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace =", "not None: raise ConfigError(\"Configuration not loaded!\") # fortran_input = self.input_builder() return None class", "self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self): return './input' class PhononDocument(QeDocument): \"\"\"", "\"\"\" with open(filename, mode='w+') as f: f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This is an", "schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for path_key in defaults_path_keys: qe_input.set_path(path_key,", "input_builder=PhononInputConverter ) def get_input_path(self): return './inputPH' def get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input calling", "Superiore di Studi Avanzati). All rights reserved. # This file is distributed under", "self.input_builder(xml_file=self._config_file) schema = self.schema input_path = self.get_input_path() input_root = self.find(input_path) # Extract values", "manage PW XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd'", "'./inputPH' def get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input calling super get_qe_input with use_defaults set", "if path_key not in qe_input: logger.debug(\"Element's path '%s' not in converter!\" % path_key)", "value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for path_key in defaults_path_keys: qe_input.set_path(path_key, tag,", "raise NotImplementedError(\"Converter not implemented for this schema {}\".format(self.default_namespace) ) def read_qe_input(self, filename): \"\"\"", "def get_qe_input(self, use_defaults=True): if self._document is None: raise ConfigError(\"Configuration not loaded!\") qe_input =", "attr_name, xsd_attribute in xsd_attributes.items(): default_value = xsd_attribute.get_default() if default_value is not None: path_key", "% os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self): return './input' class PhononDocument(QeDocument): \"\"\" Class to", "Fortran input to XML old parameters to correspondent parameter in XML schema. :param", "= self.schema input_path = self.get_input_path() input_root = self.find(input_path) # Extract values from input's", "from input's subtree of the XML document for elem, path in etree_iter_path(input_root, path=input_path):", "default values for attributes for attr_name, xsd_attribute in xsd_attributes.items(): default_value = xsd_attribute.get_default() if", "attr_name) xsd_type = xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except AttributeError:", "(Scuola # Internazionale Superiore di Studi Avanzati). All rights reserved. # This file", "and SISSA (Scuola # Internazionale Superiore di Studi Avanzati). All rights reserved. 
#", "path_key in defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self, filename): if self._document", "None: raise ConfigError(\"Configuration not loaded!\") # fortran_input = self.input_builder() return None class PwDocument(QeDocument):", "an abstract implementation, use a subclass!\") def get_qe_input(self, use_defaults=True): if self._document is None:", "if default_value is not None: path_key = '%s/%s' % (rel_path, attr_name) xsd_type =", "'LICENSE' in the root directory of the present distribution, or # http://opensource.org/licenses/MIT. #", "from a Fortran input to XML old parameters to correspondent parameter in XML", "% rel_path if schema.get_attributes(path) else rel_path if path_key not in qe_input: logger.debug(\"Element's path", "'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self): return './input' class", "xsd_attributes.items(): default_value = xsd_attribute.get_default() if default_value is not None: path_key = '%s/%s' %", "use_defaults=True): if self._document is None: raise ConfigError(\"Configuration not loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema", "etree_iter_path logger = logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract class for XML schema based", "subclass!\") def get_qe_input(self, use_defaults=True): if self._document is None: raise ConfigError(\"Configuration not loaded!\") qe_input", "not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults: # Add", "__init__(self): self._input_tag = 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self):", "present distribution, or # http://opensource.org/licenses/MIT. 
# Authors: <NAME> # import logging import os.path", "path_key = '%s/%s' % (rel_path, attr_name) xsd_type = xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name]", "# Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola # Internazionale Superiore", "SISSA (Scuola # Internazionale Superiore di Studi Avanzati). All rights reserved. # This", "not implemented for this schema {}\".format(self.default_namespace) ) def read_qe_input(self, filename): \"\"\" Map from", "documents. \"\"\" def __init__(self): self._input_tag = 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter", "= self.find(input_path) # Extract values from input's subtree of the XML document for", "from its input builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class to", "self.find(input_path) # Extract values from input's subtree of the XML document for elem,", "% path) path_key = '%s/_text' % rel_path if schema.get_attributes(path) else rel_path if path_key", "defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except AttributeError: pass default_value = schema.get_element_default(path) if default_value is", "input_builder = TdInputConverter ) def get_input_path(self): return '.' 
class SpectrumDocument(QeDocument): \"\"\" Class to", "input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace = self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph',", "rel_path if schema.get_attributes(path) else rel_path if path_key not in qe_input: logger.debug(\"Element's path '%s'", "# fortran_input = self.input_builder() return None class PwDocument(QeDocument): \"\"\" Class to manage PW", "def get_input_path(self): return './inputPH' def get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input calling super get_qe_input", ") def get_input_path(self): return './input' class PhononDocument(QeDocument): \"\"\" Class to manage Phonon XML", "the MIT License. See the # file 'LICENSE' in the root directory of", "# Internazionale Superiore di Studi Avanzati). All rights reserved. # This file is", "f: f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This is an abstract implementation, use a subclass!\")", "= value defaults_path_keys.append(path_key) for path_key in defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input() def", "{}\".format(self.default_namespace) ) def read_qe_input(self, filename): \"\"\" Map from a Fortran input to XML", "'input' super(SpectrumDocument,self).__init__( xsd_file = '%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)), input_builder = TD_spctInConverter ) def get_input_path(self): return '.'", "the terms of the MIT License. 
See the # file 'LICENSE' in the", "= [] try: # Add default values for attributes for attr_name, xsd_attribute in", "path.replace(input_path, '.') node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for node '{0}'", "a Fortran input to XML old parameters to correspondent parameter in XML schema.", "= xsd_attribute.get_default() if default_value is not None: path_key = '%s/%s' % (rel_path, attr_name)", "in defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self, filename): if self._document is", "super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace = self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr',", "attribute '%s' of element '%s'\" % (attr_name, path)) path_key = '%s/%s' % (rel_path,", "attr_name) if path_key not in qe_input: logger.debug(\"Attribute's path '%s' not in converter!\" %", "or # http://opensource.org/licenses/MIT. 
# Authors: <NAME> # import logging import os.path from .converters", "path_key = '%s/_text' % rel_path if xsd_attributes else rel_path xsd_type = schema.get_element_type(path) value", "in xsd_attributes.items(): default_value = xsd_attribute.get_default() if default_value is not None: path_key = '%s/%s'", "= schema.get_element_default(path) if default_value is not None: path_key = '%s/_text' % rel_path if", "path) path_key = '%s/_text' % rel_path if schema.get_attributes(path) else rel_path if path_key not", "tag = rel_path.rsplit('/', 1)[-1] xsd_attributes = schema.get_attributes(path) defaults_dict = {} defaults_path_keys = []", "self.input_builder = input_builder self.default_namespace = self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ]))", "node_dict) if use_defaults: # Add defaults for elements not included in input XML", "xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except AttributeError: pass default_value =", "'.' class SpectrumDocument(QeDocument): \"\"\" Class to manage turbo-spectrum inputs \"\"\" def __init__(self): self._input_tag", "class for XML schema based configurations. \"\"\" def __init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file)", "value defaults_path_keys.append(path_key) for path_key in defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self,", "document for elem, path in etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path, '.') node_dict =", "if not self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter not implemented for this schema {}\".format(self.default_namespace)", "Phonon XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' %", "converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element '%s'\" % path) path_key", "xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except AttributeError: pass default_value = schema.get_element_default(path) if default_value", "Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola # Internazionale Superiore di", "else rel_path xsd_type = schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for", "path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element", "schema {}\".format(self.default_namespace) ) def read_qe_input(self, filename): \"\"\" Map from a Fortran input to", "file is distributed under the terms of the MIT License. See the #", "['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if not self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter not implemented", "logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract class for XML schema based configurations. \"\"\" def", "qe_input = self.input_builder(xml_file=self._config_file) schema = self.schema input_path = self.get_input_path() input_root = self.find(input_path) #", "defaults_dict = {} defaults_path_keys = [] try: # Add default values for attributes", "from .xsdtypes.etree import etree_iter_path logger = logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract class for", "set to False. :param use_defaults: :return: the input as obtained from its input", "is distributed under the terms of the MIT License. 
See the # file", "rel_path xsd_type = schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for path_key", "except AttributeError: pass default_value = schema.get_element_default(path) if default_value is not None: path_key =", "Class to manage PW XML documents. \"\"\" def __init__(self): self._input_tag = 'input' super(PwDocument,", "= 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self): return './input'", "import etree_iter_path logger = logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract class for XML schema", "self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter ) def get_input_path(self): return '.' class", "the # file 'LICENSE' in the root directory of the present distribution, or", "ConfigError(\"Configuration not loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema = self.schema input_path = self.get_input_path() input_root", "to correspondent parameter in XML schema. :param filename: :return: \"\"\" return self def", "terms of the MIT License. See the # file 'LICENSE' in the root", "default_value = xsd_attribute.get_default() if default_value is not None: path_key = '%s/%s' % (rel_path,", "False. :param use_defaults: :return: the input as obtained from its input builder \"\"\"", "get_input_path(self): raise NotImplemented(\"This is an abstract implementation, use a subclass!\") def get_qe_input(self, use_defaults=True):", "with dict '{1}'\".format(elem.tag, node_dict)) # Convert attributes for attr_name, value in elem.attrib.items(): logger.debug(\"Convert", "documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter", "use_defaults=False): \"\"\" overrides get_qe_input calling super get_qe_input with use_defaults set to False. :param", "filename): if self._document is not None: raise ConfigError(\"Configuration not loaded!\") # fortran_input =", "self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class to manage NEB XML documents. \"\"\" def __init__(self):", "etree_node_to_dict, XmlDocument from .xsdtypes.etree import etree_iter_path logger = logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract", "get_input_path(self): return './input' class TdDocument(QeDocument): \"\"\" Class to manage TDDFPT \"\"\" def __init__(self):", "input XML subtree for path in filter( lambda x: x.startswith(input_path) and self.find(x) is", "self.input_builder() return None class PwDocument(QeDocument): \"\"\" Class to manage PW XML documents. \"\"\"", "self._input_tag = 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self): return", "elem.tag, node_dict) logger.debug(\"Convert element '%s'\" % path) path_key = '%s/_text' % rel_path if", "os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter ) def get_input_path(self): return '.' 
class SpectrumDocument(QeDocument): \"\"\" Class", "Add defaults for elements not included in input XML subtree for path in", "included in input XML subtree for path in filter( lambda x: x.startswith(input_path) and", "= self.input_builder() return None class PwDocument(QeDocument): \"\"\" Class to manage PW XML documents.", "\"\"\" Class to manage TDDFPT \"\"\" def __init__(self): self._input_tag = 'input' super(TdDocument, self).__init__(", "'qes_spectrum' ])) if not self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter not implemented for this", "return '.' class SpectrumDocument(QeDocument): \"\"\" Class to manage turbo-spectrum inputs \"\"\" def __init__(self):", "utf-8 -*- # # Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola", "def __init__(self): self._input_tag = 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def", "xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self): return './input' class PhononDocument(QeDocument): \"\"\" Class", "from .converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions import ConfigError from", "get_qe_input with use_defaults set to False. 
:param use_defaults: :return: the input as obtained", "% path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element '%s'\" % path) path_key =", "xsd_type = xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except AttributeError: pass", "logger.debug(\"Convert attribute '%s' of element '%s'\" % (attr_name, path)) path_key = '%s/%s' %", "# Authors: <NAME> # import logging import os.path from .converters import PwInputConverter, PhononInputConverter,", ":return: \"\"\" return self def write_qe_input(self, filename): \"\"\" Write the XML configuration to", "= TdInputConverter ) def get_input_path(self): return '.' class SpectrumDocument(QeDocument): \"\"\" Class to manage", "class TdDocument(QeDocument): \"\"\" Class to manage TDDFPT \"\"\" def __init__(self): self._input_tag = 'input'", "if default_value is not None: path_key = '%s/_text' % rel_path if xsd_attributes else", "xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self): return './inputPH' def get_qe_input(self, use_defaults=False): \"\"\"", "node_dict)) # Convert attributes for attr_name, value in elem.attrib.items(): logger.debug(\"Convert attribute '%s' of", "to manage Phonon XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(PhononDocument, self).__init__(", "self._document is not None: raise ConfigError(\"Configuration not loaded!\") # fortran_input = self.input_builder() return", ".converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions import ConfigError from .xsdtypes", "input_path = self.get_input_path() input_root = self.find(input_path) # Extract values from input's subtree of", "dict '{1}'\".format(elem.tag, node_dict)) # Convert attributes for attr_name, value in elem.attrib.items(): logger.debug(\"Convert attribute", "node_dict) logger.debug(\"Convert element '%s'\" % path) path_key = '%s/_text' % rel_path if schema.get_attributes(path)", "super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self): return './input' class TdDocument(QeDocument):", "'%s'\" % (attr_name, path)) path_key = '%s/%s' % (rel_path, attr_name) if path_key not", "schema.get_attributes(path) else rel_path if path_key not in qe_input: logger.debug(\"Element's path '%s' not in", "(attr_name, path)) path_key = '%s/%s' % (rel_path, attr_name) if path_key not in qe_input:", "% path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults: # Add defaults for elements", "a subclass!\") def get_qe_input(self, use_defaults=True): if self._document is None: raise ConfigError(\"Configuration not loaded!\")", "input_builder=PwInputConverter ) def get_input_path(self): return './input' class PhononDocument(QeDocument): \"\"\" Class to manage Phonon", "def __init__(self): self._input_tag = 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def", "return './input' class PhononDocument(QeDocument): \"\"\" Class to manage Phonon XML documents. 
\"\"\" def", "\"\"\" Class to manage PW XML documents. \"\"\" def __init__(self): self._input_tag = 'input'", "builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class to manage NEB XML", "# Add defaults for elements not included in input XML subtree for path", "None: path_key = '%s/_text' % rel_path if xsd_attributes else rel_path xsd_type = schema.get_element_type(path)", "rel_path = path.replace(input_path, '.') node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for", "to a Fortran input. :param filename: :return: \"\"\" with open(filename, mode='w+') as f:", "self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if not self.default_namespace in qe_nslist:", "elem.attrib.items(): logger.debug(\"Convert attribute '%s' of element '%s'\" % (attr_name, path)) path_key = '%s/%s'", "in XML schema. :param filename: :return: \"\"\" return self def write_qe_input(self, filename): \"\"\"", "# file 'LICENSE' in the root directory of the present distribution, or #", "'{1}'\".format(elem.tag, node_dict)) # Convert attributes for attr_name, value in elem.attrib.items(): logger.debug(\"Convert attribute '%s'", "XML schema. 
:param filename: :return: \"\"\" return self def write_qe_input(self, filename): \"\"\" Write", "logger.debug(\"Convert element '%s'\" % path) path_key = '%s/_text' % rel_path if schema.get_attributes(path) else", "\"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class to manage NEB XML documents.", "qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults: # Add defaults for elements not included in", "file 'LICENSE' in the root directory of the present distribution, or # http://opensource.org/licenses/MIT.", "with open(filename, mode='w+') as f: f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This is an abstract", "os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self): return './input' class TdDocument(QeDocument): \"\"\" Class to manage", "Espresso Foundation and SISSA (Scuola # Internazionale Superiore di Studi Avanzati). All rights", "__init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file = '%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)), input_builder = TD_spctInConverter ) def", "overrides get_qe_input calling super get_qe_input with use_defaults set to False. 
:param use_defaults: :return:", "not self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter not implemented for this schema {}\".format(self.default_namespace) )", "\"\"\" Class to manage turbo-spectrum inputs \"\"\" def __init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__(", "not included in input XML subtree for path in filter( lambda x: x.startswith(input_path)", "PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions import ConfigError from .xsdtypes import etree_node_to_dict,", "'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter ) def get_input_path(self): return", "Studi Avanzati). All rights reserved. # This file is distributed under the terms", "old parameters to correspondent parameter in XML schema. :param filename: :return: \"\"\" return", "manage turbo-spectrum inputs \"\"\" def __init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file = '%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)),", "input as obtained from its input builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument):", "lambda x: x.startswith(input_path) and self.find(x) is None, schema.elements ): rel_path = path.replace(input_path, '.')", "value in elem.attrib.items(): logger.debug(\"Convert attribute '%s' of element '%s'\" % (attr_name, path)) path_key", "'%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element '%s'\"", "def get_input_path(self): return './input' class PhononDocument(QeDocument): \"\"\" Class to manage Phonon XML documents.", "def __init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file = 
'%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)), input_builder = TD_spctInConverter )", "qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if not self.default_namespace in qe_nslist: raise", "parameters to correspondent parameter in XML schema. :param filename: :return: \"\"\" return self", "schema.get_attributes(path) defaults_dict = {} defaults_path_keys = [] try: # Add default values for", "rel_path if path_key not in qe_input: logger.debug(\"Element's path '%s' not in converter!\" %", "self.default_namespace = self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if not self.default_namespace", "documents. \"\"\" def __init__(self): self._input_tag = 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter", "os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self): return './inputPH' def get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input", "XmlDocument from .xsdtypes.etree import etree_iter_path logger = logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract class", "not None: path_key = '%s/_text' % rel_path if xsd_attributes else rel_path xsd_type =", "self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self): return './input' class TdDocument(QeDocument): \"\"\"", "for this schema {}\".format(self.default_namespace) ) def read_qe_input(self, filename): \"\"\" Map from a Fortran", "parameter in XML schema. 
:param filename: :return: \"\"\" return self def write_qe_input(self, filename):", "filename): \"\"\" Map from a Fortran input to XML old parameters to correspondent", "NotImplementedError(\"Converter not implemented for this schema {}\".format(self.default_namespace) ) def read_qe_input(self, filename): \"\"\" Map", "% (attr_name, path)) path_key = '%s/%s' % (rel_path, attr_name) if path_key not in", "path.replace(input_path, '.') tag = rel_path.rsplit('/', 1)[-1] xsd_attributes = schema.get_attributes(path) defaults_dict = {} defaults_path_keys", "xsd_attributes else rel_path xsd_type = schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key)", "the root directory of the present distribution, or # http://opensource.org/licenses/MIT. # Authors: <NAME>", "class SpectrumDocument(QeDocument): \"\"\" Class to manage turbo-spectrum inputs \"\"\" def __init__(self): self._input_tag =", "{} defaults_path_keys = [] try: # Add default values for attributes for attr_name,", "write_qe_input(self, filename): \"\"\" Write the XML configuration to a Fortran input. :param filename:", "'%s' of element '%s'\" % (attr_name, path)) path_key = '%s/%s' % (rel_path, attr_name)", "= '%s/%s' % (rel_path, attr_name) xsd_type = xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name] =", "turbo-spectrum inputs \"\"\" def __init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file = '%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)), input_builder", "schema based configurations. 
\"\"\" def __init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder", "is not None: path_key = '%s/_text' % rel_path if xsd_attributes else rel_path xsd_type", "= 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter ) def get_input_path(self):", "defaults_path_keys = [] try: # Add default values for attributes for attr_name, xsd_attribute", "as f: f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This is an abstract implementation, use a", ") def get_input_path(self): return './input' class TdDocument(QeDocument): \"\"\" Class to manage TDDFPT \"\"\"", "def read_qe_input(self, filename): \"\"\" Map from a Fortran input to XML old parameters", "open(filename, mode='w+') as f: f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This is an abstract implementation,", "Abstract class for XML schema based configurations. \"\"\" def __init__(self, xsd_file, input_builder): super(QeDocument,", "qe_input.get_qe_input() def load_fortran_input(self, filename): if self._document is not None: raise ConfigError(\"Configuration not loaded!\")", "XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)),", "= 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self): return './input'", "os.path from .converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions import ConfigError", "etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for node '{0}' with dict '{1}'\".format(elem.tag, node_dict))", "= '%s/_text' % rel_path if schema.get_attributes(path) else rel_path if path_key not in qe_input:", "path=input_path): rel_path = path.replace(input_path, '.') node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input", "raise NotImplemented(\"This is an abstract implementation, use a subclass!\") def get_qe_input(self, use_defaults=True): if", "is not None: path_key = '%s/%s' % (rel_path, attr_name) xsd_type = xsd_attribute.xsd_type value", "MIT License. 
See the # file 'LICENSE' in the root directory of the", "pass default_value = schema.get_element_default(path) if default_value is not None: path_key = '%s/_text' %", "default_value is not None: path_key = '%s/%s' % (rel_path, attr_name) xsd_type = xsd_attribute.xsd_type", "in qe_nslist: raise NotImplementedError(\"Converter not implemented for this schema {}\".format(self.default_namespace) ) def read_qe_input(self,", "converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults: # Add defaults for", "Add default values for attributes for attr_name, xsd_attribute in xsd_attributes.items(): default_value = xsd_attribute.get_default()", "load_fortran_input(self, filename): if self._document is not None: raise ConfigError(\"Configuration not loaded!\") # fortran_input", "in qe_input: logger.debug(\"Attribute's path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag,", "input. :param filename: :return: \"\"\" with open(filename, mode='w+') as f: f.write(self.get_qe_input()) def get_input_path(self):", "abstract implementation, use a subclass!\") def get_qe_input(self, use_defaults=True): if self._document is None: raise", "class NebDocument(QeDocument): \"\"\" Class to manage NEB XML documents. \"\"\" def __init__(self): self._input_tag", "QeDocument(XmlDocument): \"\"\" Abstract class for XML schema based configurations. 
\"\"\" def __init__(self, xsd_file,", "elements not included in input XML subtree for path in filter( lambda x:", "= self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if not self.default_namespace in", "if xsd_attributes else rel_path xsd_type = schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value", "% os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self): return './inputPH' def get_qe_input(self, use_defaults=False): \"\"\" overrides", "TdInputConverter, TD_spctInConverter from .exceptions import ConfigError from .xsdtypes import etree_node_to_dict, XmlDocument from .xsdtypes.etree", "qe_nslist: raise NotImplementedError(\"Converter not implemented for this schema {}\".format(self.default_namespace) ) def read_qe_input(self, filename):", "input to XML old parameters to correspondent parameter in XML schema. :param filename:", ":param filename: :return: \"\"\" with open(filename, mode='w+') as f: f.write(self.get_qe_input()) def get_input_path(self): raise", "coding: utf-8 -*- # # Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA", ":param filename: :return: \"\"\" return self def write_qe_input(self, filename): \"\"\" Write the XML", "Class to manage NEB XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(NebDocument,", "qe_input: logger.debug(\"Attribute's path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict)", "obtained from its input builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class", "AttributeError: pass default_value = schema.get_element_default(path) if default_value is not None: path_key = '%s/_text'", "if self._document is not None: raise ConfigError(\"Configuration not loaded!\") # fortran_input = self.input_builder()", "XML old parameters to correspondent parameter in XML schema. :param filename: :return: \"\"\"", "PhononDocument(QeDocument): \"\"\" Class to manage Phonon XML documents. \"\"\" def __init__(self): self._input_tag =", "is not None: raise ConfigError(\"Configuration not loaded!\") # fortran_input = self.input_builder() return None", "])) if not self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter not implemented for this schema", "input_root = self.find(input_path) # Extract values from input's subtree of the XML document", "schema.elements ): rel_path = path.replace(input_path, '.') tag = rel_path.rsplit('/', 1)[-1] xsd_attributes = schema.get_attributes(path)", "use a subclass!\") def get_qe_input(self, use_defaults=True): if self._document is None: raise ConfigError(\"Configuration not", "distribution, or # http://opensource.org/licenses/MIT. 
# Authors: <NAME> # import logging import os.path from", "Extract values from input's subtree of the XML document for elem, path in", "attributes for attr_name, xsd_attribute in xsd_attributes.items(): default_value = xsd_attribute.get_default() if default_value is not", "path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element '%s'\" % path) path_key = '%s/_text'", "(rel_path, attr_name) if path_key not in qe_input: logger.debug(\"Attribute's path '%s' not in converter!\"", "= self.get_input_path() input_root = self.find(input_path) # Extract values from input's subtree of the", "default_value is not None: path_key = '%s/_text' % rel_path if xsd_attributes else rel_path", "= xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except AttributeError: pass default_value = schema.get_element_default(path) if", "use_defaults: # Add defaults for elements not included in input XML subtree for", "= xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name] = value defaults_path_keys.append(path_key) except AttributeError: pass default_value", "<NAME> # import logging import os.path from .converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter,", "and self.find(x) is None, schema.elements ): rel_path = path.replace(input_path, '.') tag = rel_path.rsplit('/',", "filename: :return: \"\"\" return self def write_qe_input(self, filename): \"\"\" Write the XML configuration", "mode='w+') as f: f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This is an abstract implementation, use", "loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema = self.schema input_path = self.get_input_path() input_root = self.find(input_path)", "import logging import os.path from .converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from", 
"in elem.attrib.items(): logger.debug(\"Convert attribute '%s' of element '%s'\" % (attr_name, path)) path_key =", "\"\"\" Map from a Fortran input to XML old parameters to correspondent parameter", "reserved. # This file is distributed under the terms of the MIT License.", "defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for path_key in defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input()", "super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter ) def get_input_path(self): return '.'", "self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file = '%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)), input_builder = TD_spctInConverter ) def get_input_path(self):", "SpectrumDocument(QeDocument): \"\"\" Class to manage turbo-spectrum inputs \"\"\" def __init__(self): self._input_tag = 'input'", "\"\"\" def __init__(self): self._input_tag = 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter )", "Map from a Fortran input to XML old parameters to correspondent parameter in", "'.') tag = rel_path.rsplit('/', 1)[-1] xsd_attributes = schema.get_attributes(path) defaults_dict = {} defaults_path_keys =", "TDDFPT \"\"\" def __init__(self): self._input_tag = 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder", ") def get_input_path(self): return './inputPH' def get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input calling super", "correspondent parameter in XML schema. :param filename: :return: \"\"\" return self def write_qe_input(self,", "class PhononDocument(QeDocument): \"\"\" Class to manage Phonon XML documents. 
\"\"\" def __init__(self): self._input_tag", "read_qe_input(self, filename): \"\"\" Map from a Fortran input to XML old parameters to", "in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element '%s'\" % path)", "attr_name, value in elem.attrib.items(): logger.debug(\"Convert attribute '%s' of element '%s'\" % (attr_name, path))", "for elements not included in input XML subtree for path in filter( lambda", "self.find(x) is None, schema.elements ): rel_path = path.replace(input_path, '.') tag = rel_path.rsplit('/', 1)[-1]", "filter( lambda x: x.startswith(input_path) and self.find(x) is None, schema.elements ): rel_path = path.replace(input_path,", "to manage PW XML documents. \"\"\" def __init__(self): self._input_tag = 'input' super(PwDocument, self).__init__(", "xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace = self.schema.target_namespace qe_nslist = list(map(self.namespaces.get,", "\"\"\" return self def write_qe_input(self, filename): \"\"\" Write the XML configuration to a", "# -*- coding: utf-8 -*- # # Copyright (c), 2015-2016, Quantum Espresso Foundation", "its input builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class to manage", "'%s/_text' % rel_path if xsd_attributes else rel_path xsd_type = schema.get_element_type(path) value = xsd_type.decode(default_value)", "% (rel_path, attr_name) if path_key not in qe_input: logger.debug(\"Attribute's path '%s' not in", "Fortran input. 
:param filename: :return: \"\"\" with open(filename, mode='w+') as f: f.write(self.get_qe_input()) def", "path_key = '%s/%s' % (rel_path, attr_name) if path_key not in qe_input: logger.debug(\"Attribute's path", "import ConfigError from .xsdtypes import etree_node_to_dict, XmlDocument from .xsdtypes.etree import etree_iter_path logger =", "for path_key in defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self, filename): if", "path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults: # Add defaults for elements not", "element '%s'\" % (attr_name, path)) path_key = '%s/%s' % (rel_path, attr_name) if path_key", "a Fortran input. :param filename: :return: \"\"\" with open(filename, mode='w+') as f: f.write(self.get_qe_input())", "rel_path.rsplit('/', 1)[-1] xsd_attributes = schema.get_attributes(path) defaults_dict = {} defaults_path_keys = [] try: #", "-*- # # Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola #", "the XML document for elem, path in etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path, '.')", "of the present distribution, or # http://opensource.org/licenses/MIT. # Authors: <NAME> # import logging", "defaults_path_keys.append(path_key) for path_key in defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self, filename):", "return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class to manage NEB XML documents. \"\"\"", "defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self, filename): if self._document is not", "under the terms of the MIT License. 
See the # file 'LICENSE' in", "not None: path_key = '%s/%s' % (rel_path, attr_name) xsd_type = xsd_attribute.xsd_type value =", "path_key = '%s/_text' % rel_path if schema.get_attributes(path) else rel_path if path_key not in", "for elem, path in etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path, '.') node_dict = etree_node_to_dict(elem,", "for XML schema based configurations. \"\"\" def __init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder", "None, schema.elements ): rel_path = path.replace(input_path, '.') tag = rel_path.rsplit('/', 1)[-1] xsd_attributes =", "# http://opensource.org/licenses/MIT. # Authors: <NAME> # import logging import os.path from .converters import", "for attributes for attr_name, xsd_attribute in xsd_attributes.items(): default_value = xsd_attribute.get_default() if default_value is", "Foundation and SISSA (Scuola # Internazionale Superiore di Studi Avanzati). All rights reserved.", "\"\"\" overrides get_qe_input calling super get_qe_input with use_defaults set to False. :param use_defaults:", "import os.path from .converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions import", "inputs \"\"\" def __init__(self): self._input_tag = 'input' super(SpectrumDocument,self).__init__( xsd_file = '%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)), input_builder =", "def write_qe_input(self, filename): \"\"\" Write the XML configuration to a Fortran input. :param", "Convert attributes for attr_name, value in elem.attrib.items(): logger.debug(\"Convert attribute '%s' of element '%s'\"", "'%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults: #", "\"\"\" Class to manage NEB XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input'", "for path in filter( lambda x: x.startswith(input_path) and self.find(x) is None, schema.elements ):", "xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter ) def get_input_path(self): return '.' class SpectrumDocument(QeDocument):", "XML configuration to a Fortran input. :param filename: :return: \"\"\" with open(filename, mode='w+')", "attributes for attr_name, value in elem.attrib.items(): logger.debug(\"Convert attribute '%s' of element '%s'\" %", "def get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input calling super get_qe_input with use_defaults set to", "if self._document is None: raise ConfigError(\"Configuration not loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema =", "'%s/_text' % rel_path if schema.get_attributes(path) else rel_path if path_key not in qe_input: logger.debug(\"Element's", "XML documents. \"\"\" def __init__(self): self._input_tag = 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)),", ":param use_defaults: :return: the input as obtained from its input builder \"\"\" return", "None: path_key = '%s/%s' % (rel_path, attr_name) xsd_type = xsd_attribute.xsd_type value = xsd_type.decode(default_value)", "rel_path if xsd_attributes else rel_path xsd_type = schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] =", "of element '%s'\" % (attr_name, path)) path_key = '%s/%s' % (rel_path, attr_name) if", "qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element '%s'\" % path) path_key = '%s/_text' % rel_path", "get_input_path(self): return './input' class PhononDocument(QeDocument): \"\"\" Class to manage Phonon XML documents. \"\"\"", "PW XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' %", "super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class to manage NEB XML documents. \"\"\" def", "if schema.get_attributes(path) else rel_path if path_key not in qe_input: logger.debug(\"Element's path '%s' not", "[] try: # Add default values for attributes for attr_name, xsd_attribute in xsd_attributes.items():", "in input XML subtree for path in filter( lambda x: x.startswith(input_path) and self.find(x)", "raise ConfigError(\"Configuration not loaded!\") # fortran_input = self.input_builder() return None class PwDocument(QeDocument): \"\"\"", "logging import os.path from .converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions", "implementation, use a subclass!\") def get_qe_input(self, use_defaults=True): if self._document is None: raise ConfigError(\"Configuration", "tag, defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self, filename): if self._document is not None: raise", "ConfigError(\"Configuration not loaded!\") # fortran_input = self.input_builder() return None class PwDocument(QeDocument): \"\"\" Class", "the input as obtained from its input builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class", "path_key not in qe_input: logger.debug(\"Attribute's path '%s' not in converter!\" % path_key) continue", "schema.get_element_default(path) if default_value is not None: path_key = '%s/_text' % rel_path if xsd_attributes", "from .exceptions import ConfigError from .xsdtypes import etree_node_to_dict, XmlDocument from .xsdtypes.etree import etree_iter_path", "\"\"\" def __init__(self): self._input_tag = 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), 
input_builder=PhononInputConverter )", "self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self): return './inputPH' def get_qe_input(self, use_defaults=False):", "the present distribution, or # http://opensource.org/licenses/MIT. # Authors: <NAME> # import logging import", "xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self): return './input' class TdDocument(QeDocument): \"\"\" Class", "in qe_input: logger.debug(\"Element's path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag,", "distributed under the terms of the MIT License. See the # file 'LICENSE'", "def get_input_path(self): raise NotImplemented(\"This is an abstract implementation, use a subclass!\") def get_qe_input(self,", "if path_key not in qe_input: logger.debug(\"Attribute's path '%s' not in converter!\" % path_key)", "f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This is an abstract implementation, use a subclass!\") def", "input's subtree of the XML document for elem, path in etree_iter_path(input_root, path=input_path): rel_path", "manage Phonon XML documents. \"\"\" def __init__(self): self._input_tag = 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd'", "\"\"\" def __init__(self): self._input_tag = 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder =", "class QeDocument(XmlDocument): \"\"\" Abstract class for XML schema based configurations. \"\"\" def __init__(self,", "\"\"\" Class to manage Phonon XML documents. \"\"\" def __init__(self): self._input_tag = 'input'", "in the root directory of the present distribution, or # http://opensource.org/licenses/MIT. 
# Authors:", "# Convert attributes for attr_name, value in elem.attrib.items(): logger.debug(\"Convert attribute '%s' of element", "manage TDDFPT \"\"\" def __init__(self): self._input_tag = 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)),", "= value defaults_path_keys.append(path_key) except AttributeError: pass default_value = schema.get_element_default(path) if default_value is not", "= xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for path_key in defaults_path_keys: qe_input.set_path(path_key, tag, defaults_dict)", "input_builder self.default_namespace = self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if not", "'.') node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for node '{0}' with", "Class to manage Phonon XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(PhononDocument,", "raise ConfigError(\"Configuration not loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema = self.schema input_path = self.get_input_path()", "-*- coding: utf-8 -*- # # Copyright (c), 2015-2016, Quantum Espresso Foundation and", "return self def write_qe_input(self, filename): \"\"\" Write the XML configuration to a Fortran", "filename: :return: \"\"\" with open(filename, mode='w+') as f: f.write(self.get_qe_input()) def get_input_path(self): raise NotImplemented(\"This", "import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter from .exceptions import ConfigError from .xsdtypes import", "qe_input.set_path(path_key, tag, defaults_dict) return qe_input.get_qe_input() def load_fortran_input(self, filename): if self._document is not None:", "= path.replace(input_path, '.') node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults) logger.debug(\"Add input for node", "calling super get_qe_input with use_defaults set to False. :param use_defaults: :return: the input", "= {} defaults_path_keys = [] try: # Add default values for attributes for", "di Studi Avanzati). All rights reserved. # This file is distributed under the", "xsd_attributes = schema.get_attributes(path) defaults_dict = {} defaults_path_keys = [] try: # Add default", "Avanzati). All rights reserved. 
# This file is distributed under the terms of", "of the XML document for elem, path in etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path,", "def __init__(self): self._input_tag = 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter", "implemented for this schema {}\".format(self.default_namespace) ) def read_qe_input(self, filename): \"\"\" Map from a", "element '%s'\" % path) path_key = '%s/_text' % rel_path if schema.get_attributes(path) else rel_path", "get_qe_input(self, use_defaults=True): if self._document is None: raise ConfigError(\"Configuration not loaded!\") qe_input = self.input_builder(xml_file=self._config_file)", "input builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\" Class to manage NEB", "to manage NEB XML documents. \"\"\" def __init__(self): self._input_tag = 'input' super(NebDocument, self).__init__(", "not loaded!\") # fortran_input = self.input_builder() return None class PwDocument(QeDocument): \"\"\" Class to", "logger.debug(\"Attribute's path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert", "use_defaults: :return: the input as obtained from its input builder \"\"\" return super(PhononDocument,", "import etree_node_to_dict, XmlDocument from .xsdtypes.etree import etree_iter_path logger = logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\"", "use_defaults set to False. :param use_defaults: :return: the input as obtained from its", "manage NEB XML documents. 
\"\"\" def __init__(self): self._input_tag = 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd'", "# Extract values from input's subtree of the XML document for elem, path", "in filter( lambda x: x.startswith(input_path) and self.find(x) is None, schema.elements ): rel_path =", "def __init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace = self.schema.target_namespace qe_nslist", "qe_input: logger.debug(\"Element's path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict)", "path in filter( lambda x: x.startswith(input_path) and self.find(x) is None, schema.elements ): rel_path", "NotImplemented(\"This is an abstract implementation, use a subclass!\") def get_qe_input(self, use_defaults=True): if self._document", "else rel_path if path_key not in qe_input: logger.debug(\"Element's path '%s' not in converter!\"", "__init__(self): self._input_tag = 'input' super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self):", "= list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if not self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter", "class PwDocument(QeDocument): \"\"\" Class to manage PW XML documents. \"\"\" def __init__(self): self._input_tag", "% os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter ) def get_input_path(self): return '.' 
class SpectrumDocument(QeDocument): \"\"\"", "None: raise ConfigError(\"Configuration not loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema = self.schema input_path =", "__init__(self): self._input_tag = 'input' super(TdDocument, self).__init__( xsd_file='%s/scheme/tddfpt.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder = TdInputConverter )", "= input_builder self.default_namespace = self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if", ") def get_input_path(self): return '.' class SpectrumDocument(QeDocument): \"\"\" Class to manage turbo-spectrum inputs", "self._input_tag = 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self): return", "use_defaults=use_defaults) logger.debug(\"Add input for node '{0}' with dict '{1}'\".format(elem.tag, node_dict)) # Convert attributes", "continue qe_input.set_path(path_key, elem.tag, node_dict) logger.debug(\"Convert element '%s'\" % path) path_key = '%s/_text' %", ".xsdtypes.etree import etree_iter_path logger = logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract class for XML", "rel_path = path.replace(input_path, '.') tag = rel_path.rsplit('/', 1)[-1] xsd_attributes = schema.get_attributes(path) defaults_dict =", "get_input_path(self): return '.' 
class SpectrumDocument(QeDocument): \"\"\" Class to manage turbo-spectrum inputs \"\"\" def", "logger.debug(\"Element's path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) if", "\"\"\" def __init__(self): self._input_tag = 'input' super(NebDocument, self).__init__( xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter )", "get_input_path(self): return './inputPH' def get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input calling super get_qe_input with", "self.get_input_path() input_root = self.find(input_path) # Extract values from input's subtree of the XML", "# This file is distributed under the terms of the MIT License. See", "based configurations. \"\"\" def __init__(self, xsd_file, input_builder): super(QeDocument, self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace", "1)[-1] xsd_attributes = schema.get_attributes(path) defaults_dict = {} defaults_path_keys = [] try: # Add", "is None: raise ConfigError(\"Configuration not loaded!\") qe_input = self.input_builder(xml_file=self._config_file) schema = self.schema input_path", "): rel_path = path.replace(input_path, '.') tag = rel_path.rsplit('/', 1)[-1] xsd_attributes = schema.get_attributes(path) defaults_dict", "path '%s' not in converter!\" % path_key) continue qe_input.set_path(path_key, elem.tag, node_dict) if use_defaults:", "'%s'\" % path) path_key = '%s/_text' % rel_path if schema.get_attributes(path) else rel_path if", "'%s/%s' % (rel_path, attr_name) xsd_type = xsd_attribute.xsd_type value = xsd_type.decode(default_value) defaults_dict[attr_name] = value", "super(PhononDocument, self).__init__( xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PhononInputConverter ) def get_input_path(self): return './inputPH' def get_qe_input(self,", "= rel_path.rsplit('/', 1)[-1] xsd_attributes = 
schema.get_attributes(path) defaults_dict = {} defaults_path_keys = [] try:", "return None class PwDocument(QeDocument): \"\"\" Class to manage PW XML documents. \"\"\" def", "<reponame>QEF/qexsd # -*- coding: utf-8 -*- # # Copyright (c), 2015-2016, Quantum Espresso", "XML subtree for path in filter( lambda x: x.startswith(input_path) and self.find(x) is None,", "return './input' class TdDocument(QeDocument): \"\"\" Class to manage TDDFPT \"\"\" def __init__(self): self._input_tag", "elem.tag, node_dict) if use_defaults: # Add defaults for elements not included in input", "as obtained from its input builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults) class NebDocument(QeDocument): \"\"\"", "= logging.getLogger('qespresso') class QeDocument(XmlDocument): \"\"\" Abstract class for XML schema based configurations. \"\"\"", "'qes_lr', 'qes_spectrum' ])) if not self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter not implemented for", "(c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola # Internazionale Superiore di Studi", "super(PwDocument, self).__init__( xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self): return './input' class PhononDocument(QeDocument):", ":return: the input as obtained from its input builder \"\"\" return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults)", "\"\"\" Abstract class for XML schema based configurations. \"\"\" def __init__(self, xsd_file, input_builder):", "with use_defaults set to False. :param use_defaults: :return: the input as obtained from", "root directory of the present distribution, or # http://opensource.org/licenses/MIT. 
# Authors: <NAME> #", "elem, path in etree_iter_path(input_root, path=input_path): rel_path = path.replace(input_path, '.') node_dict = etree_node_to_dict(elem, schema,", ".xsdtypes import etree_node_to_dict, XmlDocument from .xsdtypes.etree import etree_iter_path logger = logging.getLogger('qespresso') class QeDocument(XmlDocument):", "= self.input_builder(xml_file=self._config_file) schema = self.schema input_path = self.get_input_path() input_root = self.find(input_path) # Extract", "self).__init__(xsd_file) self.input_builder = input_builder self.default_namespace = self.schema.target_namespace qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum'", "'%s/%s' % (rel_path, attr_name) if path_key not in qe_input: logger.debug(\"Attribute's path '%s' not", "os.path.dirname(os.path.abspath(__file__)), input_builder=PwInputConverter ) def get_input_path(self): return './input' class PhononDocument(QeDocument): \"\"\" Class to manage", "schema = self.schema input_path = self.get_input_path() input_root = self.find(input_path) # Extract values from", "list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ])) if not self.default_namespace in qe_nslist: raise NotImplementedError(\"Converter not", "xsd_type = schema.get_element_type(path) value = xsd_type.decode(default_value) defaults_dict[path_key.rsplit(\"/\")[-1]] = value defaults_path_keys.append(path_key) for path_key in", "'./input' class TdDocument(QeDocument): \"\"\" Class to manage TDDFPT \"\"\" def __init__(self): self._input_tag =", "fortran_input = self.input_builder() return None class PwDocument(QeDocument): \"\"\" Class to manage PW XML", "return './inputPH' def get_qe_input(self, use_defaults=False): \"\"\" overrides get_qe_input calling super get_qe_input with use_defaults", "input for node '{0}' with dict '{1}'\".format(elem.tag, node_dict)) # Convert attributes for attr_name,", "Internazionale Superiore di Studi Avanzati). All rights reserved. 
# This file is distributed", "configuration to a Fortran input. :param filename: :return: \"\"\" with open(filename, mode='w+') as", "\"\"\" Write the XML configuration to a Fortran input. :param filename: :return: \"\"\"", ") def read_qe_input(self, filename): \"\"\" Map from a Fortran input to XML old", "for attr_name, value in elem.attrib.items(): logger.debug(\"Convert attribute '%s' of element '%s'\" % (attr_name,", "path_key not in qe_input: logger.debug(\"Element's path '%s' not in converter!\" % path_key) continue", "the XML configuration to a Fortran input. :param filename: :return: \"\"\" with open(filename,", "% os.path.dirname(os.path.abspath(__file__)), input_builder=NebInputConverter ) def get_input_path(self): return './input' class TdDocument(QeDocument): \"\"\" Class to", "xsd_attribute.get_default() if default_value is not None: path_key = '%s/%s' % (rel_path, attr_name) xsd_type", "for attr_name, xsd_attribute in xsd_attributes.items(): default_value = xsd_attribute.get_default() if default_value is not None:", "TdDocument(QeDocument): \"\"\" Class to manage TDDFPT \"\"\" def __init__(self): self._input_tag = 'input' super(TdDocument,", "is None, schema.elements ): rel_path = path.replace(input_path, '.') tag = rel_path.rsplit('/', 1)[-1] xsd_attributes", "subtree of the XML document for elem, path in etree_iter_path(input_root, path=input_path): rel_path =", "directory of the present distribution, or # http://opensource.org/licenses/MIT. # Authors: <NAME> # import", "ConfigError from .xsdtypes import etree_node_to_dict, XmlDocument from .xsdtypes.etree import etree_iter_path logger = logging.getLogger('qespresso')", "of the MIT License. See the # file 'LICENSE' in the root directory" ]
[ "import serializers class ExampleSerializer(serializers.ModelSerializer): class Meta: model = TestModel fields = ('id', 'created',", "data.models import TestModel from rest_framework import serializers class ExampleSerializer(serializers.ModelSerializer): class Meta: model =", "serializers class ExampleSerializer(serializers.ModelSerializer): class Meta: model = TestModel fields = ('id', 'created', 'updated',", "ExampleSerializer(serializers.ModelSerializer): class Meta: model = TestModel fields = ('id', 'created', 'updated', 'method_field') method_field", "rest_framework import serializers class ExampleSerializer(serializers.ModelSerializer): class Meta: model = TestModel fields = ('id',", "from rest_framework import serializers class ExampleSerializer(serializers.ModelSerializer): class Meta: model = TestModel fields =", "model = TestModel fields = ('id', 'created', 'updated', 'method_field') method_field = serializers.SerializerMethodField() def", "from data.models import TestModel from rest_framework import serializers class ExampleSerializer(serializers.ModelSerializer): class Meta: model", "class Meta: model = TestModel fields = ('id', 'created', 'updated', 'method_field') method_field =", "= TestModel fields = ('id', 'created', 'updated', 'method_field') method_field = serializers.SerializerMethodField() def get_method_field(self,", "TestModel fields = ('id', 'created', 'updated', 'method_field') method_field = serializers.SerializerMethodField() def get_method_field(self, obj):", "Meta: model = TestModel fields = ('id', 'created', 'updated', 'method_field') method_field = serializers.SerializerMethodField()", "fields = ('id', 'created', 'updated', 'method_field') method_field = serializers.SerializerMethodField() def get_method_field(self, obj): return", "class ExampleSerializer(serializers.ModelSerializer): class Meta: model = TestModel fields = ('id', 'created', 'updated', 'method_field')", "import TestModel from rest_framework import serializers class 
ExampleSerializer(serializers.ModelSerializer): class Meta: model = TestModel", "= ('id', 'created', 'updated', 'method_field') method_field = serializers.SerializerMethodField() def get_method_field(self, obj): return 'works!'", "TestModel from rest_framework import serializers class ExampleSerializer(serializers.ModelSerializer): class Meta: model = TestModel fields" ]
[ "slots = Slots(\"keke\", 24) slots.job = \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 #", "A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def super(cls, inst): mro = inst.__class__.mro()", "sqrt, pow class Point(object): def __init__(self, x, y): print 'initialize x and y'", "# 元类主要做了三件事: # 拦截类的创建 # 修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type", "super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # / \\ # / \\ # A B", "function(item),将返回值为 True 的 # item 组成一个 List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器) 返回。 #", "'after yield' # # with point(3, 4) as value: # print 'value is:", "\"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 # x = u\"你好\" type(x) = unicode #", "x, y = y, x + y yield x f = fib() for", "(\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # / \\ # / \\", "+----------+ # | ascii| decode | | # | str gbk +------------>+ unicode", "# 将 function 依次作用于 sequnce 的每个 item,即 function(item),将返回值为 True 的 # item 组成一个", "把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值,", "# 转化为字典 _attrs['echo'] = lambda self, phrase: phrase # 增加了一个 echo 方法 #", "\"\"\" # 通过yield实现 # from contextlib import contextmanager # # @contextmanager # def", "yield' # # with point(3, 4) as value: # print 'value is: %s'", "value in _attrs) # 转化为字典 _attrs['echo'] = lambda self, phrase: phrase # 增加了一个", "type.__new__(cls, name, bases, _attrs) # 返回创建后的类 # py2 class Foo(object): __metaclass__ = PrefixMetaclass", "+ 1] 事实上super里面实现的是:获取 inst 的 MRO 列表 查找 cls 在当前 MRO 列表中的 index,", "除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases, _attrs) # 返回创建后的类 # py2 class Foo(object): __metaclass__", "# Python2 中有两种和字符串相关的类型:str 和 unicode # +----------+ 
+----------+ # | ascii| decode |", "sequence[, initial]) # 先将 sequence 的前两个 item 传给 function,即 function(item1, item2),函数的返回值和 # sequence", "的 # item 组成一个 List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝 #", "元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+ # | | instance of", "'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str 和 unicode # +----------+ +----------+ # | ascii|", "* 4 # filter 函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda x: x % 2", "copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep, \" \", shadow_copy) # 偏函数 \"\"\" from functools", "# func(1, 2, 3, 4, 5, 6) x=1, y=2, z=3, args=(4, 5, 6),", "UnicodeDecodeError。 # >>> s = '你好' # str 类型, utf-8 编码 # >>>", ">>> str(u_str) # Traceback (most recent call last): # 正确做法 str(u_str.encode('utf-8')) # 参数魔法", "| # +----------+ +----------+ # 在python2中,x = \"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 #", "last): # 正确做法 str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x, y,", "= x, y def __enter__(self): print \"Entering context\" return self def __exit__(self, type,", "'encoding': 'ascii' 默认使用ascii编码 # x = b\"hello\" chardet.detect(x), 'encoding': 'ascii' # x =", "\"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是 __next__() 方法) #", "value # # # output # before yield # value is: 25 #", "__enter__(self): print \"Entering context\" return self def __exit__(self, type, value, traceback): print \"Exiting", "value, traceback): print \"Exiting context\" def get_distance(self): distance = sqrt(pow(self.x, 2) + pow(self.y,", "B # \\ / # \\ / # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B,", "super(cls, inst): mro = inst.__class__.mro() return mro[mro.index(cls) + 1] 查找 cls 在当前 MRO", "= \"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 # x = b\"hello\" chardet.detect(x), 'encoding': 'ascii'", "# 
sequence 的下一个 item 再传给 function, reduce(lambda x, y: x * y, [1,", "# py3 # class Foo(metaclass=PrefixMetaclass): # name = 'foo' # def bar(self): #", "slots。\" slots = Slots(\"keke\", 24) slots.job = \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板", "with point(3, 4) as value: # print 'value is: %s' % value #", "bases, _attrs) # 返回创建后的类 # py2 class Foo(object): __metaclass__ = PrefixMetaclass name =", "的三个参数,即 type, value 和 traceback 都为 None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。", "def __exit__(self, type, value, traceback): print \"Exiting context\" def get_distance(self): distance = sqrt(pow(self.x,", "**{}, 打包,使用时解包 # func(1, 2, 3, 4, 5, 6) x=1, y=2, z=3, args=(4,", "item 再传给 function, reduce(lambda x, y: x * y, [1, 2, 3, 4])", "\"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建 # 修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自", "y, [1, 2, 3, 4]) # 相当于 ((1 * 2) * 3) *", "with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有", "# 相当于from multiprocessing.dummy import Process pool = ThreadPool(5) pool.apply_async(lambda x: x * x,", "+----------+ +----------+ +----------+ class PrefixMetaclass(type): def __new__(cls, name, bases, attrs): # 给所有属性和方法前面加上前缀 my_", "x, y): print 'initialize x and y' self.x, self.y = x, y def", "# yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到", "unicode 类型 # >>> s + u # 会进行隐式转换,即 s.decode('ascii') + u #", "echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases, _attrs) # 返回创建后的类 #", "# / \\ # / \\ # A B # \\ / #", "map(function, sequence) # 对 sequence 中的 item 依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda x:", "item 依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda x: x * x, [1, 2, 3,", "- y f = partial(subtraction, 4) # 4 赋给了 x partial 
的功能:固定函数参数,返回一个新的函数 \"\"\"", "2, 3, 4, 5, 6])) # 将 function 依次作用于 sequnce 的每个 item,即 function(item),将返回值为", "# from contextlib import contextmanager # # @contextmanager # def point(x, y): #", "# 类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+ # | | instance of | |", "shadow_copy = [1, 2, 3, [4, 5, 6]] sha = shadow_copy.copy() print(sha, \"", "5, 6]] sha = shadow_copy.copy() print(sha, \" \", shadow_copy) # sha[0] = 100", "\", shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep, \" \", shadow_copy) #", "语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib():", "# 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii # 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 #", "'value is: %s' % value # # # output # before yield #", "inst): mro = inst.__class__.mro() return mro[mro.index(cls) + 1] 查找 cls 在当前 MRO 列表中的", "x * x, [1, 2, 3, 4]) # 使用 lambda lamda args: #", "context\" return self def __exit__(self, type, value, traceback): print \"Exiting context\" def get_distance(self):", "的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是 __next__() 方法)", "recent call last): # 正确做法 s.decode('utf-8') + u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2", "来创建它 # 字符编码 python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys >>>", "utf8 |<------------| | # | 字节码 | encode | | # +----------+ +----------+", "= (x for x in range(5)) 生成器函数 含有 yield 关键字的函数 # yield 把函数变成了一个生成器。", "https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys >>> sys.getdefaultencoding() py2 'ascii' py3 'utf-8' \"\"\" #", "# 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它 # 字符编码 python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html", "(取决于 sequnce 
的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响", "self, phrase: phrase # 增加了一个 echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name,", "f = partial(subtraction, 4) # 4 赋给了 x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器", "# # with point(3, 4) as value: # print 'value is: %s' %", "value in attrs.items()) _attrs = dict((name, value) for name, value in _attrs) #", "相当于from multiprocessing.dummy import Process pool = ThreadPool(5) pool.apply_async(lambda x: x * x, (\"args1\",", "| | # | str gbk +------------>+ unicode + # | utf8 |<------------|", "y = 0, 1 while True: x, y = y, x + y", "\\ # / \\ # A B # \\ / # \\ /", "py3 'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str 和 unicode # +----------+ +----------+ # |", "生成器函数 含有 yield 关键字的函数 # yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象;", "| 字节码 | encode | | # +----------+ +----------+ # 在python2中,x = \"hello\",", "pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事: #", "3, [4, 5, 6]] sha = shadow_copy.copy() print(sha, \" \", shadow_copy) # sha[0]", "| str gbk +------------>+ unicode + # | utf8 |<------------| | # |", "sha[0] = 100 # print(sha, \" \", shadow_copy) # sha[3][0] = \"shadow\" #", "import ThreadPool # 相当于from multiprocessing.dummy import Process pool = ThreadPool(5) pool.apply_async(lambda x: x", "中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers = (x for x", "# 生成器 # 它有两种构造方式:生成器表达式,numbers = (x for x in range(5)) 生成器函数 含有 yield", "slots.job = \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+ #", "\"\"\" # Python2 中有两种和字符串相关的类型:str 和 unicode # +----------+ +----------+ # | ascii| decode", "args=(4, 
5, 6), kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function, sequence)", "# | instance +------------>+ class +------------>+ metaclass| # | | | | |", "# 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function, sequence) # 对 sequence 中的", "| | | | # +----------+ +----------+ +----------+ class PrefixMetaclass(type): def __new__(cls, name,", "100 # print(sha, \" \", shadow_copy) # sha[3][0] = \"shadow\" # print(sha, \"", "pool.apply_async(lambda x: x * x, (\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base #", "Foo(object): __metaclass__ = PrefixMetaclass name = 'foo' def bar(self): # print 'bar' pass", "\"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+ # | |", "| instance of | | instance of | | # | instance +------------>+", "| # | 字节码 | encode | | # +----------+ +----------+ # 在python2中,x", "print(sha, \" \", shadow_copy) # sha[0] = 100 # print(sha, \" \", shadow_copy)", "sequence 的下一个 item 再传给 function, reduce(lambda x, y: x * y, [1, 2,", "**kwargs): *() **{}, 打包,使用时解包 # func(1, 2, 3, 4, 5, 6) x=1, y=2,", "# 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib(): x,", "inst.__class__.mro() return mro[mro.index(cls) + 1] 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index", "*args, **kwargs): *() **{}, 打包,使用时解包 # func(1, 2, 3, 4, 5, 6) x=1,", "x, (\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # / \\ # /", "u'世界' # unicode 类型 # >>> s + u # 会进行隐式转换,即 s.decode('ascii') +", "正确做法 str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x, y, z=0, *args,", "unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, 
sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str", "print 'value is: %s' % value # # # output # before yield", "# def bar(self): # # print 'bar' # pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\"", "返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它 # 字符编码 python2 和 python3 #", "# 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它 # 字符编码 python2 和 python3", "# 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy = [1, 2, 3,", "function 依次作用于 sequnce 的每个 item,即 function(item),将返回值为 True 的 # item 组成一个 List/String/Tuple (取决于", "name, value in attrs.items()) _attrs = dict((name, value) for name, value in _attrs)", "y): return x - y f = partial(subtraction, 4) # 4 赋给了 x", "的普通类意味着调用 type 来创建它 # 字符编码 python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import", "* y # print 'after yield' # # with point(3, 4) as value:", "s = '你好' # str 类型, utf-8 编码 # >>> u = u'世界'", "= shadow_copy.copy() print(sha, \" \", shadow_copy) # sha[0] = 100 # print(sha, \"", "instance +------------>+ class +------------>+ metaclass| # | | | | | | #", "# 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。 # 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。", "和 __exit__ 方法。 # 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在", "True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。 # 一个对象若只被弱引用所引用,则被认为是不可访问(或弱可访问)的,并因此可能在任何时刻被回收.", "== 0, [1, 2, 3, 4, 5, 6])) # 将 function 依次作用于 sequnce", "x: x % 2 == 0, [1, 2, 3, 4, 5, 6])) #", "\"name\", \"age\" def __init__(self, name, age): self.name = name self.age = age \"使用", "3, 4]) # 相当于 ((1 * 2) * 3) * 4 # filter", "# 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value 和 traceback 都为 None。如果发生异常, # 返回 True", "= 
inst.__class__.mro() return mro[mro.index(cls) + 1] 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即", "# | ascii| decode | | # | str gbk +------------>+ unicode +", "import contextmanager # # @contextmanager # def point(x, y): # print 'before yield'", "# 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str = u'你好' # >>> str(u_str)", "\", shadow_copy) # sha[3][0] = \"shadow\" # print(sha, \" \", shadow_copy) deep =", "= 0, 1 while True: x, y = y, x + y yield", "self.y = x, y def __enter__(self): print \"Entering context\" return self def __exit__(self,", "# # @contextmanager # def point(x, y): # print 'before yield' # yield", "partial def subtraction(x, y): return x - y f = partial(subtraction, 4) #", "事实上super里面实现的是:获取 inst 的 MRO 列表 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index", "+ name, value) for name, value in attrs.items()) _attrs = dict((name, value) for", "# https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys >>> sys.getdefaultencoding() py2 'ascii' py3 'utf-8' \"\"\"", "self.name = name self.age = age \"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__", "pool = ThreadPool(5) pool.apply_async(lambda x: x * x, (\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html", "is: 25 # after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。 # 通常,我们使用", "import Process pool = ThreadPool(5) pool.apply_async(lambda x: x * x, (\"args1\", 'args2',)) #", "sequnce 的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 #", "= ThreadPool(5) pool.apply_async(lambda x: x * x, (\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html #", "Traceback (most recent call last): # 正确做法 str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 #", "再传给 function, reduce(lambda x, y: x * y, [1, 2, 3, 4]) #", 
"它有两种构造方式:生成器表达式,numbers = (x for x in range(5)) 生成器函数 含有 yield 关键字的函数 # yield", "print 'before yield' # yield x * x + y * y #", "在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取 inst 的 MRO 列表", "上下文管理器 \"\"\" from math import sqrt, pow class Point(object): def __init__(self, x, y):", "return distance \"\"\" # 通过yield实现 # from contextlib import contextmanager # # @contextmanager", "after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。 # 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景,", "成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s = '你好' # str 类型, utf-8", "# 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+ # | | instance", "in f: if key < 10: print(key) # 上下文管理器 \"\"\" from math import", "= \"name\", \"age\" def __init__(self, name, age): self.name = name self.age = age", "attrs.items()) _attrs = dict((name, value) for name, value in _attrs) # 转化为字典 _attrs['echo']", "x = \"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 # x = u\"你好\" type(x) =", "unicode 类型的字符串操作时,Python2 一律都把 str 解码(decode) # 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s", "# 上下文管理器 \"\"\" from math import sqrt, pow class Point(object): def __init__(self, x,", "name, age): self.name = name self.age = age \"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间,", "unicode # +----------+ +----------+ # | ascii| decode | | # | str", "lamda args: # reduce(function, sequence[, initial]) # 先将 sequence 的前两个 item 传给 function,即", "在python2中,x = \"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 # x = b\"hello\" chardet.detect(x), 'encoding':", "方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers = (x for x in range(5))", "10: print(key) # 上下文管理器 \"\"\" from math import sqrt, pow class Point(object): def", "phrase # 增加了一个 echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases, _attrs)", "会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 
__metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建 #", "4 # filter 函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda x: x % 2 ==", "和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys >>> sys.getdefaultencoding() py2 'ascii' py3", "| encode | | # +----------+ +----------+ # 在python2中,x = \"hello\", chardet.detect(x), 'encoding':", "x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是", "map(lambda x: x * x, [1, 2, 3, 4]) # 使用 lambda lamda", "class Foo(object): __metaclass__ = PrefixMetaclass name = 'foo' def bar(self): # print 'bar'", "bar(self): # print 'bar' pass # py3 # class Foo(metaclass=PrefixMetaclass): # name =", "# 4 赋给了 x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和", "Base # / \\ # / \\ # A B # \\ /", "sys >>> sys.getdefaultencoding() py2 'ascii' py3 'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str 和 unicode", "y): print 'initialize x and y' self.x, self.y = x, y def __enter__(self):", "slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots = Slots(\"keke\", 24) slots.job = \"computer\" #", "def bar(self): # print 'bar' pass # py3 # class Foo(metaclass=PrefixMetaclass): # name", "Python2 中有两种和字符串相关的类型:str 和 unicode # +----------+ +----------+ # | ascii| decode | |", "# 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next()", "str gbk +------------>+ unicode + # | utf8 |<------------| | # | 字节码", "y def __enter__(self): print \"Entering context\" return self def __exit__(self, type, value, traceback):", "u\"你好\" type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8", "x, [1, 2, 3, 
4]) # 使用 lambda lamda args: # reduce(function, sequence[,", "copy shadow_copy = [1, 2, 3, [4, 5, 6]] sha = shadow_copy.copy() print(sha,", "# sha[3][0] = \"shadow\" # print(sha, \" \", shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0]", "当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib(): x, y = 0,", "+ 1] 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取", "赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy =", "fib(): x, y = 0, 1 while True: x, y = y, x", "| | # | instance +------------>+ class +------------>+ metaclass| # | | |", "key in f: if key < 10: print(key) # 上下文管理器 \"\"\" from math", "1] \"\"\" # __slots__ class Slots(object): __slots__ = \"name\", \"age\" def __init__(self, name,", "默认使用ascii编码 # x = b\"hello\" chardet.detect(x), 'encoding': 'ascii' # x = \"你好\", chardet.detect(x)", "# 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function, sequence) # 对 sequence 中的 item 依次执行", "py2 'ascii' py3 'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str 和 unicode # +----------+ +----------+", "不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots = Slots(\"keke\", 24) slots.job", "在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function, sequence) # 对 sequence 中的 item 依次执行 function(item),并将结果组成一个", "List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 #", "# \\ / # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B,", "bar(self): # # print 'bar' # pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\"", "\"\"\" from functools import partial def subtraction(x, y): return x - y f", "4, 
5, 6])) # 将 function 依次作用于 sequnce 的每个 item,即 function(item),将返回值为 True 的", "x * y, [1, 2, 3, 4]) # 相当于 ((1 * 2) *", "cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取 inst 的 MRO", "\"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建", "类型的字符串操作时,Python2 一律都把 str 解码(decode) # 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s =", "+----------+ +----------+ class PrefixMetaclass(type): def __new__(cls, name, bases, attrs): # 给所有属性和方法前面加上前缀 my_ _attrs", "yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib(): x, y = 0, 1 while True: x,", "coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str 类型和 unicode", "Process pool = ThreadPool(5) pool.apply_async(lambda x: x * x, (\"args1\", 'args2',)) # super函数", "语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。 # 一个对象若只被弱引用所引用,则被认为是不可访问(或弱可访问)的,并因此可能在任何时刻被回收. 
# 在 Python", "# | | | | | | # +----------+ +----------+ +----------+ class PrefixMetaclass(type):", "__init__(self, name, age): self.name = name self.age = age \"使用 __slots__ 来告诉 Python", "A B # \\ / # \\ / # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A,", "y, z=0, *args, **kwargs): *() **{}, 打包,使用时解包 # func(1, 2, 3, 4, 5,", "print(key) # 上下文管理器 \"\"\" from math import sqrt, pow class Point(object): def __init__(self,", "Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def super(cls, inst): mro = inst.__class__.mro() return mro[mro.index(cls)", "__new__(cls, name, bases, attrs): # 给所有属性和方法前面加上前缀 my_ _attrs = (('my_' + name, value)", "# output # before yield # value is: 25 # after yield #", "class PrefixMetaclass(type): def __new__(cls, name, bases, attrs): # 给所有属性和方法前面加上前缀 my_ _attrs = (('my_'", "4]) # 相当于 ((1 * 2) * 3) * 4 # filter 函数用于过滤元素,filter(function,", "4) as value: # print 'value is: %s' % value # # #", "# 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给", "Base] def super(cls, inst): mro = inst.__class__.mro() return mro[mro.index(cls) + 1] 查找 cls", "partial(subtraction, 4) # 4 赋给了 x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator", "def __new__(cls, name, bases, attrs): # 给所有属性和方法前面加上前缀 my_ _attrs = (('my_' + name,", "| | # +----------+ +----------+ +----------+ class PrefixMetaclass(type): def __new__(cls, name, bases, attrs):", "sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2", "https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+ # | | instance of |", "y): # print 'before yield' # yield x * x + y *", "和 traceback 都为 None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 #", 
"及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy = [1, 2, 3, [4,", "as 字句中的变量,如果有 as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value", "# 使用 lambda lamda args: # reduce(function, sequence[, initial]) # 先将 sequence 的前两个", "y # print 'after yield' # # with point(3, 4) as value: #", "# __slots__ class Slots(object): __slots__ = \"name\", \"age\" def __init__(self, name, age): self.name", "字节码 | encode | | # +----------+ +----------+ # 在python2中,x = \"hello\", chardet.detect(x),", "__exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value 和 traceback 都为 None。如果发生异常, #", "加上父类的 slots。\" slots = Slots(\"keke\", 24) slots.job = \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html #", "| | instance of | | instance of | | # | instance", "# >>> s + u # 会进行隐式转换,即 s.decode('ascii') + u # Traceback (most", "返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。 #", "# 在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2 一律都把 str 解码(decode) # 成 unicode 再运算,这时就很容易出现", "get_distance(self): distance = sqrt(pow(self.x, 2) + pow(self.y, 2)) return distance \"\"\" # 通过yield实现", "as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value 和 traceback", "from functools import partial def subtraction(x, y): return x - y f =", "# 它有两种构造方式:生成器表达式,numbers = (x for x in range(5)) 生成器函数 含有 yield 关键字的函数 #", "key < 10: print(key) # 上下文管理器 \"\"\" from math import sqrt, pow class", "str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii # 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str", "| | | | | # +----------+ +----------+ +----------+ class PrefixMetaclass(type): def __new__(cls,", "的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 
及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变", "partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是 __next__()", "UnicodeEncodeError。 # >>> u_str = u'你好' # >>> str(u_str) # Traceback (most recent", "range(5)) 生成器函数 含有 yield 关键字的函数 # yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 #", "age \"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的", "方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases, _attrs) # 返回创建后的类 # py2", "# # # output # before yield # value is: 25 # after", "类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+ # | | instance of | | instance", "# 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值", "# py2 class Foo(object): __metaclass__ = PrefixMetaclass name = 'foo' def bar(self): #", "0, 1 while True: x, y = y, x + y yield x", "= y, x + y yield x f = fib() for key in", "生成器 # 它有两种构造方式:生成器表达式,numbers = (x for x in range(5)) 生成器函数 含有 yield 关键字的函数", "chardet.detect(x), 'encoding': 'ascii' # x = \"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 # x", "= [1, 2, 3, [4, 5, 6]] sha = shadow_copy.copy() print(sha, \" \",", "# | str gbk +------------>+ unicode + # | utf8 |<------------| | #", "# 通过yield实现 # from contextlib import contextmanager # # @contextmanager # def point(x,", "\", shadow_copy) # sha[0] = 100 # print(sha, \" \", shadow_copy) # sha[3][0]", "y f = partial(subtraction, 4) # 4 赋给了 x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" #", "# 这种函数称之为高阶函数 # map(function, sequence) # 对 sequence 中的 item 依次执行 function(item),并将结果组成一个 List", "= u\"你好\" type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding()", "+ 1] \"\"\" # __slots__ class Slots(object): 
__slots__ = \"name\", \"age\" def __init__(self,", "@contextmanager # def point(x, y): # print 'before yield' # yield x *", "# 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。 # 一个对象若只被弱引用所引用,则被认为是不可访问(或弱可访问)的,并因此可能在任何时刻被回收. # 在 Python 中,当一个对象的引用数目为 0 的时候,才会被从内存中回收。但是被循环引用呢?", "0, [1, 2, 3, 4, 5, 6])) # 将 function 依次作用于 sequnce 的每个", "decode | | # | str gbk +------------>+ unicode + # | utf8", "通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as", "[1, 2, 3, [4, 5, 6]] sha = shadow_copy.copy() print(sha, \" \", shadow_copy)", "value 和 traceback 都为 None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用", "6), kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function, sequence) # 对", "转化为字典 _attrs['echo'] = lambda self, phrase: phrase # 增加了一个 echo 方法 # type", "= copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep, \" \", shadow_copy) # 偏函数 \"\"\" from", "+ u # 会进行隐式转换,即 s.decode('ascii') + u # Traceback (most recent call last):", "Slots(object): __slots__ = \"name\", \"age\" def __init__(self, name, age): self.name = name self.age", "\"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def super(cls, inst): mro", "改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy = [1, 2, 3, [4, 5, 6]]", "from math import sqrt, pow class Point(object): def __init__(self, x, y): print 'initialize", "表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。 # 一个对象若只被弱引用所引用,则被认为是不可访问(或弱可访问)的,并因此可能在任何时刻被回收. 
#", "* y, [1, 2, 3, 4]) # 相当于 ((1 * 2) * 3)", "列表中的 index, 并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取 inst 的 MRO 列表 查找 cls", "生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来;", "point(x, y): # print 'before yield' # yield x * x + y", "+ # | utf8 |<------------| | # | 字节码 | encode | |", "/ # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def", "mro = inst.__class__.mro() return mro[mro.index(cls) + 1] 查找 cls 在当前 MRO 列表中的 index,", "\"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots = Slots(\"keke\", 24) slots.job =", "name = 'foo' # def bar(self): # # print 'bar' # pass \"Python", "initial]) # 先将 sequence 的前两个 item 传给 function,即 function(item1, item2),函数的返回值和 # sequence 的下一个", "3, 4, 5, 6])) # 将 function 依次作用于 sequnce 的每个 item,即 function(item),将返回值为 True", "执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; #", "type, value, traceback): print \"Exiting context\" def get_distance(self): distance = sqrt(pow(self.x, 2) +", "# after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。 # 通常,我们使用 with 语句调用上下文管理器。with", "True 的 # item 组成一个 List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝", "依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2 一律都把", "# @contextmanager # def point(x, y): # print 'before yield' # yield x", "6) x=1, y=2, z=3, args=(4, 5, 6), kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, #", "__metaclass__ = PrefixMetaclass name = 'foo' def bar(self): # print 'bar' pass #", ">>> sys.getdefaultencoding() py2 'ascii' py3 'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str 和 unicode #", 
"def __init__(self, x, y): print 'initialize x and y' self.x, self.y = x,", "x - y f = partial(subtraction, 4) # 4 赋给了 x partial 的功能:固定函数参数,返回一个新的函数", "f: if key < 10: print(key) # 上下文管理器 \"\"\" from math import sqrt,", "1] 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取 inst", "| | instance of | | # | instance +------------>+ class +------------>+ metaclass|", "item2),函数的返回值和 # sequence 的下一个 item 再传给 function, reduce(lambda x, y: x * y,", "# def point(x, y): # print 'before yield' # yield x * x", "shadow_copy.copy() print(sha, \" \", shadow_copy) # sha[0] = 100 # print(sha, \" \",", ">>> s = '你好' # str 类型, utf-8 编码 # >>> u =", "会默认使用 ascii # 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str = u'你好' #", "# >>> u_str = u'你好' # >>> str(u_str) # Traceback (most recent call", "py3 # class Foo(metaclass=PrefixMetaclass): # name = 'foo' # def bar(self): # #", "返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系", "= u'你好' # >>> str(u_str) # Traceback (most recent call last): # 正确做法", "的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib(): x, y = 0, 1 while", "\"\"\" # __slots__ class Slots(object): __slots__ = \"name\", \"age\" def __init__(self, name, age):", "b\"hello\" chardet.detect(x), 'encoding': 'ascii' # x = \"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 #", "multiprocessing.pool import ThreadPool # 相当于from multiprocessing.dummy import Process pool = ThreadPool(5) pool.apply_async(lambda x:", "z=0, *args, **kwargs): *() **{}, 打包,使用时解包 # func(1, 2, 3, 4, 5, 6)", "in range(5)) 生成器函数 含有 yield 关键字的函数 # yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。", "dict((name, value) for name, value in _attrs) # 转化为字典 _attrs['echo'] = lambda self,", "= \"shadow\" # print(sha, \" \", shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\"", "# super函数 
https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # / \\ # / \\ # A", "shadow_copy) # 偏函数 \"\"\" from functools import partial def subtraction(x, y): return x", "ascii # 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str = u'你好' # >>>", "# sha[0] = 100 # print(sha, \" \", shadow_copy) # sha[3][0] = \"shadow\"", "import partial def subtraction(x, y): return x - y f = partial(subtraction, 4)", "return mro[mro.index(cls) + 1] 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index +", "item,即 function(item),将返回值为 True 的 # item 组成一个 List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器) 返回。", "+ u # Traceback (most recent call last): # 正确做法 s.decode('utf-8') + u", "的每个 item,即 function(item),将返回值为 True 的 # item 组成一个 List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器)", "yield x f = fib() for key in f: if key < 10:", "print 'bar' # pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\"", "# __enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。", "bases, attrs): # 给所有属性和方法前面加上前缀 my_ _attrs = (('my_' + name, value) for name,", "然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。", "线程池 from multiprocessing.pool import ThreadPool # 相当于from multiprocessing.dummy import Process pool = ThreadPool(5)", "shadow_copy) # sha[3][0] = \"shadow\" # print(sha, \" \", shadow_copy) deep = copy.deepcopy(shadow_copy)", "sequnce) even_num = list(filter(lambda x: x % 2 == 0, [1, 2, 3,", "\" \", shadow_copy) # 偏函数 \"\"\" from functools import partial def subtraction(x, y):", "y = y, x + y yield x f = fib() for key", "子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def super(cls, inst): mro =", "print 'bar' pass # py3 # class 
Foo(metaclass=PrefixMetaclass): # name = 'foo' #", "'bar' pass # py3 # class Foo(metaclass=PrefixMetaclass): # name = 'foo' # def", "u'你好' # >>> str(u_str) # Traceback (most recent call last): # 正确做法 str(u_str.encode('utf-8'))", "将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str = u'你好' # >>> str(u_str) #", "# print 'value is: %s' % value # # # output # before", "ThreadPool(5) pool.apply_async(lambda x: x * x, (\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base", "# C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def super(cls,", "Traceback (most recent call last): # 正确做法 s.decode('utf-8') + u # 如果函数或类等对象接收的是 str", "\"shadow\" print(deep, \" \", shadow_copy) # 偏函数 \"\"\" from functools import partial def", "deep = copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep, \" \", shadow_copy) # 偏函数 \"\"\"", "字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value 和 traceback 都为", "import copy shadow_copy = [1, 2, 3, [4, 5, 6]] sha = shadow_copy.copy()", "\"\"\" >>> import sys >>> sys.getdefaultencoding() py2 'ascii' py3 'utf-8' \"\"\" # Python2", "pow class Point(object): def __init__(self, x, y): print 'initialize x and y' self.x,", "of | | instance of | | # | instance +------------>+ class +------------>+", "语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 as 字句的话。", "before yield # value is: 25 # after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和", "type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 #", "# x = \"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 # x = u\"你好\" type(x)", "# | utf8 |<------------| | # | 字节码 | encode | | #", "# 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 
及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy = [1,", "_attrs = (('my_' + name, value) for name, value in attrs.items()) _attrs =", "如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii # 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>>", "打包,使用时解包 # func(1, 2, 3, 4, 5, 6) x=1, y=2, z=3, args=(4, 5,", "+----------+ +----------+ # | ascii| decode | | # | str gbk +------------>+", "方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value 和 traceback 都为 None。如果发生异常, # 返回", "for name, value in attrs.items()) _attrs = dict((name, value) for name, value in", "|<------------| | # | 字节码 | encode | | # +----------+ +----------+ #", "| instance of | | # | instance +------------>+ class +------------>+ metaclass| #", "2, 3, 4]) # 相当于 ((1 * 2) * 3) * 4 #", "print(deep, \" \", shadow_copy) # 偏函数 \"\"\" from functools import partial def subtraction(x,", "__enter__ 和 __exit__ 方法。 # 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__", "= unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 # 在进行同时包含", "even_num = list(filter(lambda x: x % 2 == 0, [1, 2, 3, 4,", "# 返回创建后的类 # py2 class Foo(object): __metaclass__ = PrefixMetaclass name = 'foo' def", "filter 函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda x: x % 2 == 0, [1,", "while True: x, y = y, x + y yield x f =", "而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def super(cls, inst): mro = inst.__class__.mro() return mro[mro.index(cls) +", "拦截类的创建 # 修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它 # 字符编码", "一律都把 str 解码(decode) # 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s = '你好'", "依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda x: x * x, [1, 2, 3, 4])", "语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 
确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 as", "[1, 2, 3, 4, 5, 6])) # 将 function 依次作用于 sequnce 的每个 item,即", "\\ / # \\ / # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C,", "x, y def __enter__(self): print \"Entering context\" return self def __exit__(self, type, value,", "\", shadow_copy) # 偏函数 \"\"\" from functools import partial def subtraction(x, y): return", "24) slots.job = \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+", "元类主要做了三件事: # 拦截类的创建 # 修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它", "_attrs) # 返回创建后的类 # py2 class Foo(object): __metaclass__ = PrefixMetaclass name = 'foo'", "s.decode('utf-8') + u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii # 将其编码成 str", "4) # 4 赋给了 x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象", "\"\"\" from math import sqrt, pow class Point(object): def __init__(self, x, y): print", "contextmanager # # @contextmanager # def point(x, y): # print 'before yield' #", "3) * 4 # filter 函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda x: x %", "3, 4, 5, 6) x=1, y=2, z=3, args=(4, 5, 6), kwargs={} # 高阶函数", "u_str = u'你好' # >>> str(u_str) # Traceback (most recent call last): #", "* 2) * 3) * 4 # filter 函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda", "迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 #", "x % 2 == 0, [1, 2, 3, 4, 5, 6])) # 将", "编码 # >>> u = u'世界' # unicode 类型 # >>> s +", "MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] \"\"\" # __slots__ class Slots(object): __slots__", "value) for name, value in attrs.items()) _attrs = dict((name, value) for name, value", "sequence 中的 item 依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda x: x 
* x, [1,", "% value # # # output # before yield # value is: 25", "中的 item 依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda x: x * x, [1, 2,", "https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # / \\ # / \\ # A B #", "y' self.x, self.y = x, y def __enter__(self): print \"Entering context\" return self", "'ascii' py3 'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str 和 unicode # +----------+ +----------+ #", "# >>> def func(x, y, z=0, *args, **kwargs): *() **{}, 打包,使用时解包 # func(1,", "str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x, y, z=0, *args, **kwargs):", "= sqrt(pow(self.x, 2) + pow(self.y, 2)) return distance \"\"\" # 通过yield实现 # from", "# yield x * x + y * y # print 'after yield'", "y * y # print 'after yield' # # with point(3, 4) as", ">>> u_str = u'你好' # >>> str(u_str) # Traceback (most recent call last):", "type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases, _attrs) # 返回创建后的类 # py2 class Foo(object):", "import sqrt, pow class Point(object): def __init__(self, x, y): print 'initialize x and", "x = u\"你好\" type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式,", "= '你好' # str 类型, utf-8 编码 # >>> u = u'世界' #", "# 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy = [1, 2, 3, [4, 5, 6]] sha", "# A B # \\ / # \\ / # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C,", "+----------+ +----------+ # | | instance of | | instance of | |", "# filter 函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda x: x % 2 == 0,", "'encoding': 'ascii' # x = \"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 # x =", "yield # value is: 25 # after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__", "sqrt(pow(self.x, 2) + pow(self.y, 2)) return distance \"\"\" # 通过yield实现 # from contextlib", "类型和 unicode 类型的字符串操作时,Python2 一律都把 str 解码(decode) # 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>>", "5, 
6])) # 将 function 依次作用于 sequnce 的每个 item,即 function(item),将返回值为 True 的 #", "return x - y f = partial(subtraction, 4) # 4 赋给了 x partial", "mro[index + 1] 事实上super里面实现的是:获取 inst 的 MRO 列表 查找 cls 在当前 MRO 列表中的", "y yield x f = fib() for key in f: if key <", "as value: # print 'value is: %s' % value # # # output", "并返回它的下一个类,即 mro[index + 1] \"\"\" # __slots__ class Slots(object): __slots__ = \"name\", \"age\"", "call last): # 正确做法 str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x,", "print \"Entering context\" return self def __exit__(self, type, value, traceback): print \"Exiting context\"", "它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x, y, z=0, *args, **kwargs): *() **{}, 打包,使用时解包 #", "output # before yield # value is: 25 # after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了", "context\" def get_distance(self): distance = sqrt(pow(self.x, 2) + pow(self.y, 2)) return distance \"\"\"", "python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys >>> sys.getdefaultencoding() py2 'ascii'", "func(1, 2, 3, 4, 5, 6) x=1, y=2, z=3, args=(4, 5, 6), kwargs={}", "迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 #", "# 线程池 from multiprocessing.pool import ThreadPool # 相当于from multiprocessing.dummy import Process pool =", "_attrs['echo'] = lambda self, phrase: phrase # 增加了一个 echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象)", "+----------+ class PrefixMetaclass(type): def __new__(cls, name, bases, attrs): # 给所有属性和方法前面加上前缀 my_ _attrs =", "sha = shadow_copy.copy() print(sha, \" \", shadow_copy) # sha[0] = 100 # print(sha,", "kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function, sequence) # 对 sequence", "print(sha, \" \", shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep, \" \",", "__enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 
as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 #", "5, 6), kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function, sequence) #", "# >>> u = u'世界' # unicode 类型 # >>> s + u", "赋给了 x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3", "实现了__iter()__和 next()方法(在 Python3 中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers =", "如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value 和 traceback 都为 None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由", "# class Foo(metaclass=PrefixMetaclass): # name = 'foo' # def bar(self): # # print", "\\ / # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base]", "print \"Exiting context\" def get_distance(self): distance = sqrt(pow(self.x, 2) + pow(self.y, 2)) return", "traceback 都为 None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下", "\"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建 # 修改类的定义 # 返回修改后的类", "__slots__ class Slots(object): __slots__ = \"name\", \"age\" def __init__(self, name, age): self.name =", "# Base # / \\ # / \\ # A B # \\", "my_ _attrs = (('my_' + name, value) for name, value in attrs.items()) _attrs", "= \"shadow\" print(deep, \" \", shadow_copy) # 偏函数 \"\"\" from functools import partial", "要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib(): x, y", "# 会进行隐式转换,即 s.decode('ascii') + u # Traceback (most recent call last): # 正确做法", ">>> u = u'世界' # unicode 类型 # >>> s + u #", "2)) return distance \"\"\" # 通过yield实现 # from contextlib import contextmanager # #", "类型, utf-8 编码 # >>> u = u'世界' # unicode 类型 # >>>", "# 正确做法 str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x, y, z=0,", "class 
Foo(metaclass=PrefixMetaclass): # name = 'foo' # def bar(self): # # print 'bar'", "\\ # A B # \\ / # \\ / # C \"\"\"", "来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建 # 修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用", "def __enter__(self): print \"Entering context\" return self def __exit__(self, type, value, traceback): print", "统一返回迭代器) 返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 #", "next()方法(在 Python3 中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers = (x", "类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii # 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str =", "/ \\ # / \\ # A B # \\ / # \\", "参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x, y, z=0, *args, **kwargs): *() **{},", "A, B, Base] def super(cls, inst): mro = inst.__class__.mro() return mro[mro.index(cls) + 1]", "index, 并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取 inst 的 MRO 列表 查找 cls 在当前", "functools import partial def subtraction(x, y): return x - y f = partial(subtraction,", "yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib(): x, y = 0, 1 while True:", "func(x, y, z=0, *args, **kwargs): *() **{}, 打包,使用时解包 # func(1, 2, 3, 4,", "方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则", "C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def super(cls, inst):", "yield' # yield x * x + y * y # print 'after", "name, bases, _attrs) # 返回创建后的类 # py2 class Foo(object): __metaclass__ = PrefixMetaclass name", "2 == 0, [1, 2, 3, 4, 5, 6])) # 将 function 依次作用于", "将 function 依次作用于 sequnce 的每个 item,即 function(item),将返回值为 True 的 # item 组成一个 List/String/Tuple", "= dict((name, value) for name, value in _attrs) # 转化为字典 _attrs['echo'] = lambda", "来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 
slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots =", "import sys >>> sys.getdefaultencoding() py2 'ascii' py3 'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str 和", "sequence 的前两个 item 传给 function,即 function(item1, item2),函数的返回值和 # sequence 的下一个 item 再传给 function,", "__init__(self, x, y): print 'initialize x and y' self.x, self.y = x, y", "# 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x, y, z=0, *args, **kwargs): *()", "\"shadow\" # print(sha, \" \", shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep,", "类型 # >>> s + u # 会进行隐式转换,即 s.decode('ascii') + u # Traceback", "= \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+ +----------+ +----------+ # |", "# 相当于 ((1 * 2) * 3) * 4 # filter 函数用于过滤元素,filter(function, sequnce)", "sha[3][0] = \"shadow\" # print(sha, \" \", shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0] =", "# Traceback (most recent call last): # 正确做法 s.decode('utf-8') + u # 如果函数或类等对象接收的是", "# 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。", "+ y * y # print 'after yield' # # with point(3, 4)", "protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers", "= u'世界' # unicode 类型 # >>> s + u # 会进行隐式转换,即 s.decode('ascii')", "subtraction(x, y): return x - y f = partial(subtraction, 4) # 4 赋给了", "fib() for key in f: if key < 10: print(key) # 上下文管理器 \"\"\"", "__metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建 # 修改类的定义 # 返回修改后的类 #", ">>> def func(x, y, z=0, *args, **kwargs): *() **{}, 打包,使用时解包 # func(1, 2,", "# 在python2中,x = \"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 # x = b\"hello\" chardet.detect(x),", "instance of | | # | instance +------------>+ class +------------>+ metaclass| # |", "attrs): # 给所有属性和方法前面加上前缀 my_ _attrs = (('my_' + name, 
value) for name, value", "# sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2 一律都把 str", "# print 'bar' pass # py3 # class Foo(metaclass=PrefixMetaclass): # name = 'foo'", "上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。 # 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 #", "= name self.age = age \"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了", "cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] \"\"\" # __slots__ class", "for x in range(5)) 生成器函数 含有 yield 关键字的函数 # yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地", "+----------+ +----------+ +----------+ # | | instance of | | instance of |", "\" \", shadow_copy) # sha[0] = 100 # print(sha, \" \", shadow_copy) #", "都为 None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下 weakref", "# pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事:", "# 增加了一个 echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases, _attrs) #", "unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s = '你好' # str 类型, utf-8 编码", "3, 4]) # 使用 lambda lamda args: # reduce(function, sequence[, initial]) # 先将", "返回创建后的类 # py2 class Foo(object): __metaclass__ = PrefixMetaclass name = 'foo' def bar(self):", "u # Traceback (most recent call last): # 正确做法 s.decode('utf-8') + u #", "的前两个 item 传给 function,即 function(item1, item2),函数的返回值和 # sequence 的下一个 item 再传给 function, reduce(lambda", "name, value in _attrs) # 转化为字典 _attrs['echo'] = lambda self, phrase: phrase #", "'encoding': 'utf-8' 中文则采用utf-8编码 # x = u\"你好\" type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding()", "列表中的 index, 并返回它的下一个类,即 mro[index + 1] \"\"\" # __slots__ class Slots(object): __slots__ =", "self.age = age \"使用 __slots__ 来告诉 
Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的", "metaclass| # | | | | | | # +----------+ +----------+ +----------+ class", "reduce(lambda x, y: x * y, [1, 2, 3, 4]) # 相当于 ((1", "方法。 # 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with 语句体执行前调用,with", "语句将该方法的返回值赋给 as 字句中的变量,如果有 as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type,", "yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。 # 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, #", "# 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib(): x, y =", "= \"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 # x = u\"你好\" type(x) = unicode", "# >>> s = '你好' # str 类型, utf-8 编码 # >>> u", "这种函数称之为高阶函数 # map(function, sequence) # 对 sequence 中的 item 依次执行 function(item),并将结果组成一个 List 返回,也就是", "直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2 一律都把 str 解码(decode) #", "yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def", "list(filter(lambda x: x % 2 == 0, [1, 2, 3, 4, 5, 6]))", "| | | # +----------+ +----------+ +----------+ class PrefixMetaclass(type): def __new__(cls, name, bases,", "ThreadPool # 相当于from multiprocessing.dummy import Process pool = ThreadPool(5) pool.apply_async(lambda x: x *", "if key < 10: print(key) # 上下文管理器 \"\"\" from math import sqrt, pow", "的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用", "def bar(self): # # print 'bar' # pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到", "# >>> str(u_str) # Traceback (most recent call last): # 正确做法 str(u_str.encode('utf-8')) #", "# __exit__ 
方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value 和 traceback 都为 None。如果发生异常,", "__weakref__弱引用 # 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。 # 一个对象若只被弱引用所引用,则被认为是不可访问(或弱可访问)的,并因此可能在任何时刻被回收. # 在 Python 中,当一个对象的引用数目为 0", "偏函数 \"\"\" from functools import partial def subtraction(x, y): return x - y", "def subtraction(x, y): return x - y f = partial(subtraction, 4) # 4", "在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] \"\"\" # __slots__ class Slots(object):", "def get_distance(self): distance = sqrt(pow(self.x, 2) + pow(self.y, 2)) return distance \"\"\" #", "age): self.name = name self.age = age \"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\"", "value: # print 'value is: %s' % value # # # output #", "= (('my_' + name, value) for name, value in attrs.items()) _attrs = dict((name,", "function, reduce(lambda x, y: x * y, [1, 2, 3, 4]) # 相当于", "+----------+ +----------+ # 在python2中,x = \"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 # x =", "yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield", "reduce(function, sequence[, initial]) # 先将 sequence 的前两个 item 传给 function,即 function(item1, item2),函数的返回值和 #", "u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii # 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。", "y: x * y, [1, 2, 3, 4]) # 相当于 ((1 * 2)", "[1, 2, 3, 4]) # 相当于 ((1 * 2) * 3) * 4", "next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契 def fib(): x, y = 0, 1", "chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 # x = b\"hello\" chardet.detect(x), 'encoding': 'ascii' # x", "object 的普通类意味着调用 type 来创建它 # 字符编码 python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>>", "# coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str 
类型和", "正确做法 s.decode('utf-8') + u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii # 将其编码成", "只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots = Slots(\"keke\", 24)", "py2 class Foo(object): __metaclass__ = PrefixMetaclass name = 'foo' def bar(self): # print", "# 迭代器生成器实现斐波那契 def fib(): x, y = 0, 1 while True: x, y", "# # print 'bar' # pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用", "中文则采用utf-8编码 # x = u\"你好\" type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii #", "next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 #", "列表 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] \"\"\" #", "4, 5, 6) x=1, y=2, z=3, args=(4, 5, 6), kwargs={} # 高阶函数 #", "return type.__new__(cls, name, bases, _attrs) # 返回创建后的类 # py2 class Foo(object): __metaclass__ =", "name = 'foo' def bar(self): # print 'bar' pass # py3 # class", "2, 3, 4]) # 使用 lambda lamda args: # reduce(function, sequence[, initial]) #", "with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。 # 一个对象若只被弱引用所引用,则被认为是不可访问(或弱可访问)的,并因此可能在任何时刻被回收. 
# 在", "pass # py3 # class Foo(metaclass=PrefixMetaclass): # name = 'foo' # def bar(self):", "x * x, (\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # / \\", "args: # reduce(function, sequence[, initial]) # 先将 sequence 的前两个 item 传给 function,即 function(item1,", "查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] \"\"\" # __slots__", "(x for x in range(5)) 生成器函数 含有 yield 关键字的函数 # yield 把函数变成了一个生成器。 #", "# 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s = '你好' # str 类型,", "from multiprocessing.pool import ThreadPool # 相当于from multiprocessing.dummy import Process pool = ThreadPool(5) pool.apply_async(lambda", "# 字符编码 python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys >>> sys.getdefaultencoding()", "MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取 inst 的 MRO 列表 查找", "4 赋给了 x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 # 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在", "__next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers = (x for x in", "= partial(subtraction, 4) # 4 赋给了 x partial 的功能:固定函数参数,返回一个新的函数 \"\"\" # 迭代器 #", "s + u # 会进行隐式转换,即 s.decode('ascii') + u # Traceback (most recent call", "# 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy = [1, 2, 3, [4, 5,", "6])) # 将 function 依次作用于 sequnce 的每个 item,即 function(item),将返回值为 True 的 # item", "一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到", "f = fib() for key in f: if key < 10: print(key) #", "# with point(3, 4) as value: # print 'value is: %s' % value", "字句中的变量,如果有 as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即 type, value 和", "# 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 
及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import", "sequnce 的每个 item,即 function(item),将返回值为 True 的 # item 组成一个 List/String/Tuple (取决于 sequnce 的类型,python3", "[4, 5, 6]] sha = shadow_copy.copy() print(sha, \" \", shadow_copy) # sha[0] =", "\"age\" def __init__(self, name, age): self.name = name self.age = age \"使用 __slots__", "则为utf-8 # 在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2 一律都把 str 解码(decode) # 成 unicode", "deep[3][0] = \"shadow\" print(deep, \" \", shadow_copy) # 偏函数 \"\"\" from functools import", "2, 3, [4, 5, 6]] sha = shadow_copy.copy() print(sha, \" \", shadow_copy) #", "设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots = Slots(\"keke\", 24) slots.job = \"computer\"", "# 拦截类的创建 # 修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它 #", "= age \"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots", "和 unicode # +----------+ +----------+ # | ascii| decode | | # |", "类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str = u'你好' # >>> str(u_str) # Traceback (most", "unicode,Python2 会默认使用 ascii # 将其编码成 str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str = u'你好'", "| # | str gbk +------------>+ unicode + # | utf8 |<------------| |", "PrefixMetaclass(type): def __new__(cls, name, bases, attrs): # 给所有属性和方法前面加上前缀 my_ _attrs = (('my_' +", "x f = fib() for key in f: if key < 10: print(key)", "= b\"hello\" chardet.detect(x), 'encoding': 'ascii' # x = \"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码", "type 来创建它 # 字符编码 python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys", "返回,也就是 map(lambda x: x * x, [1, 2, 3, 4]) # 使用 lambda", "__slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots", 
"function(item),并将结果组成一个 List 返回,也就是 map(lambda x: x * x, [1, 2, 3, 4]) #", "yield x * x + y * y # print 'after yield' #", "关键字的函数 # yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next()", "item 组成一个 List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 #", "# \\ / # \\ / # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个", "# __weakref__弱引用 # 首先先说下 weakref : 弱引用,与强引用相对,是指不能确保其引用的对象不会被垃圾回收器回收的引用。 # 一个对象若只被弱引用所引用,则被认为是不可访问(或弱可访问)的,并因此可能在任何时刻被回收. # 在 Python 中,当一个对象的引用数目为", ">>> import sys >>> sys.getdefaultencoding() py2 'ascii' py3 'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str", "* x, (\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # / \\ #", "含有 yield 关键字的函数 # yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; #", "x = b\"hello\" chardet.detect(x), 'encoding': 'ascii' # x = \"你好\", chardet.detect(x) 'encoding': 'utf-8'", "# item 组成一个 List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。", "\"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 # x = b\"hello\" chardet.detect(x), 'encoding': 'ascii' #", "phrase: phrase # 增加了一个 echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases,", "x + y yield x f = fib() for key in f: if", "x and y' self.x, self.y = x, y def __enter__(self): print \"Entering context\"", "# before yield # value is: 25 # after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__", "# value is: 25 # after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。", "中有两种和字符串相关的类型:str 和 unicode # +----------+ +----------+ # | ascii| decode | | #", "# 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy", "+----------+ # | | instance of | | instance of | 
| #", "math import sqrt, pow class Point(object): def __init__(self, x, y): print 'initialize x", "浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy = [1, 2,", "= lambda self, phrase: phrase # 增加了一个 echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return", "# +----------+ +----------+ # 在python2中,x = \"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 # x", "instance of | | instance of | | # | instance +------------>+ class", "type, value 和 traceback 都为 None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 #", "z=3, args=(4, 5, 6), kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function,", "当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它 # 字符编码 python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\"", "# | | instance of | | instance of | | # |", "name, value) for name, value in attrs.items()) _attrs = dict((name, value) for name,", "# 对 sequence 中的 item 依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda x: x *", "传给 function,即 function(item1, item2),函数的返回值和 # sequence 的下一个 item 再传给 function, reduce(lambda x, y:", "25 # after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。 # 通常,我们使用 with", "6]] sha = shadow_copy.copy() print(sha, \" \", shadow_copy) # sha[0] = 100 #", "yield 关键字的函数 # yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用", "# print 'bar' # pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type", "x, y: x * y, [1, 2, 3, 4]) # 相当于 ((1 *", "sys.getdefaultencoding() py2 'ascii' py3 'utf-8' \"\"\" # Python2 中有两种和字符串相关的类型:str 和 unicode # +----------+", "的下一个 item 再传给 function, reduce(lambda x, y: x * y, [1, 2, 3,", "(('my_' + name, value) for name, value in 
attrs.items()) _attrs = dict((name, value)", "| # +----------+ +----------+ +----------+ class PrefixMetaclass(type): def __new__(cls, name, bases, attrs): #", "的 MRO 列表 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1]", "+------------>+ unicode + # | utf8 |<------------| | # | 字节码 | encode", "x: x * x, [1, 2, 3, 4]) # 使用 lambda lamda args:", "recent call last): # 正确做法 str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def", "with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__", "# 迭代器是指遵循迭代器协议(iterator protocol)的对象 实现了__iter()__和 next()方法(在 Python3 中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器", "# 给所有属性和方法前面加上前缀 my_ _attrs = (('my_' + name, value) for name, value in", "迭代器生成器实现斐波那契 def fib(): x, y = 0, 1 while True: x, y =", "# print 'before yield' # yield x * x + y * y", "\"Entering context\" return self def __exit__(self, type, value, traceback): print \"Exiting context\" def", "class Point(object): def __init__(self, x, y): print 'initialize x and y' self.x, self.y", "5, 6) x=1, y=2, z=3, args=(4, 5, 6), kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数,", "point(3, 4) as value: # print 'value is: %s' % value # #", "函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda x: x % 2 == 0, [1, 2,", "traceback): print \"Exiting context\" def get_distance(self): distance = sqrt(pow(self.x, 2) + pow(self.y, 2))", "__metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建 # 修改类的定义 #", "= 'foo' def bar(self): # print 'bar' pass # py3 # class Foo(metaclass=PrefixMetaclass):", "# name = 'foo' # def bar(self): # # print 'bar' # pass", "# type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases, _attrs) # 返回创建后的类 # py2 class", "call last): # 正确做法 s.decode('utf-8') + u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用", "Python 只给一个固定集合的属性分配空间, 
不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots = Slots(\"keke\",", "+------------>+ metaclass| # | | | | | | # +----------+ +----------+ +----------+", "'initialize x and y' self.x, self.y = x, y def __enter__(self): print \"Entering", "迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers = (x for x in range(5)) 生成器函数 含有", "encode | | # +----------+ +----------+ # 在python2中,x = \"hello\", chardet.detect(x), 'encoding': 'ascii'", "class Slots(object): __slots__ = \"name\", \"age\" def __init__(self, name, age): self.name = name", "# str 类型, utf-8 编码 # >>> u = u'世界' # unicode 类型", "__exit__ 方法。 # 通常,我们使用 with 语句调用上下文管理器。with 语句尤其适用于对资源进行访问的场景, # 确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with", "x, y = 0, 1 while True: x, y = y, x +", "# +----------+ +----------+ +----------+ # | | instance of | | instance of", "utf-8 编码 # >>> u = u'世界' # unicode 类型 # >>> s", "_attrs = dict((name, value) for name, value in _attrs) # 转化为字典 _attrs['echo'] =", "name self.age = age \"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\"", "< 10: print(key) # 上下文管理器 \"\"\" from math import sqrt, pow class Point(object):", "并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取 inst 的 MRO 列表 查找 cls 在当前 MRO", "+------------>+ class +------------>+ metaclass| # | | | | | | # +----------+", "'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # / \\ # / \\ #", "inst 的 MRO 列表 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index +", "= list(filter(lambda x: x % 2 == 0, [1, 2, 3, 4, 5,", "__slots__ = \"name\", \"age\" def __init__(self, name, age): self.name = name self.age =", "字符编码 python2 和 python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys >>> sys.getdefaultencoding() py2", "sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2 一律都把 str 
解码(decode) # 成", "for key in f: if key < 10: print(key) # 上下文管理器 \"\"\" from", "x: x * x, (\"args1\", 'args2',)) # super函数 https://wiki.jikexueyuan.com/project/explore-python/Class/super.html # Base # /", "# x = b\"hello\" chardet.detect(x), 'encoding': 'ascii' # x = \"你好\", chardet.detect(x) 'encoding':", "'foo' # def bar(self): # # print 'bar' # pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找", "+----------+ # 在python2中,x = \"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码 # x = b\"hello\"", "# 正确做法 s.decode('utf-8') + u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii #", "x in range(5)) 生成器函数 含有 yield 关键字的函数 # yield 把函数变成了一个生成器。 # 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断", "# +----------+ +----------+ # | ascii| decode | | # | str gbk", "last): # 正确做法 s.decode('utf-8') + u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii", "and y' self.x, self.y = x, y def __enter__(self): print \"Entering context\" return", "from contextlib import contextmanager # # @contextmanager # def point(x, y): # print", "| instance +------------>+ class +------------>+ metaclass| # | | | | | |", "相当于 ((1 * 2) * 3) * 4 # filter 函数用于过滤元素,filter(function, sequnce) even_num", "str 类型再运算,这时就很容易出现 UnicodeEncodeError。 # >>> u_str = u'你好' # >>> str(u_str) # Traceback", "distance = sqrt(pow(self.x, 2) + pow(self.y, 2)) return distance \"\"\" # 通过yield实现 #", "| | | | | | # +----------+ +----------+ +----------+ class PrefixMetaclass(type): def", "x * x + y * y # print 'after yield' # #", "'before yield' # yield x * x + y * y # print", "# print(sha, \" \", shadow_copy) # sha[3][0] = \"shadow\" # print(sha, \" \",", "B, Base] def super(cls, inst): mro = inst.__class__.mro() return mro[mro.index(cls) + 1] 查找", "MRO 列表 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] \"\"\"", "def __init__(self, name, age): self.name = name self.age = age \"使用 __slots__ 来告诉", "会进行隐式转换,即 s.decode('ascii') + u # Traceback (most recent call last): # 正确做法 s.decode('utf-8')", "# 
确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 as 字句的话。 #", "# map(function, sequence) # 对 sequence 中的 item 依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda", "shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep, \" \", shadow_copy) # 偏函数", "lambda self, phrase: phrase # 增加了一个 echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls,", "__metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建 # 修改类的定义", "# 生成器函数的执行过程看起来就是不断地 执行->中断->执行->中断 的过程。 # 一开始,调用生成器函数的时候,函数不会立即执行,而是返回一个生成器对象; # 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, #", ">>> s + u # 会进行隐式转换,即 s.decode('ascii') + u # Traceback (most recent", "# +----------+ +----------+ +----------+ class PrefixMetaclass(type): def __new__(cls, name, bases, attrs): # 给所有属性和方法前面加上前缀", "作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 yield,如果没有 yield,则抛出异常。 # 迭代器生成器实现斐波那契", "(most recent call last): # 正确做法 s.decode('utf-8') + u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是", "\"Exiting context\" def get_distance(self): distance = sqrt(pow(self.x, 2) + pow(self.y, 2)) return distance", "# / \\ # A B # \\ / # \\ / #", "'foo' def bar(self): # print 'bar' pass # py3 # class Foo(metaclass=PrefixMetaclass): #", "unicode + # | utf8 |<------------| | # | 字节码 | encode |", "index, 并返回它的下一个类,即 mro[index + 1] \"\"\" # __slots__ class Slots(object): __slots__ = \"name\",", "True: x, y = y, x + y yield x f = fib()", "再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s = '你好' # str 类型, utf-8 编码 #", "python3 # https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html \"\"\" >>> import sys >>> sys.getdefaultencoding() py2 'ascii' py3 'utf-8'", "查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1] 事实上super里面实现的是:获取 inst 的", "/ \\ # A B # \\ / # \\ / # C", 
"* x, [1, 2, 3, 4]) # 使用 lambda lamda args: # reduce(function,", "= 100 # print(sha, \" \", shadow_copy) # sha[3][0] = \"shadow\" # print(sha,", "'ascii' 默认使用ascii编码 # x = b\"hello\" chardet.detect(x), 'encoding': 'ascii' # x = \"你好\",", "gbk +------------>+ unicode + # | utf8 |<------------| | # | 字节码 |", "深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用 # 及,开辟新空间存储123456,456还是指向原来的456的地址,改变123各值,两个列表不会受到影响 # 改变456的值,两个列表都将改变 # 深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy", "* x + y * y # print 'after yield' # # with", "# unicode 类型 # >>> s + u # 会进行隐式转换,即 s.decode('ascii') + u", "name, bases, attrs): # 给所有属性和方法前面加上前缀 my_ _attrs = (('my_' + name, value) for", "of | | # | instance +------------>+ class +------------>+ metaclass| # | |", "sequence) # 对 sequence 中的 item 依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda x: x", "% 2 == 0, [1, 2, 3, 4, 5, 6])) # 将 function", "+ pow(self.y, 2)) return distance \"\"\" # 通过yield实现 # from contextlib import contextmanager", "print(sha, \" \", shadow_copy) # sha[3][0] = \"shadow\" # print(sha, \" \", shadow_copy)", "\"子类允许定义的属性就是自身的 slots 加上父类的 slots。\" slots = Slots(\"keke\", 24) slots.job = \"computer\" # 元类", "Foo(metaclass=PrefixMetaclass): # name = 'foo' # def bar(self): # # print 'bar' #", "# Traceback (most recent call last): # 正确做法 str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。", "* 3) * 4 # filter 函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda x: x", "def point(x, y): # print 'before yield' # yield x * x +", "mro[index + 1] \"\"\" # __slots__ class Slots(object): __slots__ = \"name\", \"age\" def", "先将 sequence 的前两个 item 传给 function,即 function(item1, item2),函数的返回值和 # sequence 的下一个 item 再传给", "# print 'after yield' # # with point(3, 4) as value: # print", "List 返回,也就是 map(lambda x: x * x, [1, 2, 3, 4]) # 使用", "| # | instance +------------>+ class +------------>+ metaclass| # | | | |", "s.decode('ascii') + u # Traceback (most recent call last): # 正确做法 
s.decode('utf-8') +", "通过yield实现 # from contextlib import contextmanager # # @contextmanager # def point(x, y):", "语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 as 字句的话。 # __exit__ 方法在退出运行时上下文时被调用,它负责执行『清理』工作,比如关闭文件,释放资源等。 # 如果退出时没有发生异常,则 __exit__ 的三个参数,即", "contextlib import contextmanager # # @contextmanager # def point(x, y): # print 'before", "解码(decode) # 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s = '你好' # str", "Slots(\"keke\", 24) slots.job = \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+ +----------+", "function,即 function(item1, item2),函数的返回值和 # sequence 的下一个 item 再传给 function, reduce(lambda x, y: x", "_attrs) # 转化为字典 _attrs['echo'] = lambda self, phrase: phrase # 增加了一个 echo 方法", "# # output # before yield # value is: 25 # after yield", "type 来创建这个类。\" # 元类主要做了三件事: # 拦截类的创建 # 修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object", "1] 事实上super里面实现的是:获取 inst 的 MRO 列表 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即", "self def __exit__(self, type, value, traceback): print \"Exiting context\" def get_distance(self): distance =", "sys.setdefaultencoding(utf-8) 直接更改了当前默认编码格式, sys.getdefaultencoding() 则为utf-8 # 在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2 一律都把 str 解码(decode)", "((1 * 2) * 3) * 4 # filter 函数用于过滤元素,filter(function, sequnce) even_num =", "def func(x, y, z=0, *args, **kwargs): *() **{}, 打包,使用时解包 # func(1, 2, 3,", "pow(self.y, 2)) return distance \"\"\" # 通过yield实现 # from contextlib import contextmanager #", "in _attrs) # 转化为字典 _attrs['echo'] = lambda self, phrase: phrase # 增加了一个 echo", "Point(object): def __init__(self, x, y): print 'initialize x and y' self.x, self.y =", "u # 会进行隐式转换,即 s.decode('ascii') + u # Traceback (most recent call last): #", "对 sequence 中的 item 依次执行 function(item),并将结果组成一个 List 返回,也就是 map(lambda x: x * x,", "# 然后,当我们使用 next() 作用于它的时候,它开始执行,遇到 yield 语句的时候,执行被中断,并返回当前的迭代值, # 要注意的是,此刻会记住中断的位置和所有的数据,也就是执行时的上下文环境被保留起来; # 当再次使用 next() 的时候,从原来中断的地方继续执行,直至遇到 
yield,如果没有", "def super(cls, inst): mro = inst.__class__.mro() return mro[mro.index(cls) + 1] 查找 cls 在当前", "x=1, y=2, z=3, args=(4, 5, 6), kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数", "# 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers = (x for x in range(5)) 生成器函数", "lambda lamda args: # reduce(function, sequence[, initial]) # 先将 sequence 的前两个 item 传给", "class +------------>+ metaclass| # | | | | | | # +----------+ +----------+", "# reduce(function, sequence[, initial]) # 先将 sequence 的前两个 item 传给 function,即 function(item1, item2),函数的返回值和", "+ y yield x f = fib() for key in f: if key", "[1, 2, 3, 4]) # 使用 lambda lamda args: # reduce(function, sequence[, initial])", "/ # \\ / # C \"\"\" 子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A,", "item 传给 function,即 function(item1, item2),函数的返回值和 # sequence 的下一个 item 再传给 function, reduce(lambda x,", "给所有属性和方法前面加上前缀 my_ _attrs = (('my_' + name, value) for name, value in attrs.items())", "= fib() for key in f: if key < 10: print(key) # 上下文管理器", "确保执行过程中出现异常情况时也可以对资源进行回收,比如自动关闭文件等。 # __enter__ 方法在 with 语句体执行前调用,with 语句将该方法的返回值赋给 as 字句中的变量,如果有 as 字句的话。 # __exit__", "= Slots(\"keke\", 24) slots.job = \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html # 类是实例对象的模板,元类是类的模板 # +----------+", "self.x, self.y = x, y def __enter__(self): print \"Entering context\" return self def", "y, x + y yield x f = fib() for key in f:", "chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 # x = u\"你好\" type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8", "print 'after yield' # # with point(3, 4) as value: # print 'value", "distance \"\"\" # 通过yield实现 # from contextlib import contextmanager # # @contextmanager #", "*() **{}, 打包,使用时解包 # func(1, 2, 3, 4, 5, 6) x=1, y=2, z=3,", "shadow_copy) # sha[0] = 100 # print(sha, \" \", shadow_copy) # sha[3][0] =", "= 'foo' # def bar(self): # # print 'bar' # pass 
\"Python 会首先在当前类中寻找", "使用 lambda lamda args: # reduce(function, sequence[, initial]) # 先将 sequence 的前两个 item", "'bar' # pass \"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__\" \"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,\" \"如果还是找不到,就会用 type 来创建这个类。\" #", "str 类型, utf-8 编码 # >>> u = u'世界' # unicode 类型 #", "def fib(): x, y = 0, 1 while True: x, y = y,", "增加了一个 echo 方法 # type 除了可以返回对象的类型,它还可以被用来动态地创建类(对象) return type.__new__(cls, name, bases, _attrs) # 返回创建后的类", "# 偏函数 \"\"\" from functools import partial def subtraction(x, y): return x -", "str(u_str) # Traceback (most recent call last): # 正确做法 str(u_str.encode('utf-8')) # 参数魔法 #", "'utf-8' 中文则采用utf-8编码 # x = u\"你好\" type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii", "# x = u\"你好\" type(x) = unicode # coding:utf-8,用来保证文件中可以使用中文,将中文进行转码成utf-8 sys.getdefaultencoding() 依然为ascii # sys.setdefaultencoding(utf-8)", "function(item1, item2),函数的返回值和 # sequence 的下一个 item 再传给 function, reduce(lambda x, y: x *", "| | # +----------+ +----------+ # 在python2中,x = \"hello\", chardet.detect(x), 'encoding': 'ascii' 默认使用ascii编码", "__exit__ 的三个参数,即 type, value 和 traceback 都为 None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with", "# 修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它 # 字符编码 python2", "依次作用于 sequnce 的每个 item,即 function(item),将返回值为 True 的 # item 组成一个 List/String/Tuple (取决于 sequnce", "is: %s' % value # # # output # before yield # value", "\" \", shadow_copy) # sha[3][0] = \"shadow\" # print(sha, \" \", shadow_copy) deep", "None。如果发生异常, # 返回 True 表示不处理异常,否则会在退出该方法后重新抛出异常以由 with 语句之外的代码逻辑进行处理。 # __weakref__弱引用 # 首先先说下 weakref :", "%s' % value # # # output # before yield # value is:", "| ascii| decode | | # | str gbk +------------>+ unicode + #", "# 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>> def func(x, y, z=0, *args, **kwargs): *() **{}, 打包,使用时解包", "高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 # map(function, 
sequence) # 对 sequence 中的 item", "multiprocessing.dummy import Process pool = ThreadPool(5) pool.apply_async(lambda x: x * x, (\"args1\", 'args2',))", "B, Base],依次调用列表中的每一个 而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base] def super(cls, inst): mro = inst.__class__.mro() return", "# 先将 sequence 的前两个 item 传给 function,即 function(item1, item2),函数的返回值和 # sequence 的下一个 item", "\"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性\" \"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,\" \"子类允许定义的属性就是自身的 slots 加上父类的 slots。\"", "(most recent call last): # 正确做法 str(u_str.encode('utf-8')) # 参数魔法 # 它们在使用的时候是有顺序的,依次是必选参数、默认参数、可变参数和关键字参数。 # >>>", "2, 3, 4, 5, 6) x=1, y=2, z=3, args=(4, 5, 6), kwargs={} #", "slots 加上父类的 slots。\" slots = Slots(\"keke\", 24) slots.job = \"computer\" # 元类 https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html", "str 类型和 unicode 类型的字符串操作时,Python2 一律都把 str 解码(decode) # 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 #", "4]) # 使用 lambda lamda args: # reduce(function, sequence[, initial]) # 先将 sequence", "str 解码(decode) # 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。 # >>> s = '你好' #", "1 while True: x, y = y, x + y yield x f", "'你好' # str 类型, utf-8 编码 # >>> u = u'世界' # unicode", "u = u'世界' # unicode 类型 # >>> s + u # 会进行隐式转换,即", "2) + pow(self.y, 2)) return distance \"\"\" # 通过yield实现 # from contextlib import", "# | 字节码 | encode | | # +----------+ +----------+ # 在python2中,x =", "\" \", shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep, \" \", shadow_copy)", "Python3 中是 __next__() 方法) # 迭代器不会把数据全部加载到内存,而是用到某一个才会取读取值 # 生成器 # 它有两种构造方式:生成器表达式,numbers = (x for", "组成一个 List/String/Tuple (取决于 sequnce 的类型,python3 统一返回迭代器) 返回。 # 深浅拷贝 # 赋值是引用,一个更改另一个也更改。 # 浅拷贝,重新开辟空间存储被拷贝的值,但列表中的列表会指向源列表中的列表,及引用", "return self def __exit__(self, type, value, traceback): print \"Exiting context\" def get_distance(self): distance", "mro[mro.index(cls) + 1] 查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1]", "| utf8 
|<------------| | # | 字节码 | encode | | # +----------+", "__exit__(self, type, value, traceback): print \"Exiting context\" def get_distance(self): distance = sqrt(pow(self.x, 2)", "ascii| decode | | # | str gbk +------------>+ unicode + # |", "for name, value in _attrs) # 转化为字典 _attrs['echo'] = lambda self, phrase: phrase", "value) for name, value in _attrs) # 转化为字典 _attrs['echo'] = lambda self, phrase:", "= PrefixMetaclass name = 'foo' def bar(self): # print 'bar' pass # py3", "in attrs.items()) _attrs = dict((name, value) for name, value in _attrs) # 转化为字典", "PrefixMetaclass name = 'foo' def bar(self): # print 'bar' pass # py3 #", "修改类的定义 # 返回修改后的类 # 当你创建类时,解释器会调用元类来生成它,定义一个继承自 object 的普通类意味着调用 type 来创建它 # 字符编码 python2 和", "value is: 25 # after yield # 上下文管理器是支持上下文管理协议的对象,也就是实现了 __enter__ 和 __exit__ 方法。 #", "# print(sha, \" \", shadow_copy) deep = copy.deepcopy(shadow_copy) deep[3][0] = \"shadow\" print(deep, \"", "2) * 3) * 4 # filter 函数用于过滤元素,filter(function, sequnce) even_num = list(filter(lambda x:", "print 'initialize x and y' self.x, self.y = x, y def __enter__(self): print", "y=2, z=3, args=(4, 5, 6), kwargs={} # 高阶函数 # 在函数式编程中,我们可以将函数当作变量一样自由使用。一个函数接收另一个函数作为参数, # 这种函数称之为高阶函数 #", "'ascii' # x = \"你好\", chardet.detect(x) 'encoding': 'utf-8' 中文则采用utf-8编码 # x = u\"你好\"", "x + y * y # print 'after yield' # # with point(3,", "在进行同时包含 str 类型和 unicode 类型的字符串操作时,Python2 一律都把 str 解码(decode) # 成 unicode 再运算,这时就很容易出现 UnicodeDecodeError。", "+ u # 如果函数或类等对象接收的是 str 类型的字符串,但你传的是 unicode,Python2 会默认使用 ascii # 将其编码成 str 类型再运算,这时就很容易出现", "深拷贝,则会创建新的456存储空间,各个列表至此没有一点关系 import copy shadow_copy = [1, 2, 3, [4, 5, 6]] sha =" ]
[ "b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' ) class InternalError(WebException): def default_response(self):", "b'</head>' b'<body>' b'<h1>405 Method Not Allowed</h1>' b'</body>' b'</html>' ) class Redirect(WebException): def __init__(self,", "'<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary", ") class CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends code 303 With :param:`cookie` set it", "which sends code 303 With :param:`cookie` set it is often used for login", "is used to provide \"success\" page for various web forms and other non-idempotent", "headers(self): return ([('Content-Type', 'text/html')] + self.location_header()) def default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE html>'", "headers(self): sup = super().headers().copy() if self.cookie is not None: sup['Set-Cookie'] = self.cookie.output(header='') return", "'<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') )", "to notify that there is not such child\"\"\" class NiceError(Exception): \"\"\"Error that is", "'{:d}'.format(status_code) def location_header(self): return [('Location', self.location)] def headers(self): return ([('Content-Type', 'text/html')] + self.location_header())", "class NiceError(Exception): \"\"\"Error that is safe to present to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta):", "Allowed</h1>' b'</body>' b'</html>' ) class Redirect(WebException): def __init__(self, location, status_code, status_text=None): assert status_text", "often used for login forms. 
Without parameter set it is used to provide", "default_response(self): pass class Forbidden(WebException): def default_response(self): return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>'", "Not Found</h1>' b'</body>' b'</html>' ) class MethodNotAllowed(WebException): def default_response(self): return (405, [('Content-Type', 'text/html')],", "NiceError(Exception): \"\"\"Error that is safe to present to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod", "forms and other non-idempotent actions \"\"\" def __init__(self, location, cookie=None, *, status_code=303, status_text=None):", "and other non-idempotent actions \"\"\" def __init__(self, location, cookie=None, *, status_code=303, status_text=None): super().__init__(location,", "Page Not Found</h1>' b'</body>' b'</html>' ) class MethodNotAllowed(WebException): def default_response(self): return (405, [('Content-Type',", "metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request): pass class PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path =", "\"success\" page for various web forms and other non-idempotent actions \"\"\" def __init__(self,", "login forms. 
Without parameter set it is used to provide \"success\" page for", "for various web forms and other non-idempotent actions \"\"\" def __init__(self, location, cookie=None,", "b'</html>' ) class InternalError(WebException): def default_response(self): return (500, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>'", "Internal Server Error</title>' b'</head>' b'<body>' b'<h1>500 Internal Server Error</h1>' b'</body>' b'</html>' ) class", "Server Error</title>' b'</head>' b'<body>' b'<h1>500 Internal Server Error</h1>' b'</body>' b'</html>' ) class NotFound(WebException):", "b'</head>' b'<body>' b'<h1>500 Internal Server Error</h1>' b'</body>' b'</html>' ) class NotFound(WebException): def default_response(self):", "that is safe to present to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self,", "b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500 Internal Server Error</title>' b'</head>' b'<body>' b'<h1>500 Internal Server", "= self.cookie.output(header='') return sup class OutOfScopeError(Exception): \"\"\"Raised by resolve_local to notify that there", "code 303 With :param:`cookie` set it is often used for login forms. 
Without", "assert status_text is None, \"Not Implemented\" self.status_code = status_code self.location = location self.statusline", "b'<title>500 Internal Server Error</title>' b'</head>' b'<body>' b'<h1>500 Internal Server Error</h1>' b'</body>' b'</html>' )", "Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method Not Allowed</h1>' b'</body>' b'</html>' ) class Redirect(WebException): def", "\"\"\"Temporary redirect which sends code 303 With :param:`cookie` set it is often used", "not None: sup['Set-Cookie'] = self.cookie.output(header='') return sup class OutOfScopeError(Exception): \"\"\"Raised by resolve_local to", "(403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403 Forbidden</h1>'", "def update_request(self, request): pass class PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path = new_path def", "\"\"\"Error that is safe to present to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def", "b'</body>' b'</html>' ) class MethodNotAllowed(WebException): def default_response(self): return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>'", "'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405 Method Not Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method", "html>' b'<html>' b'<head>' b'<title>405 Method Not Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method Not Allowed</h1>'", "def default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>'", "forms. 
Without parameter set it is used to provide \"success\" page for various", "b'<body>' b'<h1>404 Page Not Found</h1>' b'</body>' b'</html>' ) class MethodNotAllowed(WebException): def default_response(self): return", "'<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect):", "is not None: sup['Set-Cookie'] = self.cookie.output(header='') return sup class OutOfScopeError(Exception): \"\"\"Raised by resolve_local", "which render error code (and page) to client\"\"\" @abc.abstractmethod def default_response(self): pass class", "(404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404 Page Not Found</title>' b'</head>' b'<body>'", "class MethodNotAllowed(WebException): def default_response(self): return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405", "def location_header(self): return [('Location', self.location)] def headers(self): return ([('Content-Type', 'text/html')] + self.location_header()) def", "None, \"Not Implemented\" self.status_code = status_code self.location = location self.statusline = '{:d}'.format(status_code) def", "html>' b'<html>' b'<head>' b'<title>500 Internal Server Error</title>' b'</head>' b'<body>' b'<h1>500 Internal Server Error</h1>'", "default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a", "sends code 303 With :param:`cookie` set it is often used for login forms.", "def default_response(self): return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>'", "is None, \"Not Implemented\" self.status_code = status_code self.location = location self.statusline = '{:d}'.format(status_code)", "b'<head>' b'<title>405 
Method Not Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method Not Allowed</h1>' b'</body>' b'</html>'", "Method Not Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method Not Allowed</h1>' b'</body>' b'</html>' ) class", "None: sup['Set-Cookie'] = self.cookie.output(header='') return sup class OutOfScopeError(Exception): \"\"\"Raised by resolve_local to notify", "to present to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request): pass class", "*, status_code=303, status_text=None): super().__init__(location, status_code=status_code, status_text=status_text) self.cookie = cookie def headers(self): sup =", "'</body>' '</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends code 303 With :param:`cookie`", "(self.statusline, self.headers(), '<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>'", "class WebException(Exception): \"\"\"Base for all exceptions which render error code (and page) to", "(500, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500 Internal Server Error</title>' b'</head>' b'<body>'", "safe to present to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request): pass", "update_request(self, request): pass class PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path = new_path def update_request(self,", "it is often used for login forms. 
Without parameter set it is used", "pass class Forbidden(WebException): def default_response(self): return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>'", "class NotFound(WebException): def default_response(self): return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404", "InternalError(WebException): def default_response(self): return (500, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500 Internal", "notify that there is not such child\"\"\" class NiceError(Exception): \"\"\"Error that is safe", "b'<h1>405 Method Not Allowed</h1>' b'</body>' b'</html>' ) class Redirect(WebException): def __init__(self, location, status_code,", "class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request): pass class PathRewrite(InternalRedirect): def __init__(self, new_path):", "+ self.location_header()) def default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>'", "there is not such child\"\"\" class NiceError(Exception): \"\"\"Error that is safe to present", "default_response(self): return (500, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500 Internal Server Error</title>'", "b'</head>' b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' ) class InternalError(WebException): def default_response(self): return (500,", "'text/html')] + self.location_header()) def default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>'", "Forbidden</title>' b'</head>' b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' ) class InternalError(WebException): def default_response(self): return", "b'</body>' b'</html>' ) class Redirect(WebException): def __init__(self, location, status_code, status_text=None): assert status_text is", "web 
forms and other non-idempotent actions \"\"\" def __init__(self, location, cookie=None, *, status_code=303,", "return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404 Page Not Found</title>' b'</head>'", "b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405 Method Not Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method Not", "not such child\"\"\" class NiceError(Exception): \"\"\"Error that is safe to present to user\"\"\"", "(405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405 Method Not Allowed</title>' b'</head>' b'<body>'", "is safe to present to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request):", "def headers(self): return ([('Content-Type', 'text/html')] + self.location_header()) def default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE", "PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path = new_path def update_request(self, request): request.uri = self.new_path", "Error</h1>' b'</body>' b'</html>' ) class NotFound(WebException): def default_response(self): return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE", "b'<html>' b'<head>' b'<title>404 Page Not Found</title>' b'</head>' b'<body>' b'<h1>404 Page Not Found</h1>' b'</body>'", "Not Found</title>' b'</head>' b'<body>' b'<h1>404 Page Not Found</h1>' b'</body>' b'</html>' ) class MethodNotAllowed(WebException):", "return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403", "(and page) to client\"\"\" @abc.abstractmethod def default_response(self): pass class Forbidden(WebException): def default_response(self): return", ") class NotFound(WebException): def default_response(self): return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>'", "status_text is None, \"Not Implemented\" 
self.status_code = status_code self.location = location self.statusline =", "self.location)] def headers(self): return ([('Content-Type', 'text/html')] + self.location_header()) def default_response(self): return (self.statusline, self.headers(),", "'</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends code 303 With :param:`cookie` set", "cookie=None, *, status_code=303, status_text=None): super().__init__(location, status_code=status_code, status_text=status_text) self.cookie = cookie def headers(self): sup", "'<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends", "def headers(self): sup = super().headers().copy() if self.cookie is not None: sup['Set-Cookie'] = self.cookie.output(header='')", "by resolve_local to notify that there is not such child\"\"\" class NiceError(Exception): \"\"\"Error", "resolve_local to notify that there is not such child\"\"\" class NiceError(Exception): \"\"\"Error that", "self.headers(), '<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8')", "Forbidden(WebException): def default_response(self): return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>'", "default_response(self): return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405 Method Not Allowed</title>'", "b'<body>' b'<h1>500 Internal Server Error</h1>' b'</body>' b'</html>' ) class NotFound(WebException): def default_response(self): return", "b'<h1>500 Internal Server Error</h1>' b'</body>' b'</html>' ) class NotFound(WebException): def default_response(self): return (404,", "location, status_code, status_text=None): assert 
status_text is None, \"Not Implemented\" self.status_code = status_code self.location", "self.cookie = cookie def headers(self): sup = super().headers().copy() if self.cookie is not None:", "return ([('Content-Type', 'text/html')] + self.location_header()) def default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE html>' '<html>'", "Redirect(WebException): def __init__(self, location, status_code, status_text=None): assert status_text is None, \"Not Implemented\" self.status_code", "provide \"success\" page for various web forms and other non-idempotent actions \"\"\" def", "exceptions which render error code (and page) to client\"\"\" @abc.abstractmethod def default_response(self): pass", "Error</title>' b'</head>' b'<body>' b'<h1>500 Internal Server Error</h1>' b'</body>' b'</html>' ) class NotFound(WebException): def", "to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request): pass class PathRewrite(InternalRedirect): def", "user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request): pass class PathRewrite(InternalRedirect): def __init__(self,", "return [('Location', self.location)] def headers(self): return ([('Content-Type', 'text/html')] + self.location_header()) def default_response(self): return", "such child\"\"\" class NiceError(Exception): \"\"\"Error that is safe to present to user\"\"\" class", "b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404 Page Not Found</title>' b'</head>' b'<body>' b'<h1>404 Page Not", "redirect which sends code 303 With :param:`cookie` set it is often used for", "Page Not Found</title>' b'</head>' b'<body>' b'<h1>404 Page Not Found</h1>' b'</body>' b'</html>' ) class", "Not Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method Not Allowed</h1>' b'</body>' b'</html>' ) class Redirect(WebException):", "'<a href=\"{0.location}\">Follow</a>' '</body>' 
'</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends code 303", "for login forms. Without parameter set it is used to provide \"success\" page", "Method Not Allowed</h1>' b'</body>' b'</html>' ) class Redirect(WebException): def __init__(self, location, status_code, status_text=None):", "status_code=status_code, status_text=status_text) self.cookie = cookie def headers(self): sup = super().headers().copy() if self.cookie is", "b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' )", "\"\"\"Raised by resolve_local to notify that there is not such child\"\"\" class NiceError(Exception):", ") class MethodNotAllowed(WebException): def default_response(self): return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>'", "Forbidden</h1>' b'</body>' b'</html>' ) class InternalError(WebException): def default_response(self): return (500, [('Content-Type', 'text/html')], b'<!DOCTYPE", "return (self.statusline, self.headers(), '<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>'", "abc class WebException(Exception): \"\"\"Base for all exceptions which render error code (and page)", "location_header(self): return [('Location', self.location)] def headers(self): return ([('Content-Type', 'text/html')] + self.location_header()) def default_response(self):", "\"\"\"Base for all exceptions which render error code (and page) to client\"\"\" @abc.abstractmethod", "= location self.statusline = '{:d}'.format(status_code) def location_header(self): return [('Location', self.location)] def headers(self): return", "b'<title>405 Method Not Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method Not Allowed</h1>' b'</body>' b'</html>' )", "set it is often used for login forms. 
Without parameter set it is", "def __init__(self, location, cookie=None, *, status_code=303, status_text=None): super().__init__(location, status_code=status_code, status_text=status_text) self.cookie = cookie", "OutOfScopeError(Exception): \"\"\"Raised by resolve_local to notify that there is not such child\"\"\" class", "Without parameter set it is used to provide \"success\" page for various web", "present to user\"\"\" class InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request): pass class PathRewrite(InternalRedirect):", "import abc class WebException(Exception): \"\"\"Base for all exceptions which render error code (and", "to client\"\"\" @abc.abstractmethod def default_response(self): pass class Forbidden(WebException): def default_response(self): return (403, [('Content-Type',", "Not Allowed</h1>' b'</body>' b'</html>' ) class Redirect(WebException): def __init__(self, location, status_code, status_text=None): assert", "= status_code self.location = location self.statusline = '{:d}'.format(status_code) def location_header(self): return [('Location', self.location)]", "child\"\"\" class NiceError(Exception): \"\"\"Error that is safe to present to user\"\"\" class InternalRedirect(Exception,", "\"\"\" def __init__(self, location, cookie=None, *, status_code=303, status_text=None): super().__init__(location, status_code=status_code, status_text=status_text) self.cookie =", "page for various web forms and other non-idempotent actions \"\"\" def __init__(self, location,", "html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') ) class", "= super().headers().copy() if self.cookie is not None: sup['Set-Cookie'] = self.cookie.output(header='') return sup class", "b'</body>' b'</html>' ) class InternalError(WebException): def default_response(self): return (500, [('Content-Type', 
'text/html')], b'<!DOCTYPE html>'", "\"Not Implemented\" self.status_code = status_code self.location = location self.statusline = '{:d}'.format(status_code) def location_header(self):", "b'</body>' b'</html>' ) class NotFound(WebException): def default_response(self): return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>'", "self.cookie.output(header='') return sup class OutOfScopeError(Exception): \"\"\"Raised by resolve_local to notify that there is", "class PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path = new_path def update_request(self, request): request.uri =", "actions \"\"\" def __init__(self, location, cookie=None, *, status_code=303, status_text=None): super().__init__(location, status_code=status_code, status_text=status_text) self.cookie", "sup['Set-Cookie'] = self.cookie.output(header='') return sup class OutOfScopeError(Exception): \"\"\"Raised by resolve_local to notify that", "def default_response(self): return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405 Method Not", "request): pass class PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path = new_path def update_request(self, request):", "for all exceptions which render error code (and page) to client\"\"\" @abc.abstractmethod def", "default_response(self): return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404 Page Not Found</title>'", "status_text=None): super().__init__(location, status_code=status_code, status_text=status_text) self.cookie = cookie def headers(self): sup = super().headers().copy() if", "sup = super().headers().copy() if self.cookie is not None: sup['Set-Cookie'] = self.cookie.output(header='') return sup", "self.location_header()) def default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE html>' '<html>' '<head>' '<title>{0.statusline}</title>' '</head>' '<body>'", "def __init__(self, location, status_code, 
status_text=None): assert status_text is None, \"Not Implemented\" self.status_code =", ":param:`cookie` set it is often used for login forms. Without parameter set it", "client\"\"\" @abc.abstractmethod def default_response(self): pass class Forbidden(WebException): def default_response(self): return (403, [('Content-Type', 'text/html')],", "WebException(Exception): \"\"\"Base for all exceptions which render error code (and page) to client\"\"\"", "it is used to provide \"success\" page for various web forms and other", "self.status_code = status_code self.location = location self.statusline = '{:d}'.format(status_code) def location_header(self): return [('Location',", "b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' ) class InternalError(WebException): def default_response(self): return (500, [('Content-Type', 'text/html')],", "Server Error</h1>' b'</body>' b'</html>' ) class NotFound(WebException): def default_response(self): return (404, [('Content-Type', 'text/html')],", "def default_response(self): return (500, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500 Internal Server", "b'</html>' ) class NotFound(WebException): def default_response(self): return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>'", "NotFound(WebException): def default_response(self): return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404 Page", "'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404 Page Not Found</title>' b'</head>' b'<body>' b'<h1>404 Page", "b'<html>' b'<head>' b'<title>405 Method Not Allowed</title>' b'</head>' b'<body>' b'<h1>405 Method Not Allowed</h1>' b'</body>'", "self.statusline = '{:d}'.format(status_code) def location_header(self): return [('Location', self.location)] def headers(self): return ([('Content-Type', 'text/html')]", "used to provide \"success\" page for various web forms and other non-idempotent actions", "various web forms and other 
non-idempotent actions \"\"\" def __init__(self, location, cookie=None, *,", "other non-idempotent actions \"\"\" def __init__(self, location, cookie=None, *, status_code=303, status_text=None): super().__init__(location, status_code=status_code,", "super().headers().copy() if self.cookie is not None: sup['Set-Cookie'] = self.cookie.output(header='') return sup class OutOfScopeError(Exception):", "self.cookie is not None: sup['Set-Cookie'] = self.cookie.output(header='') return sup class OutOfScopeError(Exception): \"\"\"Raised by", "sup class OutOfScopeError(Exception): \"\"\"Raised by resolve_local to notify that there is not such", "b'<body>' b'<h1>405 Method Not Allowed</h1>' b'</body>' b'</html>' ) class Redirect(WebException): def __init__(self, location,", "b'<html>' b'<head>' b'<title>500 Internal Server Error</title>' b'</head>' b'<body>' b'<h1>500 Internal Server Error</h1>' b'</body>'", "@abc.abstractmethod def default_response(self): pass class Forbidden(WebException): def default_response(self): return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE", "all exceptions which render error code (and page) to client\"\"\" @abc.abstractmethod def default_response(self):", "def default_response(self): return (404, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404 Page Not", "parameter set it is used to provide \"success\" page for various web forms", "location, cookie=None, *, status_code=303, status_text=None): super().__init__(location, status_code=status_code, status_text=status_text) self.cookie = cookie def headers(self):", "return (500, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500 Internal Server Error</title>' b'</head>'", "b'<head>' b'<title>404 Page Not Found</title>' b'</head>' b'<body>' b'<h1>404 Page Not Found</h1>' b'</body>' b'</html>'", "'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500 Internal Server Error</title>' b'</head>' b'<body>' b'<h1>500 
Internal", "b'</head>' b'<body>' b'<h1>404 Page Not Found</h1>' b'</body>' b'</html>' ) class MethodNotAllowed(WebException): def default_response(self):", "def __init__(self, new_path): self.new_path = new_path def update_request(self, request): request.uri = self.new_path del", "b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' ) class InternalError(WebException): def default_response(self): return (500, [('Content-Type',", "[('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500 Internal Server Error</title>' b'</head>' b'<body>' b'<h1>500", "b'<h1>404 Page Not Found</h1>' b'</body>' b'</html>' ) class MethodNotAllowed(WebException): def default_response(self): return (405,", "'<title>{0.statusline}</title>' '</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary redirect", "b'</html>' ) class Redirect(WebException): def __init__(self, location, status_code, status_text=None): assert status_text is None,", "Found</title>' b'</head>' b'<body>' b'<h1>404 Page Not Found</h1>' b'</body>' b'</html>' ) class MethodNotAllowed(WebException): def", "self.location = location self.statusline = '{:d}'.format(status_code) def location_header(self): return [('Location', self.location)] def headers(self):", "'</head>' '<body>' '<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary redirect which", "'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>'", "303 With :param:`cookie` set it is often used for login forms. 
Without parameter", "MethodNotAllowed(WebException): def default_response(self): return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405 Method", "is often used for login forms. Without parameter set it is used to", "b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' ) class InternalError(WebException): def", "status_code self.location = location self.statusline = '{:d}'.format(status_code) def location_header(self): return [('Location', self.location)] def", "class CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends code 303 With :param:`cookie` set it is", ") class Redirect(WebException): def __init__(self, location, status_code, status_text=None): assert status_text is None, \"Not", "def default_response(self): pass class Forbidden(WebException): def default_response(self): return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>'", "CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends code 303 With :param:`cookie` set it is often", "default_response(self): return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>'", "return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405 Method Not Allowed</title>' b'</head>'", "@abc.abstractmethod def update_request(self, request): pass class PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path = new_path", "[('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>405 Method Not Allowed</title>' b'</head>' b'<body>' b'<h1>405", "set it is used to provide \"success\" page for various web forms and", "pass class PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path = new_path def update_request(self, request): request.uri", "error code (and page) to client\"\"\" @abc.abstractmethod def 
default_response(self): pass class Forbidden(WebException): def", "non-idempotent actions \"\"\" def __init__(self, location, cookie=None, *, status_code=303, status_text=None): super().__init__(location, status_code=status_code, status_text=status_text)", "b'<title>404 Page Not Found</title>' b'</head>' b'<body>' b'<h1>404 Page Not Found</h1>' b'</body>' b'</html>' )", "class Redirect(WebException): def __init__(self, location, status_code, status_text=None): assert status_text is None, \"Not Implemented\"", "With :param:`cookie` set it is often used for login forms. Without parameter set", "Implemented\" self.status_code = status_code self.location = location self.statusline = '{:d}'.format(status_code) def location_header(self): return", "code (and page) to client\"\"\" @abc.abstractmethod def default_response(self): pass class Forbidden(WebException): def default_response(self):", "__init__(self, location, status_code, status_text=None): assert status_text is None, \"Not Implemented\" self.status_code = status_code", "'<h1>{0.statusline}</h1>' '<a href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends code", "that there is not such child\"\"\" class NiceError(Exception): \"\"\"Error that is safe to", "to provide \"success\" page for various web forms and other non-idempotent actions \"\"\"", "status_code, status_text=None): assert status_text is None, \"Not Implemented\" self.status_code = status_code self.location =", "[('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>404 Page Not Found</title>' b'</head>' b'<body>' b'<h1>404", "location self.statusline = '{:d}'.format(status_code) def location_header(self): return [('Location', self.location)] def headers(self): return ([('Content-Type',", "status_text=status_text) self.cookie = cookie def headers(self): sup = super().headers().copy() if self.cookie is not", "is not such 
child\"\"\" class NiceError(Exception): \"\"\"Error that is safe to present to", "b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' ) class InternalError(WebException):", "page) to client\"\"\" @abc.abstractmethod def default_response(self): pass class Forbidden(WebException): def default_response(self): return (403,", "InternalRedirect(Exception, metaclass=abc.ABCMeta): @abc.abstractmethod def update_request(self, request): pass class PathRewrite(InternalRedirect): def __init__(self, new_path): self.new_path", ") class InternalError(WebException): def default_response(self): return (500, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>'", "__init__(self, new_path): self.new_path = new_path def update_request(self, request): request.uri = self.new_path del request.parsed_uri", "used for login forms. Without parameter set it is used to provide \"success\"", "href=\"{0.location}\">Follow</a>' '</body>' '</html>'.format(self).encode('utf-8') ) class CompletionRedirect(Redirect): \"\"\"Temporary redirect which sends code 303 With", "super().__init__(location, status_code=status_code, status_text=status_text) self.cookie = cookie def headers(self): sup = super().headers().copy() if self.cookie", "[('Location', self.location)] def headers(self): return ([('Content-Type', 'text/html')] + self.location_header()) def default_response(self): return (self.statusline,", "= cookie def headers(self): sup = super().headers().copy() if self.cookie is not None: sup['Set-Cookie']", "cookie def headers(self): sup = super().headers().copy() if self.cookie is not None: sup['Set-Cookie'] =", "Found</h1>' b'</body>' b'</html>' ) class MethodNotAllowed(WebException): def default_response(self): return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE", "[('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403 
Forbidden</h1>' b'</body>'", "html>' b'<html>' b'<head>' b'<title>404 Page Not Found</title>' b'</head>' b'<body>' b'<h1>404 Page Not Found</h1>'", "b'<head>' b'<title>500 Internal Server Error</title>' b'</head>' b'<body>' b'<h1>500 Internal Server Error</h1>' b'</body>' b'</html>'", "class OutOfScopeError(Exception): \"\"\"Raised by resolve_local to notify that there is not such child\"\"\"", "b'</html>' ) class MethodNotAllowed(WebException): def default_response(self): return (405, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>'", "Internal Server Error</h1>' b'</body>' b'</html>' ) class NotFound(WebException): def default_response(self): return (404, [('Content-Type',", "<filename>aioroutes/exceptions.py import abc class WebException(Exception): \"\"\"Base for all exceptions which render error code", "class InternalError(WebException): def default_response(self): return (500, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>500", "status_text=None): assert status_text is None, \"Not Implemented\" self.status_code = status_code self.location = location", "return sup class OutOfScopeError(Exception): \"\"\"Raised by resolve_local to notify that there is not", "= '{:d}'.format(status_code) def location_header(self): return [('Location', self.location)] def headers(self): return ([('Content-Type', 'text/html')] +", "class Forbidden(WebException): def default_response(self): return (403, [('Content-Type', 'text/html')], b'<!DOCTYPE html>' b'<html>' b'<head>' b'<title>403", "([('Content-Type', 'text/html')] + self.location_header()) def default_response(self): return (self.statusline, self.headers(), '<!DOCTYPE html>' '<html>' '<head>'", "if self.cookie is not None: sup['Set-Cookie'] = self.cookie.output(header='') return sup class OutOfScopeError(Exception): \"\"\"Raised", "__init__(self, location, cookie=None, *, status_code=303, status_text=None): super().__init__(location, status_code=status_code, 
status_text=status_text) self.cookie = cookie def", "render error code (and page) to client\"\"\" @abc.abstractmethod def default_response(self): pass class Forbidden(WebException):", "html>' b'<html>' b'<head>' b'<title>403 Forbidden</title>' b'</head>' b'<body>' b'<h1>403 Forbidden</h1>' b'</body>' b'</html>' ) class", "status_code=303, status_text=None): super().__init__(location, status_code=status_code, status_text=status_text) self.cookie = cookie def headers(self): sup = super().headers().copy()" ]
[ "'enum34>=1.0.4', ]) setup( name='alder', version=__version__, description='Asynchrounous Lexical Distributed Event Roster', long_description=' Consensus DHT", "comparison element by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder', version=__version__, description='Asynchrounous Lexical Distributed", "'log*']), package_data={ '': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html', '*.css', '*.ico', '*.png', 'LICENSE',", "pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register sdist upload \"\"\" # Import", "likely being frozen and __file__ triggered this NameError # Let's work around that", "REQUIREMENTS = ['libnacl>=1.4.0' ] if sys.version_info < (2, 7): #tuple comparison element by", "register sdist upload \"\"\" # Import python libs import os import sys from", "SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA =", "by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder', version=__version__, description='Asynchrounous Lexical Distributed Event Roster',", "store.' 
' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical Distributed Event Roster", "7): #tuple comparison element by element # Under Python 2.6, also install REQUIREMENTS.extend([", "any command try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError: # We're most likely being", "running any command try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError: # We're most likely", "= ['libnacl>=1.4.0' ] if sys.version_info < (2, 7): #tuple comparison element by element", "# Under Python 2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info <", "# We're most likely being frozen and __file__ triggered this NameError # Let's", "= os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load the metadata using exec()", "REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info < (3, 4): #tuple comparison element by", "Consensus DHT database. Nested key value store.' 
' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>',", "Roster Consensus DHT Key Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log', 'log*']), package_data={", "['libnacl>=1.4.0' ] if sys.version_info < (2, 7): #tuple comparison element by element #", "ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load the metadata using exec() in order", "os import sys from setuptools import setup, find_packages # Change to Alders's source's", "os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load the metadata using exec() in order not to", "python setup.py register sdist upload \"\"\" # Import python libs import os import", "trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ] if sys.version_info <", "install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info < (3, 4): #tuple comparison element", "os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load the metadata using exec() in", "'__metadata__.py') # Load the metadata using exec() in order not to trigger alder.__init__", "]) if sys.version_info < (3, 4): #tuple comparison element by element REQUIREMENTS.extend([ 'enum34>=1.0.4',", "SETUP_DIRNAME = os.path.dirname(__file__) except NameError: # We're most likely being frozen and __file__", "4): #tuple comparison element by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder', version=__version__, description='Asynchrounous", "= os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME,", "using exec() in order not to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), 
ALDER_METADATA, 'exec')) REQUIREMENTS", "'*.md', '*.rst', '*.json', '*.conf', '*.html', '*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL'], }, install_requires=REQUIREMENTS, extras_require={},", "not to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ] if", "directory prior to running any command try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError: #", "\"\"\" setup.py Basic setup file to enable pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python", "find_packages # Change to Alders's source's directory prior to running any command try:", "'alder', '__metadata__.py') # Load the metadata using exec() in order not to trigger", "packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log', 'log*']), package_data={ '': ['*.txt', '*.md', '*.rst', '*.json', '*.conf',", "python libs import os import sys from setuptools import setup, find_packages # Change", "long_description=' Consensus DHT database. Nested key value store.' 
' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__,", "if sys.version_info < (2, 7): #tuple comparison element by element # Under Python", "sys.version_info < (2, 7): #tuple comparison element by element # Under Python 2.6,", "libs import os import sys from setuptools import setup, find_packages # Change to", "# Import python libs import os import sys from setuptools import setup, find_packages", "(2, 7): #tuple comparison element by element # Under Python 2.6, also install", "'*.json', '*.conf', '*.html', '*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL'], }, install_requires=REQUIREMENTS, extras_require={}, #scripts=['scripts/alder'], )", "install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register sdist upload \"\"\" # Import python", "Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log', 'log*']), package_data={ '': ['*.txt', '*.md', '*.rst', '*.json',", "' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical Distributed Event Roster Consensus", "< (3, 4): #tuple comparison element by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder',", "', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical Distributed Event Roster Consensus DHT", "description='Asynchrounous Lexical Distributed Event Roster', long_description=' Consensus DHT database. 
Nested key value store.'", "element by element # Under Python 2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ])", "import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ] if sys.version_info < (2, 7):", "exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ] if sys.version_info < (2, 7): #tuple", "Distributed Event Roster', long_description=' Consensus DHT database. Nested key value store.' ' ',", "'docs', 'docs*', 'log', 'log*']), package_data={ '': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html', '*.css',", "http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register sdist upload \"\"\" # Import python libs import", "that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA", "and __file__ triggered this NameError # Let's work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0])", "upload \"\"\" # Import python libs import os import sys from setuptools import", "['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html', '*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL'], }, install_requires=REQUIREMENTS,", "also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info < (3, 4): #tuple comparison", "if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py')", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" setup.py Basic setup file to", "'test.*', 'docs', 'docs*', 'log', 'log*']), package_data={ '': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html',", "Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log', 'log*']), package_data={ '': ['*.txt', '*.md', '*.rst',", "-*- coding: utf-8 -*- 
\"\"\" setup.py Basic setup file to enable pip install", "'': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load the", "#tuple comparison element by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder', version=__version__, description='Asynchrounous Lexical", "'argparse>=1.2.1' ]) if sys.version_info < (3, 4): #tuple comparison element by element REQUIREMENTS.extend([", "keywords=('Asynchrounous Lexical Distributed Event Roster Consensus DHT Key Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs',", "We're most likely being frozen and __file__ triggered this NameError # Let's work", "'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info < (3, 4): #tuple comparison element by element", "setuptools import setup, find_packages # Change to Alders's source's directory prior to running", "'docs*', 'log', 'log*']), package_data={ '': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html', '*.css', '*.ico',", "import setup, find_packages # Change to Alders's source's directory prior to running any", "Basic setup file to enable pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register", "frozen and __file__ triggered this NameError # Let's work around that SETUP_DIRNAME =", "See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register sdist upload \"\"\" # Import python libs", "the metadata using exec() in order not to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA,", "setup.py register sdist upload \"\"\" # Import python libs import os import sys", "url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, 
keywords=('Asynchrounous Lexical Distributed Event Roster Consensus DHT Key", "database. Nested key value store.' ' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous", "(3, 4): #tuple comparison element by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder', version=__version__,", "setup( name='alder', version=__version__, description='Asynchrounous Lexical Distributed Event Roster', long_description=' Consensus DHT database. Nested", "Under Python 2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info < (3,", "enable pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register sdist upload \"\"\" #", "https://pypi.python.org/pypi/setuptools python setup.py register sdist upload \"\"\" # Import python libs import os", "to running any command try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError: # We're most", "# Load the metadata using exec() in order not to trigger alder.__init__ import", "version=__version__, description='Asynchrounous Lexical Distributed Event Roster', long_description=' Consensus DHT database. 
Nested key value", "author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical Distributed Event Roster Consensus DHT Key Value Store'), packages=find_packages(exclude=['test',", "alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ] if sys.version_info < (2,", "NameError: # We're most likely being frozen and __file__ triggered this NameError #", "to Alders's source's directory prior to running any command try: SETUP_DIRNAME = os.path.dirname(__file__)", "element # Under Python 2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info", "sys.version_info < (3, 4): #tuple comparison element by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup(", "coding: utf-8 -*- \"\"\" setup.py Basic setup file to enable pip install See:", "prior to running any command try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError: # We're", "try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError: # We're most likely being frozen and", "sdist upload \"\"\" # Import python libs import os import sys from setuptools", "os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load the metadata", "Event Roster', long_description=' Consensus DHT database. Nested key value store.' ' ', url='https://github.com/AlderDHT/alder.git',", "Lexical Distributed Event Roster', long_description=' Consensus DHT database. Nested key value store.' 
'", "triggered this NameError # Let's work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME", "NameError # Let's work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '':", "metadata using exec() in order not to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec'))", "#tuple comparison element by element # Under Python 2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3',", "<filename>setup.py #!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" setup.py Basic setup file", "Distributed Event Roster Consensus DHT Key Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log',", "file to enable pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register sdist upload", "# Let's work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME)", "2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info < (3, 4): #tuple", "DHT database. Nested key value store.' 
' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__,", "work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME =", "DHT Key Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log', 'log*']), package_data={ '': ['*.txt',", "license=__license__, keywords=('Asynchrounous Lexical Distributed Event Roster Consensus DHT Key Value Store'), packages=find_packages(exclude=['test', 'test.*',", "around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME)", "comparison element by element # Under Python 2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1'", "sys from setuptools import setup, find_packages # Change to Alders's source's directory prior", "# Change to Alders's source's directory prior to running any command try: SETUP_DIRNAME", "most likely being frozen and __file__ triggered this NameError # Let's work around", "if sys.version_info < (3, 4): #tuple comparison element by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ])", "'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ] if sys.version_info < (2, 7): #tuple comparison element", "download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical Distributed Event Roster Consensus DHT Key Value", "Roster', long_description=' Consensus DHT database. Nested key value store.' 
' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip',", "in order not to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0'", "import sys from setuptools import setup, find_packages # Change to Alders's source's directory", "= os.path.dirname(__file__) except NameError: # We're most likely being frozen and __file__ triggered", "ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ] if sys.version_info < (2, 7): #tuple comparison", "]) setup( name='alder', version=__version__, description='Asynchrounous Lexical Distributed Event Roster', long_description=' Consensus DHT database.", "< (2, 7): #tuple comparison element by element # Under Python 2.6, also", "setup.py Basic setup file to enable pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py", "os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder',", "element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder', version=__version__, description='Asynchrounous Lexical Distributed Event Roster', long_description='", "package_data={ '': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html', '*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL'],", "'': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html', '*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL'], },", "import os import sys from setuptools import setup, find_packages # Change to Alders's", "order not to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ]", "'*.rst', '*.json', '*.conf', '*.html', '*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL'], }, install_requires=REQUIREMENTS, extras_require={}, #scripts=['scripts/alder'],", "Nested key 
value store.' ' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical", "__file__ triggered this NameError # Let's work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if", "Let's work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME", "source's directory prior to running any command try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError:", "key value store.' ' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical Distributed", "'log', 'log*']), package_data={ '': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html', '*.css', '*.ico', '*.png',", "# -*- coding: utf-8 -*- \"\"\" setup.py Basic setup file to enable pip", "python # -*- coding: utf-8 -*- \"\"\" setup.py Basic setup file to enable", "value store.' 
' ', url='https://github.com/AlderDHT/alder.git', download_url='https://github.com/AlderDHT/alder/archive/master.zip', author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical Distributed Event", "Load the metadata using exec() in order not to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(),", "being frozen and __file__ triggered this NameError # Let's work around that SETUP_DIRNAME", "Key Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log', 'log*']), package_data={ '': ['*.txt', '*.md',", "setup, find_packages # Change to Alders's source's directory prior to running any command", "by element # Under Python 2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if", "exec() in order not to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS =", "author=__author__, author_email='smith.samuel.m<EMAIL>', license=__license__, keywords=('Asynchrounous Lexical Distributed Event Roster Consensus DHT Key Value Store'),", "SETUP_DIRNAME != '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') #", "REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder', version=__version__, description='Asynchrounous Lexical Distributed Event Roster', long_description=' Consensus", "name='alder', version=__version__, description='Asynchrounous Lexical Distributed Event Roster', long_description=' Consensus DHT database. 
Nested key", "utf-8 -*- \"\"\" setup.py Basic setup file to enable pip install See: http://pythonhosted.org//setuptools/setuptools.html", "Lexical Distributed Event Roster Consensus DHT Key Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*',", "Event Roster Consensus DHT Key Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log', 'log*']),", "this NameError # Let's work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME !=", "to trigger alder.__init__ import exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec')) REQUIREMENTS = ['libnacl>=1.4.0' ] if sys.version_info", "to enable pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register sdist upload \"\"\"", "\"\"\" # Import python libs import os import sys from setuptools import setup,", "Change to Alders's source's directory prior to running any command try: SETUP_DIRNAME =", "-*- \"\"\" setup.py Basic setup file to enable pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools", "except NameError: # We're most likely being frozen and __file__ triggered this NameError", "Alders's source's directory prior to running any command try: SETUP_DIRNAME = os.path.dirname(__file__) except", "Python 2.6, also install REQUIREMENTS.extend([ 'importlib>=1.0.3', 'argparse>=1.2.1' ]) if sys.version_info < (3, 4):", "Consensus DHT Key Value Store'), packages=find_packages(exclude=['test', 'test.*', 'docs', 'docs*', 'log', 'log*']), package_data={ '':", "SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load the metadata using", "Import python libs import os import sys from setuptools import setup, find_packages #", "element by element REQUIREMENTS.extend([ 'enum34>=1.0.4', ]) setup( name='alder', version=__version__, 
description='Asynchrounous Lexical Distributed Event", "setup file to enable pip install See: http://pythonhosted.org//setuptools/setuptools.html https://pypi.python.org/pypi/setuptools python setup.py register sdist", "!= '': os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load", "command try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError: # We're most likely being frozen", "from setuptools import setup, find_packages # Change to Alders's source's directory prior to", "] if sys.version_info < (2, 7): #tuple comparison element by element # Under", "os.path.dirname(__file__) except NameError: # We're most likely being frozen and __file__ triggered this", "= os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py') # Load the metadata using exec() in order not" ]
[ "Generated by Django 3.0.5 on 2020-05-06 16:47 from django.db import migrations import secrets", "= EventSchedule() schedule.day_of_week = event.day_of_week schedule.time = event.time schedule.hash = secrets.token_hex(4) # URL-eket", "if key in events_dict: key_event = events_dict[key] event.delete() else: event.save() key_event = event", "= (event.location, event.liturgy) if key in events_dict: key_event = events_dict[key] event.delete() else: event.save()", "events_dict[key] = key_event schedule.event = key_event schedule.save() class Migration(migrations.Migration): dependencies = [ ('zsolozsma',", "import secrets def copy_schedule(apps, schema_editor): Event = apps.get_model('zsolozsma', 'Event') EventSchedule = apps.get_model('zsolozsma', 'EventSchedule')", "migrations import secrets def copy_schedule(apps, schema_editor): Event = apps.get_model('zsolozsma', 'Event') EventSchedule = apps.get_model('zsolozsma',", "key_event = events_dict[key] event.delete() else: event.save() key_event = event events_dict[key] = key_event schedule.event", "= event.day_of_week schedule.time = event.time schedule.hash = secrets.token_hex(4) # URL-eket nem másolunk, még", "for event in Event.objects.all(): schedule = EventSchedule() schedule.day_of_week = event.day_of_week schedule.time = event.time", "2020-05-06 16:47 from django.db import migrations import secrets def copy_schedule(apps, schema_editor): Event =", "schema_editor): Event = apps.get_model('zsolozsma', 'Event') EventSchedule = apps.get_model('zsolozsma', 'EventSchedule') events_dict = {} for", "key_event schedule.event = key_event schedule.save() class Migration(migrations.Migration): dependencies = [ ('zsolozsma', '0013_eventschedule'), ]", "nincs napi egyedi érték sehol key = (event.location, event.liturgy) if key in events_dict:", "in events_dict: key_event = events_dict[key] event.delete() else: event.save() key_event = event events_dict[key] =", "import migrations import secrets def copy_schedule(apps, 
schema_editor): Event = apps.get_model('zsolozsma', 'Event') EventSchedule =", "még nincs napi egyedi érték sehol key = (event.location, event.liturgy) if key in", "másolunk, még nincs napi egyedi érték sehol key = (event.location, event.liturgy) if key", "EventSchedule = apps.get_model('zsolozsma', 'EventSchedule') events_dict = {} for event in Event.objects.all(): schedule =", "= apps.get_model('zsolozsma', 'EventSchedule') events_dict = {} for event in Event.objects.all(): schedule = EventSchedule()", "'EventSchedule') events_dict = {} for event in Event.objects.all(): schedule = EventSchedule() schedule.day_of_week =", "events_dict = {} for event in Event.objects.all(): schedule = EventSchedule() schedule.day_of_week = event.day_of_week", "in Event.objects.all(): schedule = EventSchedule() schedule.day_of_week = event.day_of_week schedule.time = event.time schedule.hash =", "schedule.hash = secrets.token_hex(4) # URL-eket nem másolunk, még nincs napi egyedi érték sehol", "by Django 3.0.5 on 2020-05-06 16:47 from django.db import migrations import secrets def", "event.save() key_event = event events_dict[key] = key_event schedule.event = key_event schedule.save() class Migration(migrations.Migration):", "= event events_dict[key] = key_event schedule.event = key_event schedule.save() class Migration(migrations.Migration): dependencies =", "copy_schedule(apps, schema_editor): Event = apps.get_model('zsolozsma', 'Event') EventSchedule = apps.get_model('zsolozsma', 'EventSchedule') events_dict = {}", "Event = apps.get_model('zsolozsma', 'Event') EventSchedule = apps.get_model('zsolozsma', 'EventSchedule') events_dict = {} for event", "event events_dict[key] = key_event schedule.event = key_event schedule.save() class Migration(migrations.Migration): dependencies = [", "= key_event schedule.save() class Migration(migrations.Migration): dependencies = [ ('zsolozsma', '0013_eventschedule'), ] operations =", "apps.get_model('zsolozsma', 'Event') EventSchedule = 
apps.get_model('zsolozsma', 'EventSchedule') events_dict = {} for event in Event.objects.all():", "events_dict: key_event = events_dict[key] event.delete() else: event.save() key_event = event events_dict[key] = key_event", "event.time schedule.hash = secrets.token_hex(4) # URL-eket nem másolunk, még nincs napi egyedi érték", "16:47 from django.db import migrations import secrets def copy_schedule(apps, schema_editor): Event = apps.get_model('zsolozsma',", "key_event schedule.save() class Migration(migrations.Migration): dependencies = [ ('zsolozsma', '0013_eventschedule'), ] operations = [", "secrets def copy_schedule(apps, schema_editor): Event = apps.get_model('zsolozsma', 'Event') EventSchedule = apps.get_model('zsolozsma', 'EventSchedule') events_dict", "nem másolunk, még nincs napi egyedi érték sehol key = (event.location, event.liturgy) if", "= key_event schedule.event = key_event schedule.save() class Migration(migrations.Migration): dependencies = [ ('zsolozsma', '0013_eventschedule'),", "schedule.event = key_event schedule.save() class Migration(migrations.Migration): dependencies = [ ('zsolozsma', '0013_eventschedule'), ] operations", "(event.location, event.liturgy) if key in events_dict: key_event = events_dict[key] event.delete() else: event.save() key_event", "on 2020-05-06 16:47 from django.db import migrations import secrets def copy_schedule(apps, schema_editor): Event", "# URL-eket nem másolunk, még nincs napi egyedi érték sehol key = (event.location,", "secrets.token_hex(4) # URL-eket nem másolunk, még nincs napi egyedi érték sehol key =", "= secrets.token_hex(4) # URL-eket nem másolunk, még nincs napi egyedi érték sehol key", "sehol key = (event.location, event.liturgy) if key in events_dict: key_event = events_dict[key] event.delete()", "'Event') EventSchedule = apps.get_model('zsolozsma', 'EventSchedule') events_dict = {} for event in Event.objects.all(): schedule", "= {} for event in Event.objects.all(): schedule = EventSchedule() 
schedule.day_of_week = event.day_of_week schedule.time", "django.db import migrations import secrets def copy_schedule(apps, schema_editor): Event = apps.get_model('zsolozsma', 'Event') EventSchedule", "érték sehol key = (event.location, event.liturgy) if key in events_dict: key_event = events_dict[key]", "URL-eket nem másolunk, még nincs napi egyedi érték sehol key = (event.location, event.liturgy)", "event.delete() else: event.save() key_event = event events_dict[key] = key_event schedule.event = key_event schedule.save()", "schedule.day_of_week = event.day_of_week schedule.time = event.time schedule.hash = secrets.token_hex(4) # URL-eket nem másolunk,", "Event.objects.all(): schedule = EventSchedule() schedule.day_of_week = event.day_of_week schedule.time = event.time schedule.hash = secrets.token_hex(4)", "{} for event in Event.objects.all(): schedule = EventSchedule() schedule.day_of_week = event.day_of_week schedule.time =", "key in events_dict: key_event = events_dict[key] event.delete() else: event.save() key_event = event events_dict[key]", "key = (event.location, event.liturgy) if key in events_dict: key_event = events_dict[key] event.delete() else:", "EventSchedule() schedule.day_of_week = event.day_of_week schedule.time = event.time schedule.hash = secrets.token_hex(4) # URL-eket nem", "else: event.save() key_event = event events_dict[key] = key_event schedule.event = key_event schedule.save() class", "schedule.time = event.time schedule.hash = secrets.token_hex(4) # URL-eket nem másolunk, még nincs napi", "events_dict[key] event.delete() else: event.save() key_event = event events_dict[key] = key_event schedule.event = key_event", "schedule = EventSchedule() schedule.day_of_week = event.day_of_week schedule.time = event.time schedule.hash = secrets.token_hex(4) #", "Django 3.0.5 on 2020-05-06 16:47 from django.db import migrations import secrets def copy_schedule(apps,", "event.day_of_week schedule.time = event.time schedule.hash = secrets.token_hex(4) 
# URL-eket nem másolunk, még nincs", "event in Event.objects.all(): schedule = EventSchedule() schedule.day_of_week = event.day_of_week schedule.time = event.time schedule.hash", "= events_dict[key] event.delete() else: event.save() key_event = event events_dict[key] = key_event schedule.event =", "key_event = event events_dict[key] = key_event schedule.event = key_event schedule.save() class Migration(migrations.Migration): dependencies", "egyedi érték sehol key = (event.location, event.liturgy) if key in events_dict: key_event =", "def copy_schedule(apps, schema_editor): Event = apps.get_model('zsolozsma', 'Event') EventSchedule = apps.get_model('zsolozsma', 'EventSchedule') events_dict =", "class Migration(migrations.Migration): dependencies = [ ('zsolozsma', '0013_eventschedule'), ] operations = [ migrations.RunPython(copy_schedule) ]", "event.liturgy) if key in events_dict: key_event = events_dict[key] event.delete() else: event.save() key_event =", "= apps.get_model('zsolozsma', 'Event') EventSchedule = apps.get_model('zsolozsma', 'EventSchedule') events_dict = {} for event in", "apps.get_model('zsolozsma', 'EventSchedule') events_dict = {} for event in Event.objects.all(): schedule = EventSchedule() schedule.day_of_week", "# Generated by Django 3.0.5 on 2020-05-06 16:47 from django.db import migrations import", "= event.time schedule.hash = secrets.token_hex(4) # URL-eket nem másolunk, még nincs napi egyedi", "schedule.save() class Migration(migrations.Migration): dependencies = [ ('zsolozsma', '0013_eventschedule'), ] operations = [ migrations.RunPython(copy_schedule)", "napi egyedi érték sehol key = (event.location, event.liturgy) if key in events_dict: key_event", "from django.db import migrations import secrets def copy_schedule(apps, schema_editor): Event = apps.get_model('zsolozsma', 'Event')", "3.0.5 on 2020-05-06 16:47 from django.db import migrations import secrets def copy_schedule(apps, schema_editor):" ]
[ "Command, limit: int, units: str): '''Move to positive/negative limit''' try: if limit ==", "__init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command: Command): '''Is at", "command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return command.finish( AtLimit=self.service.isAtLimit(), DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, ) except", "from .BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\" def __init__(self, _svcName):", "try: if limit == -1: command.info(text=\"move to negative\") elif limit == 1: command.info(text=\"move", "type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command: Command, limit: int, units:", "2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX", "to positive\") else: command.finish() self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units,", ") self.service.moveToLimitWait() return command.finish( AtLimit=self.service.isAtLimit(), DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, ) except Exception as e: command.fail(error=e)", "clu x worker\" def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self,", "@command_parser.command(\"moveToLimit\") 
@click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command: Command, limit:", "# -*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2021-06-15", "@BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command: Command): '''Is at positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit())", "if limit == -1: command.info(text=\"move to negative\") elif limit == 1: command.info(text=\"move to", "@click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command: Command, limit: int, units: str):", "# @Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException", "as np from .BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\" def", "not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return command.finish( AtLimit=self.service.isAtLimit(),", "positive/negative limit''' try: if limit == -1: command.info(text=\"move to negative\") elif limit ==", "import BasdaService import Nice import numpy as np from .BasdaMoccaCluPythonServiceWorker import * class", "async def isAtLimit(self, command: Command): '''Is at positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except", "Exception as e: command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def", "isAtLimit(self, command: Command): '''Is at 
positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception as", ".BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\" def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self,", "else: command.finish() self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), )", "def isAtLimit(self, command: Command): '''Is at positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception", "@BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command: Command, limit: int, units: str): '''Move to positive/negative", "# @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX import", "-*- # # @Author: <NAME> (<EMAIL>) # @Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py #", "while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return command.finish(", "int, units: str): '''Move to positive/negative limit''' try: if limit == -1: command.info(text=\"move", "(<EMAIL>) # @Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import", "Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return command.finish( AtLimit=self.service.isAtLimit(), DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, ) except Exception as", "at 
positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception as e: command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\",", "@click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command: Command, limit: int,", "np from .BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\" def __init__(self,", "def moveToLimit(self, command: Command, limit: int, units: str): '''Move to positive/negative limit''' try:", "command: Command, limit: int, units: str): '''Move to positive/negative limit''' try: if limit", "coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2021-06-15 # @Filename:", "* class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\" def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\")", "'''Is at positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception as e: command.fail(error=e) @command_parser.command(\"moveToLimit\")", "to negative\") elif limit == 1: command.info(text=\"move to positive\") else: command.finish() self.service.moveToLimitStart(limit) while", "import BasdaMoccaException import BasdaMoccaX import BasdaService import Nice import numpy as np from", "@command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command: Command): '''Is at positive/negative limit''' try: return", "-*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2021-06-15 #", "# @Author: <NAME> (<EMAIL>) # @Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD", "limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception as e: 
command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\",", "try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception as e: command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str,", "units: str): '''Move to positive/negative limit''' try: if limit == -1: command.info(text=\"move to", "BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\" def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async", "to positive/negative limit''' try: if limit == -1: command.info(text=\"move to negative\") elif limit", "'''Move to positive/negative limit''' try: if limit == -1: command.info(text=\"move to negative\") elif", "worker\" def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command: Command):", "x worker\" def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command:", "command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command: Command,", "BasdaMoccaException import BasdaMoccaX import BasdaService import Nice import numpy as np from .BasdaMoccaCluPythonServiceWorker", "str): '''Move to positive/negative limit''' try: if limit == -1: command.info(text=\"move to negative\")", "_svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command: Command): '''Is at 
positive/negative limit''' try:", "self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return command.finish( AtLimit=self.service.isAtLimit(), DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units),", "utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py", "BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command: Command): '''Is at positive/negative limit'''", "command.info(text=\"move to positive\") else: command.finish() self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units),", "BasdaMoccaX import BasdaService import Nice import numpy as np from .BasdaMoccaCluPythonServiceWorker import *", "command.finish(AtLimit=self.service.isAtLimit()) except Exception as e: command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper", "negative\") elif limit == 1: command.info(text=\"move to positive\") else: command.finish() self.service.moveToLimitStart(limit) while not", "# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX import BasdaService import Nice", "as e: command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self,", "== 1: command.info(text=\"move to positive\") else: command.finish() 
self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1)", "except Exception as e: command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async", "self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return", "class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\" def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper", "Nice import numpy as np from .BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu", "-1: command.info(text=\"move to negative\") elif limit == 1: command.info(text=\"move to positive\") else: command.finish()", "BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX import BasdaService import Nice import numpy", "command.finish() self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait()", "(http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX import BasdaService import Nice import numpy as np", "asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return 
command.finish( AtLimit=self.service.isAtLimit(), DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, )", "limit == 1: command.info(text=\"move to positive\") else: command.finish() self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await", "@License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX import BasdaService import Nice import", "e: command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command:", "@Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import", "async def moveToLimit(self, command: Command, limit: int, units: str): '''Move to positive/negative limit'''", "import Nice import numpy as np from .BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python", "def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command: Command): '''Is", "limit''' try: if limit == -1: command.info(text=\"move to negative\") elif limit == 1:", "import numpy as np from .BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x", "moveToLimit(self, command: Command, limit: int, units: str): '''Move to positive/negative limit''' try: if", "Command): '''Is at positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception as e: command.fail(error=e)", "import BasdaMoccaX import BasdaService import Nice import numpy as np from 
.BasdaMoccaCluPythonServiceWorker import", "elif limit == 1: command.info(text=\"move to positive\") else: command.finish() self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone():", "command.info(text=\"move to negative\") elif limit == 1: command.info(text=\"move to positive\") else: command.finish() self.service.moveToLimitStart(limit)", "BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX import BasdaService import", "BasdaService import Nice import numpy as np from .BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker):", "command: Command): '''Is at positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception as e:", "numpy as np from .BasdaMoccaCluPythonServiceWorker import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\"", "@Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX import BasdaService", "limit == -1: command.info(text=\"move to negative\") elif limit == 1: command.info(text=\"move to positive\")", "# # @Author: <NAME> (<EMAIL>) # @Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License:", "== -1: command.info(text=\"move to negative\") elif limit == 1: command.info(text=\"move to positive\") else:", "Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return command.finish( AtLimit=self.service.isAtLimit(), DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, ) except Exception as e:", "limit: int, units: str): '''Move to positive/negative limit''' try: if limit == -1:", "return command.finish(AtLimit=self.service.isAtLimit()) except Exception as e: command.fail(error=e) 
@command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int) @click.argument(\"UNITS\", type=str, default=\"STEPS\")", "3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import BasdaMoccaException import BasdaMoccaX import BasdaService import Nice import numpy as", "@Author: <NAME> (<EMAIL>) # @Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause", "type=str, default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command: Command, limit: int, units: str): '''Move", "positive\") else: command.finish() self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(),", "DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return command.finish( AtLimit=self.service.isAtLimit(), DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, ) except Exception", "default=\"STEPS\") @BasdaCluPythonServiceWorker.wrapper async def moveToLimit(self, command: Command, limit: int, units: str): '''Move to", "await asyncio.sleep(0.1) command.info( DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units, Velocity=self.service.getVelocity(), ) self.service.moveToLimitWait() return command.finish( AtLimit=self.service.isAtLimit(), DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units), Units=units,", "\"python clu x worker\" def __init__(self, _svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def", "import * class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker): \"python clu x worker\" def __init__(self, _svcName): 
BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName)", "positive/negative limit''' try: return command.finish(AtLimit=self.service.isAtLimit()) except Exception as e: command.fail(error=e) @command_parser.command(\"moveToLimit\") @click.argument(\"LIMIT\", type=int)", "_svcName): BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName) @command_parser.command(\"isAtLimit\") @BasdaCluPythonServiceWorker.wrapper async def isAtLimit(self, command: Command): '''Is at positive/negative", "1: command.info(text=\"move to positive\") else: command.finish() self.service.moveToLimitStart(limit) while not self.service.moveToLimitCompletion().isDone(): await asyncio.sleep(0.1) command.info(", "<NAME> (<EMAIL>) # @Date: 2021-06-15 # @Filename: BasdaMoccaXCluPythonServiceWorker.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)", "<filename>python/lvmtan/BasdaMoccaXCluPythonServiceWorker.py<gh_stars>0 # -*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL>) # @Date:" ]
[ "coding=utf-8 from zentropi import run_agents from .zentweepy import ZenTweepy def main(): zentweepy =", ".zentweepy import ZenTweepy def main(): zentweepy = ZenTweepy(name='ZenTweepy', auth='<PASSWORD>') run_agents(zentweepy, shell=False, space='zentropia', endpoint='wss://zentropi.com/')", "# coding=utf-8 from zentropi import run_agents from .zentweepy import ZenTweepy def main(): zentweepy", "from .zentweepy import ZenTweepy def main(): zentweepy = ZenTweepy(name='ZenTweepy', auth='<PASSWORD>') run_agents(zentweepy, shell=False, space='zentropia',", "zentropi import run_agents from .zentweepy import ZenTweepy def main(): zentweepy = ZenTweepy(name='ZenTweepy', auth='<PASSWORD>')", "<filename>examples/zentweepy/src/zentweepy/cli.py # coding=utf-8 from zentropi import run_agents from .zentweepy import ZenTweepy def main():", "from zentropi import run_agents from .zentweepy import ZenTweepy def main(): zentweepy = ZenTweepy(name='ZenTweepy',", "run_agents from .zentweepy import ZenTweepy def main(): zentweepy = ZenTweepy(name='ZenTweepy', auth='<PASSWORD>') run_agents(zentweepy, shell=False,", "import run_agents from .zentweepy import ZenTweepy def main(): zentweepy = ZenTweepy(name='ZenTweepy', auth='<PASSWORD>') run_agents(zentweepy," ]
[ "options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'},", "from django.db import migrations, models import django.db.models.deletion import tinymce.models class Migration(migrations.Migration): dependencies =", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total", "), migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form", "migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ), migrations.AlterField( model_name='campaignlocationshift',", "django.db import migrations, models import django.db.models.deletion import tinymce.models class Migration(migrations.Migration): dependencies = [", "= [ ('campaigns', '0008_auto_20160828_1608'), ] operations = [ migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural':", "verbose_name='Name'), ), migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header', 
field=tinymce.models.HTMLField(verbose_name='Registration", "model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField( model_name='campaign',", "migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField(", "import migrations, models import django.db.models.deletion import tinymce.models class Migration(migrations.Migration): dependencies = [ ('campaigns',", "'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is", "model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'),", "), migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form", "# -*- coding: utf-8 -*- # Generated by Django 1.9.8 on 
2016-08-28 18:14", "), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField(", "field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='campaign',", "name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end',", "verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField(", "name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField( model_name='campaign',", "[ migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift',", "<filename>src/campaigns/migrations/0009_auto_20160828_2114.py # -*- coding: utf-8 -*- # Generated by Django 1.9.8 on 2016-08-28", "operations = [ migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( 
name='campaignlocationshift', options={'verbose_name':", "coding: utf-8 -*- # Generated by Django 1.9.8 on 2016-08-28 18:14 from __future__", "('campaigns', '0008_auto_20160828_1608'), ] operations = [ migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ),", "model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField(", "name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift',", "name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start',", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'),", "on 2016-08-28 18:14 from __future__ import unicode_literals from django.db import migrations, models import", "dependencies = [ ('campaigns', '0008_auto_20160828_1608'), ] operations = [ migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign',", "), migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), 
migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'),", "'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField(", "name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255,", "field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header',", "'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField( model_name='campaign',", "), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ), migrations.AlterField( model_name='campaignlocationshift', name='volunteers', field=models.ManyToManyField(blank=True, to='volunteers.Volunteer', verbose_name='Volunteers'),", "1.9.8 on 2016-08-28 18:14 from __future__ import unicode_literals from django.db import migrations, models", "), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ),", "field=models.TimeField(verbose_name='End'), ), migrations.AlterField( 
model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'),", "to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'),", "from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import tinymce.models", "active'), ), migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form", "migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'),", "), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift',", "name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField(", "] operations = [ migrations.AlterModelOptions( name='campaign', 
options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift',", "models import django.db.models.deletion import tinymce.models class Migration(migrations.Migration): dependencies = [ ('campaigns', '0008_auto_20160828_1608'), ]", "migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField(", "), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField( model_name='campaign', name='end',", "-*- # Generated by Django 1.9.8 on 2016-08-28 18:14 from __future__ import unicode_literals", "migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField(", "model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'),", "shifts'}, ), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ),", "name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='campaign', 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day',", "model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField( model_name='campaign', name='name',", "model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ), migrations.AlterField( model_name='campaignlocationshift', name='volunteers', field=models.ManyToManyField(blank=True, to='volunteers.Volunteer', verbose_name='Volunteers'), ), ]", "migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right", "model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ),", "verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ),", "model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( 
model_name='campaignlocationshift',", "2016-08-28 18:14 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion", "field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ), migrations.AlterField( model_name='campaignlocationshift', name='volunteers', field=models.ManyToManyField(blank=True, to='volunteers.Volunteer',", "field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'),", "name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ),", "# Generated by Django 1.9.8 on 2016-08-28 18:14 from __future__ import unicode_literals from", "to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ),", "import django.db.models.deletion import tinymce.models class Migration(migrations.Migration): dependencies = [ ('campaigns', '0008_auto_20160828_1608'), ] operations", "), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'),", "field=models.DateField(verbose_name='Start'), ), 
migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'),", "name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign", "__future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import tinymce.models class", "footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration", "panel'), ), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'),", "Generated by Django 1.9.8 on 2016-08-28 18:14 from __future__ import unicode_literals from django.db", "class Migration(migrations.Migration): dependencies = [ ('campaigns', '0008_auto_20160828_1608'), ] operations = [ migrations.AlterModelOptions( name='campaign',", "'Campaign shift', 'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign',", "migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ), migrations.AlterField( 
model_name='campaignlocationshift', name='volunteers', field=models.ManyToManyField(blank=True, to='volunteers.Volunteer', verbose_name='Volunteers'), ),", "tinymce.models class Migration(migrations.Migration): dependencies = [ ('campaigns', '0008_auto_20160828_1608'), ] operations = [ migrations.AlterModelOptions(", "utf-8 -*- # Generated by Django 1.9.8 on 2016-08-28 18:14 from __future__ import", "), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ), migrations.AlterField(", "Migration(migrations.Migration): dependencies = [ ('campaigns', '0008_auto_20160828_1608'), ] operations = [ migrations.AlterModelOptions( name='campaign', options={'verbose_name':", "name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location',", "model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift',", "field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration", "migrations.AlterField( model_name='campaignlocationshift', name='day', 
field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift', name='location',", "), migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ),", "migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField( model_name='campaign',", "field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField( model_name='campaign',", "= [ migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign", "form header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ), migrations.AlterField( model_name='campaign',", "name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ), migrations.AlterField( model_name='campaignlocationshift', name='volunteers', field=models.ManyToManyField(blank=True,", "model_name='campaignlocationshift', name='campaign', 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ), migrations.AlterField( model_name='campaignlocationshift',", "by Django 1.9.8 on 2016-08-28 18:14 from __future__ import unicode_literals from django.db import", "name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ),", "-*- coding: utf-8 -*- # Generated by Django 1.9.8 on 2016-08-28 18:14 from", "header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ), migrations.AlterField( model_name='campaign', name='start',", "migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ),", "model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places', field=models.IntegerField(verbose_name='Total places'), ), migrations.AlterField( model_name='campaignlocationshift', name='volunteers',", "migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField(", "'0008_auto_20160828_1608'), ] operations = [ 
migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions(", "unicode_literals from django.db import migrations, models import django.db.models.deletion import tinymce.models class Migration(migrations.Migration): dependencies", "form footer'), ), migrations.AlterField( model_name='campaign', name='registration_form_header', field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel',", "model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift',", "[ ('campaigns', '0008_auto_20160828_1608'), ] operations = [ migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'},", "'Campaign shifts'}, ), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'),", "18:14 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import", "django.db.models.deletion import tinymce.models class Migration(migrations.Migration): dependencies = [ ('campaigns', '0008_auto_20160828_1608'), ] operations =", "field=tinymce.models.HTMLField(verbose_name='Registration form header'), ), migrations.AlterField( model_name='campaign', name='registration_form_right_panel', field=tinymce.models.HTMLField(verbose_name='Registration form right panel'), ), migrations.AlterField(", "name='is_active', field=models.BooleanField(verbose_name='Is active'), ), 
migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField( model_name='campaign', name='registration_form_footer',", "'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'}, ),", "), migrations.AlterField( model_name='campaignlocationshift', name='end', field=models.TimeField(verbose_name='End'), ), migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ),", "shift', 'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active',", "field=models.DateField(verbose_name='End'), ), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'),", "name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='total_places',", "right panel'), ), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign',", "Django 1.9.8 on 2016-08-28 18:14 from __future__ import unicode_literals from django.db import 
migrations,", "import tinymce.models class Migration(migrations.Migration): dependencies = [ ('campaigns', '0008_auto_20160828_1608'), ] operations = [", "migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField( model_name='campaign', name='registration_form_footer', field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ),", "), migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'), ), migrations.AlterField( model_name='campaignlocationshift', name='day', field=models.DateField(verbose_name='Day'), ),", "import unicode_literals from django.db import migrations, models import django.db.models.deletion import tinymce.models class Migration(migrations.Migration):", "form right panel'), ), migrations.AlterField( model_name='campaign', name='start', field=models.DateField(verbose_name='Start'), ), migrations.AlterField( model_name='campaignlocationshift', name='campaign', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "migrations.AlterField( model_name='campaignlocationshift', name='location', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'), ), migrations.AlterField( model_name='campaignlocationshift', name='start', field=models.TimeField(verbose_name='Start'), ), migrations.AlterField(", "), migrations.AlterField( model_name='campaign', name='is_active', field=models.BooleanField(verbose_name='Is active'), ), migrations.AlterField( model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ),", "model_name='campaign', name='name', field=models.CharField(max_length=255, verbose_name='Name'), ), migrations.AlterField( model_name='campaign', name='registration_form_footer', 
field=tinymce.models.HTMLField(verbose_name='Registration form footer'), ), migrations.AlterField(", "options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'}, ), migrations.AlterField( model_name='campaign', name='end', field=models.DateField(verbose_name='End'), ), migrations.AlterField(", "migrations, models import django.db.models.deletion import tinymce.models class Migration(migrations.Migration): dependencies = [ ('campaigns', '0008_auto_20160828_1608'),", "migrations.AlterModelOptions( name='campaign', options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'}, ), migrations.AlterModelOptions( name='campaignlocationshift', options={'verbose_name': 'Campaign shift', 'verbose_name_plural':" ]
[ "distance_parser = subparsers.add_parser('distance', help='Used to evaluate the error between an existing recording with", "help='Used to record a gesture to a file.') record_parser.add_argument('gesture_file', help='The path of the", "gestures and tries to match them against those described in the config file.')", "file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens for gestures and tries to match them", "the error between an existing recording with a new gesture.') distance_parser.add_argument('gesture_file', help='The path", "to.') record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance', help='Used to evaluate the error between an existing", "'__main__': parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used to record", "pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens for gestures and tries to", "recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance', help='Used to evaluate the error between", "distance_parser.add_argument('gesture_file', help='The path to a pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens", "gesture to a file.') record_parser.add_argument('gesture_file', help='The path of the file to save the", "the file to save the recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance', help='Used", "for gestures and tries to match them against those described in the config", "config file.') listen_parser.add_argument('config_file', help='The path to the gesture config file.') listen_parser.set_defaults(func=commands.listen) args =", "= 
subparsers.add_parser('distance', help='Used to evaluate the error between an existing recording with a", "tries to match them against those described in the config file.') listen_parser.add_argument('config_file', help='The", "described in the config file.') listen_parser.add_argument('config_file', help='The path to the gesture config file.')", "error between an existing recording with a new gesture.') distance_parser.add_argument('gesture_file', help='The path to", "__name__ == '__main__': parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used", "if __name__ == '__main__': parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser = subparsers.add_parser('record',", "parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used to record a gesture to a file.') record_parser.add_argument('gesture_file',", "against those described in the config file.') listen_parser.add_argument('config_file', help='The path to the gesture", "evaluate the error between an existing recording with a new gesture.') distance_parser.add_argument('gesture_file', help='The", "record_parser.add_argument('gesture_file', help='The path of the file to save the recorded gesture to.') record_parser.set_defaults(func=commands.record)", "new gesture.') distance_parser.add_argument('gesture_file', help='The path to a pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser =", "recording with a new gesture.') distance_parser.add_argument('gesture_file', help='The path to a pre-recorded gesture file.')", "existing recording with a new gesture.') distance_parser.add_argument('gesture_file', help='The path to a pre-recorded gesture", "to a pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens for gestures and", "help='The path to a pre-recorded 
gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens for", "parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used to record a", "record a gesture to a file.') record_parser.add_argument('gesture_file', help='The path of the file to", "subparsers.add_parser('distance', help='Used to evaluate the error between an existing recording with a new", "subparsers.add_parser('record', help='Used to record a gesture to a file.') record_parser.add_argument('gesture_file', help='The path of", "and tries to match them against those described in the config file.') listen_parser.add_argument('config_file',", "a gesture to a file.') record_parser.add_argument('gesture_file', help='The path of the file to save", "file.') listen_parser.add_argument('config_file', help='The path to the gesture config file.') listen_parser.set_defaults(func=commands.listen) args = parser.parse_args()", "to a file.') record_parser.add_argument('gesture_file', help='The path of the file to save the recorded", "ironmotion import commands if __name__ == '__main__': parser = argparse.ArgumentParser() subparsers = parser.add_subparsers()", "= subparsers.add_parser('record', help='Used to record a gesture to a file.') record_parser.add_argument('gesture_file', help='The path", "between an existing recording with a new gesture.') distance_parser.add_argument('gesture_file', help='The path to a", "the config file.') listen_parser.add_argument('config_file', help='The path to the gesture config file.') listen_parser.set_defaults(func=commands.listen) args", "import commands if __name__ == '__main__': parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser", "match them against those described in the config file.') listen_parser.add_argument('config_file', help='The path to", "to record a gesture to a file.') 
record_parser.add_argument('gesture_file', help='The path of the file", "argparse from ironmotion import commands if __name__ == '__main__': parser = argparse.ArgumentParser() subparsers", "a file.') record_parser.add_argument('gesture_file', help='The path of the file to save the recorded gesture", "to evaluate the error between an existing recording with a new gesture.') distance_parser.add_argument('gesture_file',", "record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance', help='Used to evaluate the error between an existing recording", "distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens for gestures and tries to match them against", "of the file to save the recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance',", "from ironmotion import commands if __name__ == '__main__': parser = argparse.ArgumentParser() subparsers =", "file.') record_parser.add_argument('gesture_file', help='The path of the file to save the recorded gesture to.')", "gesture.') distance_parser.add_argument('gesture_file', help='The path to a pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen',", "commands if __name__ == '__main__': parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser =", "subparsers.add_parser('listen', help='Listens for gestures and tries to match them against those described in", "save the recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance', help='Used to evaluate the", "argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used to record a gesture to", "the recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser = 
subparsers.add_parser('distance', help='Used to evaluate the error", "record_parser = subparsers.add_parser('record', help='Used to record a gesture to a file.') record_parser.add_argument('gesture_file', help='The", "a pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens for gestures and tries", "to match them against those described in the config file.') listen_parser.add_argument('config_file', help='The path", "in the config file.') listen_parser.add_argument('config_file', help='The path to the gesture config file.') listen_parser.set_defaults(func=commands.listen)", "import argparse from ironmotion import commands if __name__ == '__main__': parser = argparse.ArgumentParser()", "to save the recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance', help='Used to evaluate", "path to a pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens for gestures", "== '__main__': parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used to", "<gh_stars>1-10 import argparse from ironmotion import commands if __name__ == '__main__': parser =", "= argparse.ArgumentParser() subparsers = parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used to record a gesture", "= parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used to record a gesture to a file.')", "gesture to.') record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance', help='Used to evaluate the error between an", "listen_parser.add_argument('config_file', help='The path to the gesture config file.') listen_parser.set_defaults(func=commands.listen) args = parser.parse_args() args.func(args)", "with a new 
gesture.') distance_parser.add_argument('gesture_file', help='The path to a pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance)", "them against those described in the config file.') listen_parser.add_argument('config_file', help='The path to the", "path of the file to save the recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser =", "a new gesture.') distance_parser.add_argument('gesture_file', help='The path to a pre-recorded gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser", "an existing recording with a new gesture.') distance_parser.add_argument('gesture_file', help='The path to a pre-recorded", "help='The path of the file to save the recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser", "help='Used to evaluate the error between an existing recording with a new gesture.')", "gesture file.') distance_parser.set_defaults(func=commands.distance) listen_parser = subparsers.add_parser('listen', help='Listens for gestures and tries to match", "help='Listens for gestures and tries to match them against those described in the", "those described in the config file.') listen_parser.add_argument('config_file', help='The path to the gesture config", "= subparsers.add_parser('listen', help='Listens for gestures and tries to match them against those described", "subparsers = parser.add_subparsers() record_parser = subparsers.add_parser('record', help='Used to record a gesture to a", "listen_parser = subparsers.add_parser('listen', help='Listens for gestures and tries to match them against those", "file to save the recorded gesture to.') record_parser.set_defaults(func=commands.record) distance_parser = subparsers.add_parser('distance', help='Used to" ]
[ "software distributed under the License is distributed on # an \"AS IS\" BASIS,", "ValueError(\"n_tags was None but we're trying to predict tags. Please include n_tags\") #", "head is enabled # extract ground truth malware label, convert it to float", "(default: True) use_tags: Whether to use the SMART tags for the data points", "if no loss_wts were provided set some default values if loss_wts is None:", "elif activation_function.lower() == 'relu': self.activation_function = nn.ReLU else: # if the provided function", "append to return value the result of the tag head return rv #", "a new object and recursively copies the original object elements import torch #", "# whether to use the tags for the data points or not n_tags=None,", "array and save it into rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags: # if", "weights to assign to each head of the network (if it exists) \"\"\"", "respect to the ground truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss", "F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss weight (or set to default if not provided)", "model base_out = self.model_base(data) if self.use_malware: rv['malware'] = self.malware_head(base_out) # append to return", "if 'tags' in labels: # if the tags head is enabled # extract", "use SMART tags as additional targets \"\"\" Take a set of results dicts", "loosely based on the one used in ALOHA: Auxiliary Loss Optimization for Hypothesis", "same shape of malware_labels # then calculate binary cross entropy loss with respect", "rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count: # if the count additional target is", "target (default: False) use_tags: Whether to use SMART tags as additional targets (default:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the", "# a dictionary of labels loss_wts=None): # weights to assign to each head", "ValueError('Unknown activation function {}. Try \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function)) # select", "enumerate(all_tags): # for all the tags # normalize ground truth tag array and", "not provided) weight = loss_wts['malware'] if 'malware' in loss_wts else 1.0 # copy", "= n_tags # if we set to use tags but n_tags was None", "was None but we're trying to predict tags. Please include n_tags\") # initialize", "# Developed as a thesis project at the TORSEC research group of the", "Whether to use the SMART tags for the data points or not (default:", "if the SMART tags additional targets are enabled for column, tag in enumerate(all_tags):", "use [512, 512, 128]) dropout_p: Dropout probability (default: 0.05) activation_function: Non-linear activation function", "them out into a single dict of 1d arrays with appropriate column names", "nn.Sigmoid() # create a tag multi-label classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), #", "elif activation_function.lower() == 'prelu': self.activation_function = nn.PReLU elif activation_function.lower() == 'relu': self.activation_function =", "additional targets are enabled for column, tag in enumerate(all_tags): # for all the", "an ELU activation function module nn.Linear(64, 64), # append a Linear Layer with", "float and allocate them into the selected device (CPU or GPU) tag_labels =", "'tags' in labels: # if the tags head is enabled # extract ground", "integrated with autograd designed for maximum flexibility from .generators.dataset import Dataset from .utils.Net", "as well as a single layer for all tag predictions, performance will suffer", "Linear Layer with dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls)) #", "dictionary of losses if 'malware' in labels: # if the malware head is", "append to return 
value the result of the count head if self.use_tags: rv['tags']", "to use the counts as an additional target use_tags=False): # whether or not", "nn.LayerNorm elif normalization_function.lower() == 'batch_norm': self.normalization_function = nn.BatchNorm1d else: # if the provided", "size in layer_sizes for i, ls in enumerate(layer_sizes): if i == 0: #", "truth malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss weight (or set", "Polytechnic of Turin (Italy) under the supervision # of professor <NAME> and engineer", "recognised, raise error raise ValueError('Unknown activation function {}. Try \"layer_norm\" or \"batch_norm\"' .format(activation_function))", "Layer with size layer_sizes[-1] x 1 nn.ReLU()) # append a Relu activation function", "= nn.ELU elif activation_function.lower() == 'leakyrelu': self.activation_function = nn.LeakyReLU elif activation_function.lower() == 'prelu':", "and each tag) Returns: Loss dictionary. \"\"\" # if no loss_wts were provided", "1.0 # copy calculated tags loss into the loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item())", "activation_function: Non-linear activation function to use (may be \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\")", "be \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\") (default: \"elu\") normalization_function: Normalization function to use", "and get resulting embedding. 
Args: data: Current batch of data (features) Returns: Dictionary", "functional interface from torch import nn # a neural networks library deeply integrated", "the same shape of malware_labels # then calculate binary cross entropy loss with", "to the same shape of count_labels # then calculate poisson loss with respect", "activation function module nn.Linear(64, n_tags), # append a Linear Layer with size 64", "ls layers.append(self.activation_function()) # append an ELU activation function module layers.append(nn.Dropout(dropout_p)) # append a", "labels loss_wts: Weights to assign to each head of the network (if it", "default values if loss_wts is None: loss_wts = {'malware': 1.0, 'count': 0.1, 'tags':", "selected device (CPU or GPU) count_labels = labels['count'].float().to(device) # get predicted count, reshape", "Take a set of results dicts and break them out into a single", "get variables from config file device = config['general']['device'] class Net(baseNet): \"\"\" This is", "use the counts for the data points or not use_tags=True, # whether to", "a sigmoid activation function module def forward(self, data): # current batch of data", "based on the one used in ALOHA: Auxiliary Loss Optimization for Hypothesis Augmentation", "by applicable law or agreed to in writing, software distributed under the License", "and save it into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count array", "portable way of using operating system dependent functionality from copy import deepcopy #", "variables from config file device = config['general']['device'] class Net(baseNet): \"\"\" This is a", "labels as a target (default: False) use_count: Whether to use the counts as", "enabled # extract ground truth malware label, convert it to float and allocate", "size (unused) (default: 32) layer_sizes: Layer sizes (array of sizes) (default: None ->", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by 
applicable law", "with strong GPU support import torch.nn.functional as F # pytorch neural network functional", "ground truth tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss weight (or set", "of 1d arrays with appropriate column names that pandas can convert to a", "to assign to each head of the network (if it exists) \"\"\" Compute", "append a sigmoid activation function module def forward(self, data): # current batch of", "data points or not n_tags=None, # number of tags to predict feature_dimension=2381, #", "and read config file config = configparser.ConfigParser() config.read(config_filepath) # get variables from config", "provided) weight = loss_wts['tags'] if 'tags' in loss_wts else 1.0 # copy calculated", "normalize ground truth tag array and save it into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:,", "vendor detection count auxiliary losses). Args: predictions: A dictionary of results from the", "professor <NAME> and engineer <NAME> and with the support of engineer <NAME>. #", "function self.sigmoid = nn.Sigmoid() # create a tag multi-label classifying head self.tag_head =", "head is enabled # extract ground truth count, convert it to float and", "as a target (default: False) use_count: Whether to use the counts as an", "elements import torch # tensor library like NumPy, with strong GPU support import", "them into the selected device (CPU or GPU) tag_labels = labels['tags'].float().to(device) # get", "auxiliary losses). 
Args: predictions: A dictionary of results from the Net labels: A", "labels as a target use_count=False, # whether or not to use the counts", "= Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted label array and save it into rv", "of tags to predict (default: None) feature_dimension: Dimension of the input data feature", "'leakyrelu': self.activation_function = nn.LeakyReLU elif activation_function.lower() == 'prelu': self.activation_function = nn.PReLU elif activation_function.lower()", "will suffer accordingly. \"\"\" def __init__(self, use_malware=True, # whether to use the malicious", "and allocate them into the selected device (CPU or GPU) tag_labels = labels['tags'].float().to(device)", "on the activation_function parameter if activation_function.lower() == 'elu': self.activation_function = nn.ELU elif activation_function.lower()", "if 'malware' in loss_wts else 1.0 # copy calculated malware loss into the", "1d arrays with appropriate column names that pandas can convert to a DataFrame.", "copy calculated tags loss into the loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) # update", "if the provided function is not recognised, raise error raise ValueError('Unknown activation function", "0.05) activation_function: Non-linear activation function to use (may be \"elu\", \"leakyRelu\", \"pRelu\" or", "labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size", "tags and vendor detection count auxiliary losses). Args: predictions: A dictionary of results", "and allocate it into the selected device (CPU or GPU) count_labels = labels['count'].float().to(device)", "the License for the # specific language governing permissions and limitations under the", "rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags: # if the SMART tags additional targets", "OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for", "return value the result of the tag head return rv # return the", "dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) # update total loss loss_dict['total'] += malware_loss * weight", "# copy calculated malware loss into the loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) #", "Forward batch of data through the net. Args: data: Current batch of data", "convert to a DataFrame. Args: labels_dict: Labels (ground truth) dictionary results_dict: Results (predicted", "a tuple from the layers list, then apply nn.Sequential to get a sequential", "save it into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize predicted tag array", "set to use tags but n_tags was None raise an exception if self.use_tags", "be the model base self.model_base = nn.Sequential(*tuple(layers)) # create malware/benign labeling head self.malware_head", "Dataset from .utils.Net import Net as baseNet # get tags from the dataset", "Latent space size (unused) (default: 32) layer_sizes: Layer sizes (array of sizes) (default:", "into the selected device # (CPU or GPU) malware_labels = labels['malware'].float().to(device) # get", "or implied. 
See the License for the # specific language governing permissions and", "get a sequential container # -> this will be the model base self.model_base", "# creates a new object and recursively copies the original object elements import", "of results from the Net labels: A dictionary of labels loss_wts: Weights to", "function module nn.Linear(64, n_tags), # append a Linear Layer with size 64 x", "or GPU) count_labels = labels['count'].float().to(device) # get predicted count, reshape it to the", "as a thesis project at the TORSEC research group of the Polytechnic of", "layer_sizes[-1] x 1 nn.Sigmoid()) # append a sigmoid activation function module # create", "get embedding forwarding the data through the base model return {'embedding': self.model_base(data)} @staticmethod", "batch of data (features) \"\"\" Forward batch of data through the net. Args:", "x 64 nn.ELU(), # append an ELU activation function module nn.Linear(64, 64), #", "Dataset.tags # get config file path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir", "(predicted labels) dictionary use_malware=False, # whether or not to use malware/benignware labels as", "(features) \"\"\" Forward batch of data through the net and get resulting embedding.", "normalization_function.lower() == 'layer_norm': self.normalization_function = nn.LayerNorm elif normalization_function.lower() == 'batch_norm': self.normalization_function = nn.BatchNorm1d", "torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss weight (or set to default if not provided)", "Linear Layer with size layer_sizes[-1] x 1 nn.ReLU()) # append a Relu activation", "with respect to the ground truth tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) # get", "layer_sizes[-1] x 1 nn.ReLU()) # append a Relu activation function module # sigmoid", "for column, tag in enumerate(all_tags): # for all the tags # normalize ground", "rv = {} # 
initialize return value # get base result forwarding the", "activation_function parameter if activation_function.lower() == 'elu': self.activation_function = nn.ELU elif activation_function.lower() == 'leakyrelu':", "of the count head if self.use_tags: rv['tags'] = self.tag_head(base_out) # append to return", "# copy calculated tags loss into the loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) #", "the data points or not n_tags=None, # number of tags to predict feature_dimension=2381,", "is enabled # extract ground truth count, convert it to float and allocate", "sizes) dropout_p=0.05, # dropout probability activation_function='elu', # non-linear activation function to use normalization_function='batch_norm'):", "activation function module # sigmoid activation function self.sigmoid = nn.Sigmoid() # create a", "tags and then calculate binary cross entropy loss with respect to the ground", "https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} # initialize return value dict if use_malware: # if", "it uses fewer (and smaller) layers, as well as a single layer for", "truth label array and save it into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize", "= labels['tags'].float().to(device) # get predicted tags and then calculate binary cross entropy loss", "# update total loss loss_dict['total'] += malware_loss * weight if 'count' in labels:", "config parser and read config file config = configparser.ConfigParser() config.read(config_filepath) # get variables", "function to use normalization_function='batch_norm'): # normalization function to use \"\"\" Initialize net. 
Args:", "SMART tags as additional targets (default: False) Returns: Dictionary containing labels and predictions.", "= Net.detach_and_copy_array(results_dict['malware']) if use_count: # if the count additional target is enabled #", "False) use_tags: Whether to use SMART tags as additional targets (default: False) Returns:", "malicious label for the data points or not (default: True) use_counts: Whether to", "malware_loss * weight if 'count' in labels: # if the count head is", "== 'relu': self.activation_function = nn.ReLU else: # if the provided function is not", "SMART tags additional targets are enabled for column, tag in enumerate(all_tags): # for", "the same shape of count_labels # then calculate poisson loss with respect to", "Dimension of the input data feature vector (default: 2381) embedding_dimension: Latent space size", "module # create count poisson regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append", "Loss Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). 
Note that it uses fewer (and smaller)", "neural network functional interface from torch import nn # a neural networks library", "network (if it exists); defaults to values used in the ALOHA paper (1.0", "if the count additional target is enabled # normalize ground truth count array", "self.activation_function = nn.PReLU elif activation_function.lower() == 'relu': self.activation_function = nn.ReLU else: # if", "a basic configuration language for Python programs import os # provides a portable", "file device = config['general']['device'] class Net(baseNet): \"\"\" This is a simple network loosely", "# append a Linear Layer with size 64 x 64 nn.ELU(), # append", "predicted count, reshape it to the same shape of count_labels # then calculate", "tuple from the layers list, then apply nn.Sequential to get a sequential container", "self.use_malware: rv['malware'] = self.malware_head(base_out) # append to return value the result of the", "tags but n_tags was None raise an exception if self.use_tags and self.n_tags is", "(features) Returns: Dictionary containing predicted labels. 
\"\"\" rv = {} # initialize return", "activation_function.lower() == 'prelu': self.activation_function = nn.PReLU elif activation_function.lower() == 'relu': self.activation_function = nn.ReLU", "if layer_sizes is None: layer_sizes = [512, 512, 128] # select activation function", "a neural networks library deeply integrated with autograd designed for maximum flexibility from", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "a Relu activation function module # sigmoid activation function self.sigmoid = nn.Sigmoid() #", "'relu': self.activation_function = nn.ReLU else: # if the provided function is not recognised,", "nn.Sequential(*tuple(layers)) # create malware/benign labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a", "to use the malicious label for the data points or not (default: True)", "tags from the dataset all_tags = Dataset.tags # get config file path nets_dir", "nn.ELU(), # append an ELU activation function module nn.Linear(64, n_tags), # append a", "will be the model base self.model_base = nn.Sequential(*tuple(layers)) # create malware/benign labeling head", "file config = configparser.ConfigParser() config.read(config_filepath) # get variables from config file device =", "# sigmoid activation function self.sigmoid = nn.Sigmoid() # create a tag multi-label classifying", "whether or not to use the counts as an additional target use_tags=False): #", "# normalize predicted count array and save it into rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count'])", "binary cross entropy loss with respect to the ground truth malware labels malware_loss", "predicted count array and save it into rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags:", "self.use_tags: rv['tags'] = self.tag_head(base_out) # append to return value the result of the", "use malware/benignware labels as a target (default: False) use_count: Whether to use the", 
"self.activation_function = nn.LeakyReLU elif activation_function.lower() == 'prelu': self.activation_function = nn.PReLU elif activation_function.lower() ==", "Forward batch of data through the net and get resulting embedding. Args: data:", "malware/benign labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with", "use_count: Whether to use the counts as an additional target (default: False) use_tags:", "loss_wts else 1.0 # copy calculated malware loss into the loss dictionary loss_dict['malware']", "# append to return value the result of the tag head return rv", "dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) # update total loss loss_dict['total'] += tags_loss * weight", "size ls layers.append(self.activation_function()) # append an ELU activation function module layers.append(nn.Dropout(dropout_p)) # append", "predictions: A dictionary of results from the Net labels: A dictionary of labels", "get tags from the dataset all_tags = Dataset.tags # get config file path", "{'malware': 1.0, 'count': 0.1, 'tags': 1.0} loss_dict = {'total': 0.} # initialize dictionary", "you may not use this file except in compliance with # the License.", "n_tags: Number of tags to predict (default: None) feature_dimension: Dimension of the input", "# if the provided function is not recognised, raise error raise ValueError('Unknown activation", "the selected device # (CPU or GPU) malware_labels = labels['malware'].float().to(device) # get predicted", "ground truth malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss weight (or", "if use_malware: # if the malware/benign target label is enabled # normalize malware", "dependent functionality from copy import deepcopy # creates a new object and recursively", "Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). 
Note that it uses fewer (and smaller) layers, as well", "same shape of count_labels # then calculate poisson loss with respect to the", "points or not (default: True) use_counts: Whether to use the counts for the", "use this file except in compliance with # the License. You may obtain", "count, convert it to float and allocate it into the selected device (CPU", "\"pRelu\" or \"relu\") (default: \"elu\") normalization_function: Normalization function to use (may be \"layer_norm\"", "Net as baseNet # get tags from the dataset all_tags = Dataset.tags #", "base result forwarding the data through the base model base_out = self.model_base(data) if", "the tags # normalize ground truth tag array and save it into rv", "in loss_wts else 1.0 # copy calculated count loss into the loss dictionary", "Returns: Loss dictionary. \"\"\" # if no loss_wts were provided set some default", "module # sigmoid activation function self.sigmoid = nn.Sigmoid() # create a tag multi-label", "the counts for the data points or not use_tags=True, # whether to use", "allocate it into the selected device # (CPU or GPU) malware_labels = labels['malware'].float().to(device)", "counts as an additional target use_tags=False): # whether or not to use SMART", "enabled # normalize ground truth count array and save it into rv rv['label_count']", "2.0 (the \"License\"); you may not use this file except in compliance with", "container # -> this will be the model base self.model_base = nn.Sequential(*tuple(layers)) #", "into rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags: # if the SMART tags additional", "read config file config = configparser.ConfigParser() config.read(config_filepath) # get variables from config file", "model_dir = os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini') # instantiate config", "done here to avoid a FD \"leak\" in the dataset generator # see", "count and each tag) Returns: Loss 
dictionary. \"\"\" # if no loss_wts were", "losses if 'malware' in labels: # if the malware head is enabled #", "embedding_dimension: Latent space size (unused) (default: 32) layer_sizes: Layer sizes (array of sizes)", "a lot of deepcopies are done here to avoid a FD \"leak\" in", "with the support of engineer <NAME>. # # Licensed under the Apache License,", "# append to return value the result of the count head if self.use_tags:", "binary cross entropy loss with respect to the ground truth tags tags_loss =", "from torch import nn # a neural networks library deeply integrated with autograd", "to use the tags for the data points or not n_tags=None, # number", "through the base model return {'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions, # a dictionary", "a Linear Layer with size 64 x n_tags nn.Sigmoid()) # append a sigmoid", "allocate them into the selected device (CPU or GPU) tag_labels = labels['tags'].float().to(device) #", "batch of data (features) Returns: Dictionary containing the resulting embedding. \"\"\" # get", "used in ALOHA: Auxiliary Loss Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). Note that it", "Layer with size 64 x n_tags nn.Sigmoid()) # append a sigmoid activation function", "# append a Norm layer of size ls layers.append(self.activation_function()) # append an ELU", "to predict (default: None) feature_dimension: Dimension of the input data feature vector (default:", "False) use_count: Whether to use the counts as an additional target (default: False)", "to predict tags. 
Please include n_tags\") # initialize super class super().__init__() layers =", "module layers.append(nn.Dropout(dropout_p)) # append a dropout layer with probability of dropout dropout_p #", "provided) weight = loss_wts['count'] if 'count' in loss_wts else 1.0 # copy calculated", "self.use_tags = use_tags self.n_tags = n_tags # if we set to use tags", "default of [512, 512, 128] if layer_sizes is None: layer_sizes = [512, 512,", "activation function {}. Try \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function)) # select normalization", "# if the count head is enabled # extract ground truth count, convert", "rv['count'] = self.count_head(base_out) # append to return value the result of the count", "the count head if self.use_tags: rv['tags'] = self.tag_head(base_out) # append to return value", "of data (features) Returns: Dictionary containing the resulting embedding. \"\"\" # get embedding", "selected device (CPU or GPU) tag_labels = labels['tags'].float().to(device) # get predicted tags and", "ELU activation function module nn.Linear(64, 64), # append a Linear Layer with size", "forward(self, data): # current batch of data (features) \"\"\" Forward batch of data", "return value the result of the malware head if self.use_counts: rv['count'] = self.count_head(base_out)", "use the SMART tags for the data points or not (default: True) n_tags:", "recognised, raise error raise ValueError('Unknown activation function {}. 
Try \"elu\", \"leakyRelu\", \"pRelu\" or", "self.n_tags = n_tags # if we set to use tags but n_tags was", "for maximum flexibility from .generators.dataset import Dataset from .utils.Net import Net as baseNet", "the loss dictionary loss_dict['count'] = deepcopy(count_loss.item()) # update total loss loss_dict['total'] += count_loss", "to avoid a FD \"leak\" in the dataset generator # see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189", "with size layer_sizes[-1] x 1 nn.ReLU()) # append a Relu activation function module", "def compute_loss(predictions, # a dictionary of results from the Net labels, # a", "Results (predicted labels) dictionary use_malware: Whether to use malware/benignware labels as a target", "sizes (array of sizes) dropout_p=0.05, # dropout probability activation_function='elu', # non-linear activation function", "enabled for column, tag in enumerate(all_tags): # for all the tags # normalize", "super().__init__() layers = [] # initialize layers array # if layer_sizes was not", "default if not provided) weight = loss_wts['count'] if 'count' in loss_wts else 1.0", "with # the License. 
You may obtain a copy of the License at", "parameter if normalization_function.lower() == 'layer_norm': self.normalization_function = nn.LayerNorm elif normalization_function.lower() == 'batch_norm': self.normalization_function", "loss_dict['total'] += tags_loss * weight return loss_dict # return the losses @staticmethod def", "get config file path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir)", "feature_dimension=2381, # dimension of the input data feature vector embedding_dimension=32, # latent space", "if not provided) weight = loss_wts['malware'] if 'malware' in loss_wts else 1.0 #", "ground truth count array and save it into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) #", "# extract ground truth tags, convert them to float and allocate them into", "Linear Layer with size layer_sizes[-1] x 1 nn.Sigmoid()) # append a sigmoid activation", "provided) weight = loss_wts['malware'] if 'malware' in loss_wts else 1.0 # copy calculated", "the ground truth tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss weight (or", "are enabled for column, tag in enumerate(all_tags): # for all the tags #", "# the License. You may obtain a copy of the License at #", "use_tags=False): # whether or not to use SMART tags as additional targets \"\"\"", "self.use_counts = use_counts self.use_tags = use_tags self.n_tags = n_tags # if we set", "# initialize return value dict if use_malware: # if the malware/benign target label", "# create a tuple from the layers list, then apply nn.Sequential to get", "Returns: Dictionary containing labels and predictions. 
\"\"\" # a lot of deepcopies are", "results from the Net labels, # a dictionary of labels loss_wts=None): # weights", "label array and save it into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize malware", "n_tags # if we set to use tags but n_tags was None raise", "ls)) else: # append a Linear Layer with dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i", "tag head return rv # return the return value def get_embedding(self, data): #", "# # Developed as a thesis project at the TORSEC research group of", "deepcopy # creates a new object and recursively copies the original object elements", "tag in enumerate(all_tags): # for all the tags # normalize ground truth tag", "count head is enabled # extract ground truth count, convert it to float", "the supervision # of professor <NAME> and engineer <NAME> and with the support", "True) use_tags: Whether to use the SMART tags for the data points or", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# whether or not to use the counts as an additional target use_tags=False):", "else 1.0 # copy calculated tags loss into the loss dictionary loss_dict['tags'] =", "ground truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss weight (or set", "Try \"layer_norm\" or \"batch_norm\"' .format(activation_function)) # for each layer size in layer_sizes for", "or not n_tags=None, # number of tags to predict feature_dimension=2381, # dimension of", "Layer with size layer_sizes[-1] x 64 nn.ELU(), # append an ELU activation function", "to default if not provided) weight = loss_wts['count'] if 'count' in loss_wts else", "engineer <NAME>. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "activation function module layers.append(nn.Dropout(dropout_p)) # append a dropout layer with probability of dropout", "Args: data: Current batch of data (features) Returns: Dictionary containing predicted labels. \"\"\"", "Developed as a thesis project at the TORSEC research group of the Polytechnic", "is enabled # extract ground truth malware label, convert it to float and", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "(the \"License\"); you may not use this file except in compliance with #", "return loss_dict # return the losses @staticmethod def normalize_results(labels_dict, # labels (ground truth)", "get predicted tags and then calculate binary cross entropy loss with respect to", "the Net labels, # a dictionary of labels loss_wts=None): # weights to assign", "(array of sizes) (default: None -> use [512, 512, 128]) dropout_p: Dropout probability", "net. Args: use_malware: Whether to use the malicious label for the data points", "= self.tag_head(base_out) # append to return value the result of the tag head", "Args: data: Current batch of data (features) Returns: Dictionary containing the resulting embedding.", "then apply nn.Sequential to get a sequential container # -> this will be", "ALOHA paper (1.0 for malware, 0.1 for count and each tag) Returns: Loss", "into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted label array and save", "Relu activation function module # sigmoid activation function self.sigmoid = nn.Sigmoid() # create", "\"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function)) # select normalization function to use based", "one used in ALOHA: Auxiliary Loss Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). 
Note that", "head if self.use_tags: rv['tags'] = self.tag_head(base_out) # append to return value the result", "in loss_wts else 1.0 # copy calculated malware loss into the loss dictionary", "loss with respect to the ground truth malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels)", "-> use [512, 512, 128]) dropout_p: Dropout probability (default: 0.05) activation_function: Non-linear activation", "current batch of data (features) \"\"\" Forward batch of data through the net.", "use_malware self.use_counts = use_counts self.use_tags = use_tags self.n_tags = n_tags # if we", "append a Relu activation function module # sigmoid activation function self.sigmoid = nn.Sigmoid()", "append an ELU activation function module nn.Linear(64, 64), # append a Linear Layer", "Initialize net. Args: use_malware: Whether to use the malicious label for the data", "Returns: Dictionary containing the resulting embedding. \"\"\" # get embedding forwarding the data", "(optionally with SMART tags and vendor detection count auxiliary losses). Args: predictions: A", "to get a sequential container # -> this will be the model base", "the Net labels: A dictionary of labels loss_wts: Weights to assign to each", "malware/benign target label is enabled # normalize malware ground truth label array and", "512, 128] if layer_sizes is None: layer_sizes = [512, 512, 128] # select", "count additional target is enabled # normalize ground truth count array and save", "for the data points or not (default: True) use_tags: Whether to use the", "get base result forwarding the data through the base model base_out = self.model_base(data)", "exception if self.use_tags and self.n_tags is None: raise ValueError(\"n_tags was None but we're", "accordingly. 
\"\"\" def __init__(self, use_malware=True, # whether to use the malicious label for", "use_count=False, # whether or not to use the counts as an additional target", "nn.Linear(64, n_tags), # append a Linear Layer with size 64 x n_tags nn.Sigmoid())", "<reponame>cmikke97/AMSG # Copyright 2021, <NAME>. # # Developed as a thesis project at", "data: Current batch of data (features) Returns: Dictionary containing predicted labels. \"\"\" rv", "and save it into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted label", "whether to use the malicious label for the data points or not use_counts=True,", "# normalize malware ground truth label array and save it into rv rv['label_malware']", "for the data points or not use_tags=True, # whether to use the tags", "function module nn.Linear(64, 64), # append a Linear Layer with size 64 x", "(default: True) use_counts: Whether to use the counts for the data points or", "tag) Returns: Loss dictionary. \"\"\" # if no loss_wts were provided set some", "normalization_function: Normalization function to use (may be \"layer_norm\" or \"batch_norm\") (default: \"batch_norm\") \"\"\"", "super class super().__init__() layers = [] # initialize layers array # if layer_sizes", "the support of engineer <NAME>. # # Licensed under the Apache License, Version", "the result of the count head if self.use_tags: rv['tags'] = self.tag_head(base_out) # append", "append a Norm layer of size ls layers.append(self.activation_function()) # append an ELU activation", "# normalize ground truth tag array and save it into rv rv['label_{}_tag'.format(tag)] =", "predict tags. Please include n_tags\") # initialize super class super().__init__() layers = []", "dropout layer with probability of dropout dropout_p # create a tuple from the", "the SMART tags for the data points or not (default: True) n_tags: Number", "of data through the net. 
Args: data: Current batch of data (features) Returns:", "Number of tags to predict (default: None) feature_dimension: Dimension of the input data", "elif activation_function.lower() == 'leakyrelu': self.activation_function = nn.LeakyReLU elif activation_function.lower() == 'prelu': self.activation_function =", "# get predicted tags and then calculate binary cross entropy loss with respect", "Args: predictions: A dictionary of results from the Net labels: A dictionary of", "not provided) weight = loss_wts['tags'] if 'tags' in loss_wts else 1.0 # copy", "calculated malware loss into the loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) # update total", "loss_wts = {'malware': 1.0, 'count': 0.1, 'tags': 1.0} loss_dict = {'total': 0.} #", "a tag multi-label classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a Linear", "labels['malware'].float().to(device) # get predicted malware label, reshape it to the same shape of", "model base self.model_base = nn.Sequential(*tuple(layers)) # create malware/benign labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1],", "= [512, 512, 128] # select activation function to use based on the", "the losses @staticmethod def normalize_results(labels_dict, # labels (ground truth) dictionary results_dict, # results", "nn # a neural networks library deeply integrated with autograd designed for maximum", "to float and allocate it into the selected device # (CPU or GPU)", "target (default: False) use_count: Whether to use the counts as an additional target", "None: loss_wts = {'malware': 1.0, 'count': 0.1, 'tags': 1.0} loss_dict = {'total': 0.}", "'count' in loss_wts else 1.0 # copy calculated count loss into the loss", "the return value def get_embedding(self, data): # current batch of data (features) \"\"\"", "loss_wts were provided set some default values if loss_wts is None: loss_wts =", "data (features) Returns: Dictionary containing predicted labels. 
\"\"\" rv = {} # initialize", "128] # select activation function to use based on the activation_function parameter if", "as additional targets \"\"\" Take a set of results dicts and break them", "# whether or not to use SMART tags as additional targets \"\"\" Take", "nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a Linear Layer with size layer_sizes[-1] x 64 nn.ELU(),", "use the tags for the data points or not n_tags=None, # number of", "device = config['general']['device'] class Net(baseNet): \"\"\" This is a simple network loosely based", "\"relu\"' .format(activation_function)) # select normalization function to use based on the normalization_function parameter", "rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count: # if the count additional target is enabled", "self.malware_head(base_out) # append to return value the result of the malware head if", "from copy import deepcopy # creates a new object and recursively copies the", "# create malware/benign labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear", "data (features) \"\"\" Forward batch of data through the net. Args: data: Current", "with size layer_sizes[-1] x 64 nn.ELU(), # append an ELU activation function module", "rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags: # if the SMART tags additional targets are", "(may be \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\") (default: \"elu\") normalization_function: Normalization function to", "\"batch_norm\"' .format(activation_function)) # for each layer size in layer_sizes for i, ls in", "tag multi-label classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a Linear Layer", "Returns: Dictionary containing predicted labels. 
\"\"\" rv = {} # initialize return value", "the base model return {'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions, # a dictionary of", "(features) Returns: Dictionary containing the resulting embedding. \"\"\" # get embedding forwarding the", "SMART tags and vendor detection count auxiliary losses). Args: predictions: A dictionary of", "performance will suffer accordingly. \"\"\" def __init__(self, use_malware=True, # whether to use the", "count_loss * weight if 'tags' in labels: # if the tags head is", "with dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls)) # append a", "each layer size in layer_sizes for i, ls in enumerate(layer_sizes): if i ==", "a target (default: False) use_count: Whether to use the counts as an additional", "= use_counts self.use_tags = use_tags self.n_tags = n_tags # if we set to", "# normalize predicted tag array and save it into rv rv['pred_{}_tag'.format(tag)] = Net.detach_and_copy_array(results_dict['tags'][:,", "writing, software distributed under the License is distributed on # an \"AS IS\"", "# instantiate config parser and read config file config = configparser.ConfigParser() config.read(config_filepath) #", "1), # append a Linear Layer with size layer_sizes[-1] x 1 nn.Sigmoid()) #", "get predicted malware label, reshape it to the same shape of malware_labels #", "containing predicted labels. \"\"\" rv = {} # initialize return value # get", "a sigmoid activation function module # create count poisson regression head self.count_head =", "net. 
Args: data: Current batch of data (features) Returns: Dictionary containing predicted labels.", "- 1], ls)) layers.append(self.normalization_function(ls)) # append a Norm layer of size ls layers.append(self.activation_function())", "network functional interface from torch import nn # a neural networks library deeply", "ground truth tags, convert them to float and allocate them into the selected", "append an ELU activation function module layers.append(nn.Dropout(dropout_p)) # append a dropout layer with", "= os.path.join(src_dir, 'config.ini') # instantiate config parser and read config file config =", "layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls)) # append a Norm layer", "the TORSEC research group of the Polytechnic of Turin (Italy) under the supervision", "import configparser # implements a basic configuration language for Python programs import os", "use_counts=True, # whether to use the counts for the data points or not", "use based on the activation_function parameter if activation_function.lower() == 'elu': self.activation_function = nn.ELU", "the data through the base model return {'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions, #", "# whether or not to use malware/benignware labels as a target use_count=False, #", "normalization_function.lower() == 'batch_norm': self.normalization_function = nn.BatchNorm1d else: # if the provided normalization function", "of the Polytechnic of Turin (Italy) under the supervision # of professor <NAME>", "column]) # normalize predicted tag array and save it into rv rv['pred_{}_tag'.format(tag)] =", "Unless required by applicable law or agreed to in writing, software distributed under", "DataFrame. 
Args: labels_dict: Labels (ground truth) dictionary results_dict: Results (predicted labels) dictionary use_malware:", "deepcopy(count_loss.item()) # update total loss loss_dict['total'] += count_loss * weight if 'tags' in", ".format(activation_function)) # for each layer size in layer_sizes for i, ls in enumerate(layer_sizes):", "enumerate(layer_sizes): if i == 0: # append the first Linear Layer with dimensions", "data through the base model base_out = self.model_base(data) if self.use_malware: rv['malware'] = self.malware_head(base_out)", "implied. See the License for the # specific language governing permissions and limitations", "activation function to use normalization_function='batch_norm'): # normalization function to use \"\"\" Initialize net.", "get resulting embedding. Args: data: Current batch of data (features) Returns: Dictionary containing", "# extract ground truth count, convert it to float and allocate it into", "self.normalization_function = nn.LayerNorm elif normalization_function.lower() == 'batch_norm': self.normalization_function = nn.BatchNorm1d else: # if", "module def forward(self, data): # current batch of data (features) \"\"\" Forward batch", "ELU activation function module nn.Linear(64, n_tags), # append a Linear Layer with size", "# get tags from the dataset all_tags = Dataset.tags # get config file", "loss_dict = {'total': 0.} # initialize dictionary of losses if 'malware' in labels:", "\"relu\") (default: \"elu\") normalization_function: Normalization function to use (may be \"layer_norm\" or \"batch_norm\")", "get loss weight (or set to default if not provided) weight = loss_wts['count']", "labels (ground truth) dictionary results_dict, # results (predicted labels) dictionary use_malware=False, # whether", "if the malware head is enabled # extract ground truth malware label, convert", "nn.Linear(64, 64), # append a Linear Layer with size 64 x 64 nn.ELU(),", "normalize predicted count array and save it into rv rv['pred_count'] = 
Net.detach_and_copy_array(results_dict['count']) if", "it into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count array and save", "\"\"\" # if no loss_wts were provided set some default values if loss_wts", "a dictionary of results from the Net labels, # a dictionary of labels", "# append a dropout layer with probability of dropout dropout_p # create a", "either express or implied. See the License for the # specific language governing", "the malicious label for the data points or not (default: True) use_counts: Whether", "lot of deepcopies are done here to avoid a FD \"leak\" in the", "results_dict, # results (predicted labels) dictionary use_malware=False, # whether or not to use", "total loss loss_dict['total'] += tags_loss * weight return loss_dict # return the losses", "= labels['malware'].float().to(device) # get predicted malware label, reshape it to the same shape", "to use \"\"\" Initialize net. Args: use_malware: Whether to use the malicious label", "in enumerate(all_tags): # for all the tags # normalize ground truth tag array", "to assign to each head of the network (if it exists); defaults to", "NumPy, with strong GPU support import torch.nn.functional as F # pytorch neural network", "(if it exists); defaults to values used in the ALOHA paper (1.0 for", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "else: # if the provided function is not recognised, raise error raise ValueError('Unknown", "in labels: # if the count head is enabled # extract ground truth", "detection count auxiliary losses). 
Args: predictions: A dictionary of results from the Net", "if loss_wts is None: loss_wts = {'malware': 1.0, 'count': 0.1, 'tags': 1.0} loss_dict", "the data points or not (default: True) n_tags: Number of tags to predict", "then initialize it to a default of [512, 512, 128] if layer_sizes is", "GPU) count_labels = labels['count'].float().to(device) # get predicted count, reshape it to the same", "create a tuple from the layers list, then apply nn.Sequential to get a", "else: # append a Linear Layer with dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i -", "target use_count=False, # whether or not to use the counts as an additional", "# append an ELU activation function module nn.Linear(64, 64), # append a Linear", "points or not (default: True) n_tags: Number of tags to predict (default: None)", "basic configuration language for Python programs import os # provides a portable way", "= Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count array and save it into rv rv['pred_count']", "the result of the malware head if self.use_counts: rv['count'] = self.count_head(base_out) # append", "ground truth count, convert it to float and allocate it into the selected", "to use based on the normalization_function parameter if normalization_function.lower() == 'layer_norm': self.normalization_function =", "library like NumPy, with strong GPU support import torch.nn.functional as F # pytorch", "for malware, 0.1 for count and each tag) Returns: Loss dictionary. 
\"\"\" #", "count, reshape it to the same shape of count_labels # then calculate poisson", "malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss weight (or set to default if", "an ELU activation function module nn.Linear(64, n_tags), # append a Linear Layer with", "if the malware/benign target label is enabled # normalize malware ground truth label", "and save it into rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags: # if the", "an additional target use_tags=False): # whether or not to use SMART tags as", "can convert to a DataFrame. Args: labels_dict: Labels (ground truth) dictionary results_dict: Results", "reshape it to the same shape of malware_labels # then calculate binary cross", "or agreed to in writing, software distributed under the License is distributed on", "layer sizes (array of sizes) dropout_p=0.05, # dropout probability activation_function='elu', # non-linear activation", "# whether to use the counts for the data points or not use_tags=True,", "predictions. \"\"\" # a lot of deepcopies are done here to avoid a", "use_malware: Whether to use the malicious label for the data points or not", "n_tags\") # initialize super class super().__init__() layers = [] # initialize layers array", "for count and each tag) Returns: Loss dictionary. 
\"\"\" # if no loss_wts", "provided set some default values if loss_wts is None: loss_wts = {'malware': 1.0,", "else 1.0 # copy calculated malware loss into the loss dictionary loss_dict['malware'] =", "forwarding the data through the base model base_out = self.model_base(data) if self.use_malware: rv['malware']", "head return rv # return the return value def get_embedding(self, data): # current", "loss with respect to the ground truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) #", "number of tags to predict feature_dimension=2381, # dimension of the input data feature", "the layers list, then apply nn.Sequential to get a sequential container # ->", "labels. \"\"\" rv = {} # initialize return value # get base result", "a dropout layer with probability of dropout dropout_p # create a tuple from", "the data through the base model base_out = self.model_base(data) if self.use_malware: rv['malware'] =", "= nn.BatchNorm1d else: # if the provided normalization function is not recognised, raise", "# append a Linear Layer with size layer_sizes[-1] x 1 nn.Sigmoid()) # append", "(default: 0.05) activation_function: Non-linear activation function to use (may be \"elu\", \"leakyRelu\", \"pRelu\"", "tags, convert them to float and allocate them into the selected device (CPU", "normalize ground truth count array and save it into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count'])", "label for the data points or not (default: True) use_counts: Whether to use", "then calculate poisson loss with respect to the ground truth count count_loss =", "# if the count additional target is enabled # normalize ground truth count", "activation function self.sigmoid = nn.Sigmoid() # create a tag multi-label classifying head self.tag_head", "tag array and save it into rv rv['pred_{}_tag'.format(tag)] = Net.detach_and_copy_array(results_dict['tags'][:, column]) return rv", "loss with respect to the ground truth 
tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) #", "64 x 64 nn.ELU(), # append an ELU activation function module nn.Linear(64, n_tags),", "results dicts and break them out into a single dict of 1d arrays", "like NumPy, with strong GPU support import torch.nn.functional as F # pytorch neural", "ValueError('Unknown activation function {}. Try \"layer_norm\" or \"batch_norm\"' .format(activation_function)) # for each layer", "base model return {'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions, # a dictionary of results", "0.1 for count and each tag) Returns: Loss dictionary. \"\"\" # if no", "# if we set to use tags but n_tags was None raise an", "to use the counts for the data points or not (default: True) use_tags:", "the normalization_function parameter if normalization_function.lower() == 'layer_norm': self.normalization_function = nn.LayerNorm elif normalization_function.lower() ==", "# number of tags to predict feature_dimension=2381, # dimension of the input data", "Net labels, # a dictionary of labels loss_wts=None): # weights to assign to", "# weights to assign to each head of the network (if it exists)", "nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size layer_sizes[-1] x 1 nn.Sigmoid())", "# return the return value def get_embedding(self, data): # current batch of data", "= F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss weight (or set to default if not", "the counts for the data points or not (default: True) use_tags: Whether to", "not to use the counts as an additional target use_tags=False): # whether or", "torch.nn.functional as F # pytorch neural network functional interface from torch import nn", "src_dir = os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini') # instantiate config parser and read", "loss_dict['count'] = deepcopy(count_loss.item()) # update total loss loss_dict['total'] 
+= count_loss * weight if", "append to return value the result of the malware head if self.use_counts: rv['count']", "malware_labels = labels['malware'].float().to(device) # get predicted malware label, reshape it to the same", "config = configparser.ConfigParser() config.read(config_filepath) # get variables from config file device = config['general']['device']", "initialize return value # get base result forwarding the data through the base", "to use the counts for the data points or not use_tags=True, # whether", "set to default if not provided) weight = loss_wts['count'] if 'count' in loss_wts", "no loss_wts were provided set some default values if loss_wts is None: loss_wts", "[512, 512, 128] # select activation function to use based on the activation_function", "malware/benignware labels as a target use_count=False, # whether or not to use the", "the count additional target is enabled # normalize ground truth count array and", "= nn.ReLU else: # if the provided function is not recognised, raise error", "instantiate config parser and read config file config = configparser.ConfigParser() config.read(config_filepath) # get", "base self.model_base = nn.Sequential(*tuple(layers)) # create malware/benign labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1),", "save it into rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags: # if the SMART", "# return the losses @staticmethod def normalize_results(labels_dict, # labels (ground truth) dictionary results_dict,", "function module def forward(self, data): # current batch of data (features) \"\"\" Forward", "the data points or not use_counts=True, # whether to use the counts for", "float and allocate it into the selected device # (CPU or GPU) malware_labels", "data through the net. 
Args: data: Current batch of data (features) Returns: Dictionary", "* weight if 'tags' in labels: # if the tags head is enabled", "use_malware=True, # whether to use the malicious label for the data points or", "into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize predicted tag array and save", "for the data points or not (default: True) use_counts: Whether to use the", "normalization function to use based on the normalization_function parameter if normalization_function.lower() == 'layer_norm':", "self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size layer_sizes[-1] x", "if not provided) weight = loss_wts['count'] if 'count' in loss_wts else 1.0 #", "of the input data feature vector (default: 2381) embedding_dimension: Latent space size (unused)", "losses (optionally with SMART tags and vendor detection count auxiliary losses). Args: predictions:", "the base model base_out = self.model_base(data) if self.use_malware: rv['malware'] = self.malware_head(base_out) # append", "import os # provides a portable way of using operating system dependent functionality", "# Unless required by applicable law or agreed to in writing, software distributed", "dropout probability activation_function='elu', # non-linear activation function to use normalization_function='batch_norm'): # normalization function", "was None raise an exception if self.use_tags and self.n_tags is None: raise ValueError(\"n_tags", "may not use this file except in compliance with # the License. 
You", "# (CPU or GPU) malware_labels = labels['malware'].float().to(device) # get predicted malware label, reshape", "calculated count loss into the loss dictionary loss_dict['count'] = deepcopy(count_loss.item()) # update total", "'tags' in loss_wts else 1.0 # copy calculated tags loss into the loss", "enabled # extract ground truth tags, convert them to float and allocate them", "in enumerate(layer_sizes): if i == 0: # append the first Linear Layer with", "in layer_sizes for i, ls in enumerate(layer_sizes): if i == 0: # append", "support import torch.nn.functional as F # pytorch neural network functional interface from torch", "result forwarding the data through the base model base_out = self.model_base(data) if self.use_malware:", "weight = loss_wts['malware'] if 'malware' in loss_wts else 1.0 # copy calculated malware", "convert them to float and allocate them into the selected device (CPU or", "# initialize super class super().__init__() layers = [] # initialize layers array #", "raise ValueError('Unknown activation function {}. Try \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function)) #", "layer_sizes is None: layer_sizes = [512, 512, 128] # select activation function to", "__init__(self, use_malware=True, # whether to use the malicious label for the data points", "the net and get resulting embedding. 
Args: data: Current batch of data (features)", "= nn.PReLU elif activation_function.lower() == 'relu': self.activation_function = nn.ReLU else: # if the", "# get predicted malware label, reshape it to the same shape of malware_labels", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "create count poisson regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear", "= Dataset.tags # get config file path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir)", "-> this will be the model base self.model_base = nn.Sequential(*tuple(layers)) # create malware/benign", "(predicted labels) dictionary use_malware: Whether to use malware/benignware labels as a target (default:", "the data points or not (default: True) use_tags: Whether to use the SMART", "count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss weight (or set to default if", "the counts as an additional target use_tags=False): # whether or not to use", "and predictions. \"\"\" # a lot of deepcopies are done here to avoid", "dimension of the input data feature vector embedding_dimension=32, # latent space size (unused)", "def __init__(self, use_malware=True, # whether to use the malicious label for the data", "function to use (may be \"layer_norm\" or \"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware =", "use \"\"\" Initialize net. Args: use_malware: Whether to use the malicious label for", "we're trying to predict tags. 
Please include n_tags\") # initialize super class super().__init__()", "for the data points or not n_tags=None, # number of tags to predict", "config_filepath = os.path.join(src_dir, 'config.ini') # instantiate config parser and read config file config", "return value # get base result forwarding the data through the base model", "maximum flexibility from .generators.dataset import Dataset from .utils.Net import Net as baseNet #", "Non-linear activation function to use (may be \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\") (default:", "not recognised, raise error raise ValueError('Unknown activation function {}. Try \"elu\", \"leakyRelu\", \"pRelu\"", "ls in enumerate(layer_sizes): if i == 0: # append the first Linear Layer", "embedding. Args: data: Current batch of data (features) Returns: Dictionary containing the resulting", "uses fewer (and smaller) layers, as well as a single layer for all", "GPU support import torch.nn.functional as F # pytorch neural network functional interface from", "loss into the loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) # update total loss loss_dict['total']", "Net(baseNet): \"\"\" This is a simple network loosely based on the one used", "function module layers.append(nn.Dropout(dropout_p)) # append a dropout layer with probability of dropout dropout_p", "limitations under the License. 
import configparser # implements a basic configuration language for", "module nn.Linear(64, 64), # append a Linear Layer with size 64 x 64", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "dictionary loss_dict['count'] = deepcopy(count_loss.item()) # update total loss loss_dict['total'] += count_loss * weight", ".utils.Net import Net as baseNet # get tags from the dataset all_tags =", "if self.use_malware: rv['malware'] = self.malware_head(base_out) # append to return value the result of", "Layer with dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls)) else: # append a Linear", "for the # specific language governing permissions and limitations under the License. import", "# if the malware/benign target label is enabled # normalize malware ground truth", "None but we're trying to predict tags. Please include n_tags\") # initialize super", "as F # pytorch neural network functional interface from torch import nn #", "of engineer <NAME>. # # Licensed under the Apache License, Version 2.0 (the", "predict (default: None) feature_dimension: Dimension of the input data feature vector (default: 2381)", "x 1 nn.Sigmoid()) # append a sigmoid activation function module # create count", "append a Linear Layer with size layer_sizes[-1] x 1 nn.ReLU()) # append a", "batch of data (features) Returns: Dictionary containing predicted labels. \"\"\" rv = {}", "config file device = config['general']['device'] class Net(baseNet): \"\"\" This is a simple network", "True) use_counts: Whether to use the counts for the data points or not", "feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls)) else: # append a Linear Layer with dimensions", "rv = {} # initialize return value dict if use_malware: # if the", "\"\"\" Forward batch of data through the net. 
Args: data: Current batch of", "tag_labels) # get loss weight (or set to default if not provided) weight", "loss_dict['tags'] = deepcopy(tags_loss.item()) # update total loss loss_dict['total'] += tags_loss * weight return", "weight = loss_wts['tags'] if 'tags' in loss_wts else 1.0 # copy calculated tags", "resulting embedding. \"\"\" # get embedding forwarding the data through the base model", "it to the same shape of count_labels # then calculate poisson loss with", "raise ValueError(\"n_tags was None but we're trying to predict tags. Please include n_tags\")", "= {} # initialize return value # get base result forwarding the data", "nn.BatchNorm1d else: # if the provided normalization function is not recognised, raise error", "use based on the normalization_function parameter if normalization_function.lower() == 'layer_norm': self.normalization_function = nn.LayerNorm", "the data points or not (default: True) use_counts: Whether to use the counts", "function module # sigmoid activation function self.sigmoid = nn.Sigmoid() # create a tag", "of data (features) Returns: Dictionary containing predicted labels. \"\"\" rv = {} #", "self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size layer_sizes[-1] x", "containing labels and predictions. 
\"\"\" # a lot of deepcopies are done here", "of Turin (Italy) under the supervision # of professor <NAME> and engineer <NAME>", "else 1.0 # copy calculated count loss into the loss dictionary loss_dict['count'] =", "normalize malware ground truth label array and save it into rv rv['label_malware'] =", "update total loss loss_dict['total'] += tags_loss * weight return loss_dict # return the", "'malware' in loss_wts else 1.0 # copy calculated malware loss into the loss", "This is a simple network loosely based on the one used in ALOHA:", "entropy loss with respect to the ground truth tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels)", "results from the Net labels: A dictionary of labels loss_wts: Weights to assign", "== 0: # append the first Linear Layer with dimensions feature_dimension x ls", "batch of data through the net. Args: data: Current batch of data (features)", "= deepcopy(malware_loss.item()) # update total loss loss_dict['total'] += malware_loss * weight if 'count'", "# provides a portable way of using operating system dependent functionality from copy", "# if no loss_wts were provided set some default values if loss_wts is", "an ELU activation function module layers.append(nn.Dropout(dropout_p)) # append a dropout layer with probability", "malicious label for the data points or not use_counts=True, # whether to use", "\"elu\") normalization_function: Normalization function to use (may be \"layer_norm\" or \"batch_norm\") (default: \"batch_norm\")", "the first Linear Layer with dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls)) else: #", "return {'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions, # a dictionary of results from the", "the input data feature vector embedding_dimension=32, # latent space size (unused) layer_sizes=None, #", "# labels (ground truth) dictionary results_dict, # results (predicted labels) dictionary use_malware=False, #", "get loss weight (or set 
to default if not provided) weight = loss_wts['tags']", "tags as additional targets \"\"\" Take a set of results dicts and break", "target use_tags=False): # whether or not to use SMART tags as additional targets", "calculate binary cross entropy loss with respect to the ground truth malware labels", "calculate binary cross entropy loss with respect to the ground truth tags tags_loss", "self.count_head(base_out) # append to return value the result of the count head if", "\"batch_norm\") \"\"\" self.use_malware = use_malware self.use_counts = use_counts self.use_tags = use_tags self.n_tags =", "truth malware label, convert it to float and allocate it into the selected", "# normalize malware predicted label array and save it into rv rv['pred_malware'] =", "dicts and break them out into a single dict of 1d arrays with", "the License. You may obtain a copy of the License at # #", "\"layer_norm\" or \"batch_norm\"' .format(activation_function)) # for each layer size in layer_sizes for i,", "not (default: True) use_tags: Whether to use the SMART tags for the data", "base_out = self.model_base(data) if self.use_malware: rv['malware'] = self.malware_head(base_out) # append to return value", "dictionary of labels loss_wts=None): # weights to assign to each head of the", "Python programs import os # provides a portable way of using operating system", "ls layers.append(nn.Linear(feature_dimension, ls)) else: # append a Linear Layer with dimensions layer_sizes[i-1] x", "import deepcopy # creates a new object and recursively copies the original object", "and with the support of engineer <NAME>. 
# # Licensed under the Apache", "append an ELU activation function module nn.Linear(64, n_tags), # append a Linear Layer", "# append a sigmoid activation function module def forward(self, data): # current batch", "as an additional target use_tags=False): # whether or not to use SMART tags", "append a sigmoid activation function module # create count poisson regression head self.count_head", "get_embedding(self, data): # current batch of data (features) \"\"\" Forward batch of data", "are done here to avoid a FD \"leak\" in the dataset generator #", "Dictionary containing predicted labels. \"\"\" rv = {} # initialize return value #", "n_tags), # append a Linear Layer with size 64 x n_tags nn.Sigmoid()) #", "implements a basic configuration language for Python programs import os # provides a", "elif normalization_function.lower() == 'batch_norm': self.normalization_function = nn.BatchNorm1d else: # if the provided normalization", "law or agreed to in writing, software distributed under the License is distributed", "and recursively copies the original object elements import torch # tensor library like", "dictionary. \"\"\" # if no loss_wts were provided set some default values if", "* weight if 'count' in labels: # if the count head is enabled", "tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss weight (or set to default", "total loss loss_dict['total'] += count_loss * weight if 'tags' in labels: # if", "raise ValueError('Unknown activation function {}. Try \"layer_norm\" or \"batch_norm\"' .format(activation_function)) # for each", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "if 'tags' in loss_wts else 1.0 # copy calculated tags loss into the", "were provided set some default values if loss_wts is None: loss_wts = {'malware':", "{}. 
Try \"layer_norm\" or \"batch_norm\"' .format(activation_function)) # for each layer size in layer_sizes", "weight if 'tags' in labels: # if the tags head is enabled #", "layer with probability of dropout dropout_p # create a tuple from the layers", "ALOHA: Auxiliary Loss Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). Note that it uses fewer", "use the malicious label for the data points or not (default: True) use_counts:", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "'batch_norm': self.normalization_function = nn.BatchNorm1d else: # if the provided normalization function is not", "with respect to the ground truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get", "result of the malware head if self.use_counts: rv['count'] = self.count_head(base_out) # append to", "an additional target (default: False) use_tags: Whether to use SMART tags as additional", "self.model_base = nn.Sequential(*tuple(layers)) # create malware/benign labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), #", "loss loss_dict['total'] += malware_loss * weight if 'count' in labels: # if the", "= use_malware self.use_counts = use_counts self.use_tags = use_tags self.n_tags = n_tags # if", "to float and allocate it into the selected device (CPU or GPU) count_labels", "embedding. \"\"\" # get embedding forwarding the data through the base model return", "import nn # a neural networks library deeply integrated with autograd designed for", "the net. 
Args: data: Current batch of data (features) Returns: Dictionary containing predicted", "# append a Linear Layer with size 64 x n_tags nn.Sigmoid()) # append", "# implements a basic configuration language for Python programs import os # provides", "counts as an additional target (default: False) use_tags: Whether to use SMART tags", "append a Linear Layer with size 64 x n_tags nn.Sigmoid()) # append a", "is enabled # extract ground truth tags, convert them to float and allocate", "label, reshape it to the same shape of malware_labels # then calculate binary", "512, 128]) dropout_p: Dropout probability (default: 0.05) activation_function: Non-linear activation function to use", "trying to predict tags. Please include n_tags\") # initialize super class super().__init__() layers", "tags # normalize ground truth tag array and save it into rv rv['label_{}_tag'.format(tag)]", "a Norm layer of size ls layers.append(self.activation_function()) # append an ELU activation function", "1.0, 'count': 0.1, 'tags': 1.0} loss_dict = {'total': 0.} # initialize dictionary of", "into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count: # if the count additional target", "reshape it to the same shape of count_labels # then calculate poisson loss", "select normalization function to use based on the normalization_function parameter if normalization_function.lower() ==", "copy calculated count loss into the loss dictionary loss_dict['count'] = deepcopy(count_loss.item()) # update", "input data feature vector embedding_dimension=32, # latent space size (unused) layer_sizes=None, # layer", "def normalize_results(labels_dict, # labels (ground truth) dictionary results_dict, # results (predicted labels) dictionary", "of data (features) \"\"\" Forward batch of data through the net and get", "CONDITIONS OF ANY KIND, either express or implied. See the License for the", "governing permissions and limitations under the License. 
import configparser # implements a basic", "not use this file except in compliance with # the License. You may", "as additional targets (default: False) Returns: Dictionary containing labels and predictions. \"\"\" #", "use_counts self.use_tags = use_tags self.n_tags = n_tags # if we set to use", "'config.ini') # instantiate config parser and read config file config = configparser.ConfigParser() config.read(config_filepath)", "on the one used in ALOHA: Auxiliary Loss Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700).", "not to use malware/benignware labels as a target use_count=False, # whether or not", "SMART tags for the data points or not (default: True) n_tags: Number of", "self.model_base(data)} @staticmethod def compute_loss(predictions, # a dictionary of results from the Net labels,", "# append a Relu activation function module # sigmoid activation function self.sigmoid =", "language governing permissions and limitations under the License. import configparser # implements a", "initialize dictionary of losses if 'malware' in labels: # if the malware head", "if self.use_counts: rv['count'] = self.count_head(base_out) # append to return value the result of", "programs import os # provides a portable way of using operating system dependent", "autograd designed for maximum flexibility from .generators.dataset import Dataset from .utils.Net import Net", "Turin (Italy) under the supervision # of professor <NAME> and engineer <NAME> and", "not (default: True) n_tags: Number of tags to predict (default: None) feature_dimension: Dimension", "at the TORSEC research group of the Polytechnic of Turin (Italy) under the", "{} # initialize return value dict if use_malware: # if the malware/benign target", "all the tags # normalize ground truth tag array and save it into", "for each layer size in layer_sizes for i, ls in enumerate(layer_sizes): if i", "cross entropy loss with respect to the ground truth tags tags_loss = 
F.binary_cross_entropy(predictions['tags'],", "2021, <NAME>. # # Developed as a thesis project at the TORSEC research", "count_labels = labels['count'].float().to(device) # get predicted count, reshape it to the same shape", "baseNet # get tags from the dataset all_tags = Dataset.tags # get config", "in the ALOHA paper (1.0 for malware, 0.1 for count and each tag)", "entropy loss with respect to the ground truth malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape),", "the malicious label for the data points or not use_counts=True, # whether to", "batch of data through the net and get resulting embedding. Args: data: Current", "if self.use_tags and self.n_tags is None: raise ValueError(\"n_tags was None but we're trying", "32) layer_sizes: Layer sizes (array of sizes) (default: None -> use [512, 512,", "malware, 0.1 for count and each tag) Returns: Loss dictionary. \"\"\" # if", "layers.append(self.activation_function()) # append an ELU activation function module layers.append(nn.Dropout(dropout_p)) # append a dropout", "if normalization_function.lower() == 'layer_norm': self.normalization_function = nn.LayerNorm elif normalization_function.lower() == 'batch_norm': self.normalization_function =", "module nn.Linear(64, n_tags), # append a Linear Layer with size 64 x n_tags", "points or not n_tags=None, # number of tags to predict feature_dimension=2381, # dimension", "include n_tags\") # initialize super class super().__init__() layers = [] # initialize layers", "base model base_out = self.model_base(data) if self.use_malware: rv['malware'] = self.malware_head(base_out) # append to", "data: Current batch of data (features) Returns: Dictionary containing the resulting embedding. 
\"\"\"", "malware head if self.use_counts: rv['count'] = self.count_head(base_out) # append to return value the", "distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT", "os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini') # instantiate config parser and read config file", "(default: True) n_tags: Number of tags to predict (default: None) feature_dimension: Dimension of", "with probability of dropout dropout_p # create a tuple from the layers list,", "activation function module nn.Linear(64, 64), # append a Linear Layer with size 64", "and vendor detection count auxiliary losses). Args: predictions: A dictionary of results from", "1 nn.Sigmoid()) # append a sigmoid activation function module # create count poisson", "rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize predicted tag array and save it", "use (may be \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\") (default: \"elu\") normalization_function: Normalization function", "head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a Linear Layer with size layer_sizes[-1]", "loss_wts: Weights to assign to each head of the network (if it exists);", "vector embedding_dimension=32, # latent space size (unused) layer_sizes=None, # layer sizes (array of", "into the loss dictionary loss_dict['count'] = deepcopy(count_loss.item()) # update total loss loss_dict['total'] +=", "(default: 2381) embedding_dimension: Latent space size (unused) (default: 32) layer_sizes: Layer sizes (array", "recursively copies the original object elements import torch # tensor library like NumPy,", "dictionary results_dict: Results (predicted labels) dictionary use_malware: Whether to use malware/benignware labels as", "interface from torch import nn # a neural networks library deeply integrated with", "\"\"\" self.use_malware = use_malware self.use_counts = use_counts self.use_tags = use_tags self.n_tags = n_tags", 
"for all the tags # normalize ground truth tag array and save it", "torch # tensor library like NumPy, with strong GPU support import torch.nn.functional as", "layer_sizes was not defined (it is None) then initialize it to a default", "# if the tags head is enabled # extract ground truth tags, convert", "the dataset generator # see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} # initialize return", "device (CPU or GPU) tag_labels = labels['tags'].float().to(device) # get predicted tags and then", "labels['count'].float().to(device) # get predicted count, reshape it to the same shape of count_labels", "extract ground truth tags, convert them to float and allocate them into the", "(array of sizes) dropout_p=0.05, # dropout probability activation_function='elu', # non-linear activation function to", "set to default if not provided) weight = loss_wts['malware'] if 'malware' in loss_wts", "it into the selected device (CPU or GPU) count_labels = labels['count'].float().to(device) # get", "with appropriate column names that pandas can convert to a DataFrame. Args: labels_dict:", "way of using operating system dependent functionality from copy import deepcopy # creates", "the ALOHA paper (1.0 for malware, 0.1 for count and each tag) Returns:", "count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss weight (or set to default", "(default: False) Returns: Dictionary containing labels and predictions. 
\"\"\" # a lot of", "save it into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count array and", "if activation_function.lower() == 'elu': self.activation_function = nn.ELU elif activation_function.lower() == 'leakyrelu': self.activation_function =", "+= malware_loss * weight if 'count' in labels: # if the count head", "Whether to use SMART tags as additional targets (default: False) Returns: Dictionary containing", "Augmentation (https://arxiv.org/abs/1903.05700). Note that it uses fewer (and smaller) layers, as well as", "not use_tags=True, # whether to use the tags for the data points or", "with autograd designed for maximum flexibility from .generators.dataset import Dataset from .utils.Net import", "normalization_function parameter if normalization_function.lower() == 'layer_norm': self.normalization_function = nn.LayerNorm elif normalization_function.lower() == 'batch_norm':", "or not (default: True) use_counts: Whether to use the counts for the data", "pandas can convert to a DataFrame. Args: labels_dict: Labels (ground truth) dictionary results_dict:", "the one used in ALOHA: Auxiliary Loss Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). Note", "labels_dict: Labels (ground truth) dictionary results_dict: Results (predicted labels) dictionary use_malware: Whether to", "nn.LeakyReLU elif activation_function.lower() == 'prelu': self.activation_function = nn.PReLU elif activation_function.lower() == 'relu': self.activation_function", "net and get resulting embedding. Args: data: Current batch of data (features) Returns:", "data points or not use_tags=True, # whether to use the tags for the", "arrays with appropriate column names that pandas can convert to a DataFrame. Args:", "batch of data (features) \"\"\" Forward batch of data through the net and", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See", "for i, ls in enumerate(layer_sizes): if i == 0: # append the first", "assign to each head of the network (if it exists) \"\"\" Compute Net", "file except in compliance with # the License. You may obtain a copy", "and save it into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count: # if the", "1), # append a Linear Layer with size layer_sizes[-1] x 1 nn.ReLU()) #", "data feature vector embedding_dimension=32, # latent space size (unused) layer_sizes=None, # layer sizes", "function to use \"\"\" Initialize net. Args: use_malware: Whether to use the malicious", "the ground truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss weight (or", "== 'elu': self.activation_function = nn.ELU elif activation_function.lower() == 'leakyrelu': self.activation_function = nn.LeakyReLU elif", "\"\"\" def __init__(self, use_malware=True, # whether to use the malicious label for the", "= {'malware': 1.0, 'count': 0.1, 'tags': 1.0} loss_dict = {'total': 0.} # initialize", "a dictionary of labels loss_wts=None): # weights to assign to each head of", "to use (may be \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\") (default: \"elu\") normalization_function: Normalization", "A dictionary of labels loss_wts: Weights to assign to each head of the", "poisson loss with respect to the ground truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels)", "head is enabled # extract ground truth tags, convert them to float and", "\"leakyRelu\", \"pRelu\" or \"relu\") (default: \"elu\") normalization_function: Normalization function to use (may be", "= nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a Linear Layer with size layer_sizes[-1] x 64", "malware label, reshape it to the same shape of malware_labels # then calculate", "in the dataset generator # see here: 
https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} # initialize", "on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Layer with size layer_sizes[-1] x 1 nn.Sigmoid()) # append a sigmoid activation function", "(features) \"\"\" Forward batch of data through the net. Args: data: Current batch", "from .utils.Net import Net as baseNet # get tags from the dataset all_tags", "well as a single layer for all tag predictions, performance will suffer accordingly.", "# pytorch neural network functional interface from torch import nn # a neural", "update total loss loss_dict['total'] += malware_loss * weight if 'count' in labels: #", "append a dropout layer with probability of dropout dropout_p # create a tuple", "Auxiliary Loss Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). Note that it uses fewer (and", "multi-label classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a Linear Layer with", "ELU activation function module layers.append(nn.Dropout(dropout_p)) # append a dropout layer with probability of", "parser and read config file config = configparser.ConfigParser() config.read(config_filepath) # get variables from", "latent space size (unused) layer_sizes=None, # layer sizes (array of sizes) dropout_p=0.05, #", "original object elements import torch # tensor library like NumPy, with strong GPU", "None: layer_sizes = [512, 512, 128] # select activation function to use based", "a target use_count=False, # whether or not to use the counts as an", "extract ground truth count, convert it to float and allocate it into the", "smaller) layers, as well as a single layer for all tag predictions, performance", "\"\"\" Compute Net losses (optionally with SMART tags and vendor detection count auxiliary", "to return value the result of the tag head return rv # return", "'elu': self.activation_function = nn.ELU elif activation_function.lower() == 'leakyrelu': 
self.activation_function = nn.LeakyReLU elif activation_function.lower()", "labels: # if the tags head is enabled # extract ground truth tags,", "False) Returns: Dictionary containing labels and predictions. \"\"\" # a lot of deepcopies", "Whether to use the counts as an additional target (default: False) use_tags: Whether", "weight (or set to default if not provided) weight = loss_wts['malware'] if 'malware'", "(or set to default if not provided) weight = loss_wts['count'] if 'count' in", "rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted label array and save it", "through the net. Args: data: Current batch of data (features) Returns: Dictionary containing", "count_labels) # get loss weight (or set to default if not provided) weight", "Args: use_malware: Whether to use the malicious label for the data points or", "function module # create count poisson regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), #", "then calculate binary cross entropy loss with respect to the ground truth tags", "Net.detach_and_copy_array(results_dict['malware']) if use_count: # if the count additional target is enabled # normalize", "64), # append a Linear Layer with size layer_sizes[-1] x 64 nn.ELU(), #", "use_tags: Whether to use the SMART tags for the data points or not", "nn.Sigmoid()) # append a sigmoid activation function module # create count poisson regression", "count loss into the loss dictionary loss_dict['count'] = deepcopy(count_loss.item()) # update total loss", "128] if layer_sizes is None: layer_sizes = [512, 512, 128] # select activation", "None: raise ValueError(\"n_tags was None but we're trying to predict tags. 
Please include", "layer_sizes = [512, 512, 128] # select activation function to use based on", "to the ground truth tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss weight", "= use_tags self.n_tags = n_tags # if we set to use tags but", "predicted tags and then calculate binary cross entropy loss with respect to the", "is None: raise ValueError(\"n_tags was None but we're trying to predict tags. Please", "or not use_counts=True, # whether to use the counts for the data points", "labels['tags'].float().to(device) # get predicted tags and then calculate binary cross entropy loss with", "targets are enabled for column, tag in enumerate(all_tags): # for all the tags", "else: # if the provided normalization function is not recognised, raise error raise", "tag_labels = labels['tags'].float().to(device) # get predicted tags and then calculate binary cross entropy", "Dropout probability (default: 0.05) activation_function: Non-linear activation function to use (may be \"elu\",", "list, then apply nn.Sequential to get a sequential container # -> this will", "\"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function)) # select normalization function to use based on", "Whether to use the malicious label for the data points or not (default:", "and engineer <NAME> and with the support of engineer <NAME>. 
# # Licensed", "under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES", "result of the tag head return rv # return the return value def", "+= tags_loss * weight return loss_dict # return the losses @staticmethod def normalize_results(labels_dict,", "respect to the ground truth malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get", "use tags but n_tags was None raise an exception if self.use_tags and self.n_tags", "is enabled # normalize ground truth count array and save it into rv", "# append to return value the result of the malware head if self.use_counts:", "(CPU or GPU) malware_labels = labels['malware'].float().to(device) # get predicted malware label, reshape it", "= self.count_head(base_out) # append to return value the result of the count head", "(CPU or GPU) count_labels = labels['count'].float().to(device) # get predicted count, reshape it to", "and break them out into a single dict of 1d arrays with appropriate", "the provided function is not recognised, raise error raise ValueError('Unknown activation function {}.", "it into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted label array and", "self.tag_head(base_out) # append to return value the result of the tag head return", "activation function to use (may be \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\") (default: \"elu\")", "os.path.join(src_dir, 'config.ini') # instantiate config parser and read config file config = configparser.ConfigParser()", "tags. 
Please include n_tags\") # initialize super class super().__init__() layers = [] #", "float and allocate it into the selected device (CPU or GPU) count_labels =", "the Polytechnic of Turin (Italy) under the supervision # of professor <NAME> and", "sigmoid activation function module def forward(self, data): # current batch of data (features)", "embedding_dimension=32, # latent space size (unused) layer_sizes=None, # layer sizes (array of sizes)", "# get base result forwarding the data through the base model base_out =", "provides a portable way of using operating system dependent functionality from copy import", "pytorch neural network functional interface from torch import nn # a neural networks", "appropriate column names that pandas can convert to a DataFrame. Args: labels_dict: Labels", "(if it exists) \"\"\" Compute Net losses (optionally with SMART tags and vendor", "a Linear Layer with size layer_sizes[-1] x 1 nn.Sigmoid()) # append a sigmoid", "layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls)) # append a Norm layer of size ls", "to predict feature_dimension=2381, # dimension of the input data feature vector embedding_dimension=32, #", "tags head is enabled # extract ground truth tags, convert them to float", "# then calculate poisson loss with respect to the ground truth count count_loss", "License for the # specific language governing permissions and limitations under the License.", "(default: \"batch_norm\") \"\"\" self.use_malware = use_malware self.use_counts = use_counts self.use_tags = use_tags self.n_tags", "truth) dictionary results_dict, # results (predicted labels) dictionary use_malware=False, # whether or not", "# non-linear activation function to use normalization_function='batch_norm'): # normalization function to use \"\"\"", "<NAME> and with the support of engineer <NAME>. # # Licensed under the", "losses). 
Args: predictions: A dictionary of results from the Net labels: A dictionary", "Try \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function)) # select normalization function to use", "'malware' in labels: # if the malware head is enabled # extract ground", "# see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} # initialize return value dict if", "nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size layer_sizes[-1] x 1 nn.ReLU())", "(default: False) use_count: Whether to use the counts as an additional target (default:", "append the first Linear Layer with dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls)) else:", "\"\"\" rv = {} # initialize return value # get base result forwarding", "here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} # initialize return value dict if use_malware: #", "nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini')", "activation_function='elu', # non-linear activation function to use normalization_function='batch_norm'): # normalization function to use", "this will be the model base self.model_base = nn.Sequential(*tuple(layers)) # create malware/benign labeling", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "the provided normalization function is not recognised, raise error raise ValueError('Unknown activation function", "provided function is not recognised, raise error raise ValueError('Unknown activation function {}. 
Try", "= {} # initialize return value dict if use_malware: # if the malware/benign", "dictionary of results from the Net labels: A dictionary of labels loss_wts: Weights", "= Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize predicted tag array and save it into rv", "64), # append a Linear Layer with size 64 x 64 nn.ELU(), #", "configparser.ConfigParser() config.read(config_filepath) # get variables from config file device = config['general']['device'] class Net(baseNet):", "the resulting embedding. \"\"\" # get embedding forwarding the data through the base", "(default: None -> use [512, 512, 128]) dropout_p: Dropout probability (default: 0.05) activation_function:", "size (unused) layer_sizes=None, # layer sizes (array of sizes) dropout_p=0.05, # dropout probability", "Dictionary containing the resulting embedding. \"\"\" # get embedding forwarding the data through", "losses @staticmethod def normalize_results(labels_dict, # labels (ground truth) dictionary results_dict, # results (predicted", "'layer_norm': self.normalization_function = nn.LayerNorm elif normalization_function.lower() == 'batch_norm': self.normalization_function = nn.BatchNorm1d else: #", "used in the ALOHA paper (1.0 for malware, 0.1 for count and each", "to use SMART tags as additional targets \"\"\" Take a set of results", "a single layer for all tag predictions, performance will suffer accordingly. 
\"\"\" def", "deeply integrated with autograd designed for maximum flexibility from .generators.dataset import Dataset from", "copy import deepcopy # creates a new object and recursively copies the original", "1.0 # copy calculated malware loss into the loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item())", "the original object elements import torch # tensor library like NumPy, with strong", "into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count array and save it", "of sizes) dropout_p=0.05, # dropout probability activation_function='elu', # non-linear activation function to use", "express or implied. See the License for the # specific language governing permissions", "use the malicious label for the data points or not use_counts=True, # whether", "network (if it exists) \"\"\" Compute Net losses (optionally with SMART tags and", "deepcopies are done here to avoid a FD \"leak\" in the dataset generator", "Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted label array and save it into rv rv['pred_malware']", "the tags head is enabled # extract ground truth tags, convert them to", "use normalization_function='batch_norm'): # normalization function to use \"\"\" Initialize net. Args: use_malware: Whether", "of labels loss_wts: Weights to assign to each head of the network (if", "to a DataFrame. Args: labels_dict: Labels (ground truth) dictionary results_dict: Results (predicted labels)", "# results (predicted labels) dictionary use_malware=False, # whether or not to use malware/benignware", "compliance with # the License. 
You may obtain a copy of the License", "n_tags was None raise an exception if self.use_tags and self.n_tags is None: raise", "(ground truth) dictionary results_dict, # results (predicted labels) dictionary use_malware=False, # whether or", "return rv # return the return value def get_embedding(self, data): # current batch", "it exists) \"\"\" Compute Net losses (optionally with SMART tags and vendor detection", "it to float and allocate it into the selected device # (CPU or", "array and save it into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted", "= {'total': 0.} # initialize dictionary of losses if 'malware' in labels: #", "break them out into a single dict of 1d arrays with appropriate column", "self.normalization_function = nn.BatchNorm1d else: # if the provided normalization function is not recognised,", "# dimension of the input data feature vector embedding_dimension=32, # latent space size", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "to use malware/benignware labels as a target use_count=False, # whether or not to", "64 x n_tags nn.Sigmoid()) # append a sigmoid activation function module def forward(self,", "of malware_labels # then calculate binary cross entropy loss with respect to the", "data (features) \"\"\" Forward batch of data through the net and get resulting", "into the selected device (CPU or GPU) count_labels = labels['count'].float().to(device) # get predicted", "[] # initialize layers array # if layer_sizes was not defined (it is", "== 'leakyrelu': self.activation_function = nn.LeakyReLU elif activation_function.lower() == 'prelu': self.activation_function = nn.PReLU elif", "\"\"\" Take a set of results dicts and break them out into a", "the License. 
import configparser # implements a basic configuration language for Python programs", "# -> this will be the model base self.model_base = nn.Sequential(*tuple(layers)) # create", "Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize predicted tag array and save it into rv rv['pred_{}_tag'.format(tag)]", "= nn.Sequential(*tuple(layers)) # create malware/benign labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append", "each head of the network (if it exists) \"\"\" Compute Net losses (optionally", "# if layer_sizes was not defined (it is None) then initialize it to", "truth tag array and save it into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) #", "the network (if it exists); defaults to values used in the ALOHA paper", "tag array and save it into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize", "Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count array and save it into rv rv['pred_count'] =", "but we're trying to predict tags. Please include n_tags\") # initialize super class", "we set to use tags but n_tags was None raise an exception if", "1.0 # copy calculated count loss into the loss dictionary loss_dict['count'] = deepcopy(count_loss.item())", "= deepcopy(tags_loss.item()) # update total loss loss_dict['total'] += tags_loss * weight return loss_dict", "normalize malware predicted label array and save it into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware'])", "function is not recognised, raise error raise ValueError('Unknown activation function {}. Try \"elu\",", "(or set to default if not provided) weight = loss_wts['malware'] if 'malware' in", "not to use SMART tags as additional targets \"\"\" Take a set of", "in compliance with # the License. 
You may obtain a copy of the", "use SMART tags as additional targets (default: False) Returns: Dictionary containing labels and", "activation function module def forward(self, data): # current batch of data (features) \"\"\"", "research group of the Polytechnic of Turin (Italy) under the supervision # of", "# if the SMART tags additional targets are enabled for column, tag in", "size layer_sizes[-1] x 64 nn.ELU(), # append an ELU activation function module nn.Linear(64,", "single layer for all tag predictions, performance will suffer accordingly. \"\"\" def __init__(self,", "loss loss_dict['total'] += count_loss * weight if 'tags' in labels: # if the", "to default if not provided) weight = loss_wts['malware'] if 'malware' in loss_wts else", "Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). Note that it uses fewer (and smaller) layers,", "the input data feature vector (default: 2381) embedding_dimension: Latent space size (unused) (default:", "labels, # a dictionary of labels loss_wts=None): # weights to assign to each", "ANY KIND, either express or implied. 
See the License for the # specific", "or \"relu\"' .format(activation_function)) # select normalization function to use based on the normalization_function", "activation_function.lower() == 'leakyrelu': self.activation_function = nn.LeakyReLU elif activation_function.lower() == 'prelu': self.activation_function = nn.PReLU", "layers.append(nn.Dropout(dropout_p)) # append a dropout layer with probability of dropout dropout_p # create", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "exists); defaults to values used in the ALOHA paper (1.0 for malware, 0.1", "the result of the tag head return rv # return the return value", "# get predicted count, reshape it to the same shape of count_labels #", "Compute Net losses (optionally with SMART tags and vendor detection count auxiliary losses).", "of data through the net and get resulting embedding. Args: data: Current batch", "# copy calculated count loss into the loss dictionary loss_dict['count'] = deepcopy(count_loss.item()) #", "# select activation function to use based on the activation_function parameter if activation_function.lower()", "# layer sizes (array of sizes) dropout_p=0.05, # dropout probability activation_function='elu', # non-linear", "if 'count' in labels: # if the count head is enabled # extract", "not n_tags=None, # number of tags to predict feature_dimension=2381, # dimension of the", "labels: # if the malware head is enabled # extract ground truth malware", "loss weight (or set to default if not provided) weight = loss_wts['malware'] if", "is enabled # normalize malware ground truth label array and save it into", "group of the Polytechnic of Turin (Italy) under the supervision # of professor", "defined (it is None) then initialize it to a default of [512, 512,", "dict if use_malware: # if the malware/benign target label is enabled # normalize", "rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize 
predicted tag array and save it into", "if use_count: # if the count additional target is enabled # normalize ground", "# initialize layers array # if layer_sizes was not defined (it is None)", "function {}. Try \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function)) # select normalization function", "= F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss weight (or set to default if not", "value the result of the count head if self.use_tags: rv['tags'] = self.tag_head(base_out) #", "dictionary use_malware: Whether to use malware/benignware labels as a target (default: False) use_count:", "Current batch of data (features) Returns: Dictionary containing the resulting embedding. \"\"\" #", "OF ANY KIND, either express or implied. See the License for the #", "the network (if it exists) \"\"\" Compute Net losses (optionally with SMART tags", "function is not recognised, raise error raise ValueError('Unknown activation function {}. Try \"layer_norm\"", "the ground truth malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss weight", "# get config file path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir =", "(default: None) feature_dimension: Dimension of the input data feature vector (default: 2381) embedding_dimension:", "a Linear Layer with size layer_sizes[-1] x 1 nn.ReLU()) # append a Relu", "Current batch of data (features) Returns: Dictionary containing predicted labels. 
\"\"\" rv =", "from the Net labels, # a dictionary of labels loss_wts=None): # weights to", "See the License for the # specific language governing permissions and limitations under", "\"License\"); you may not use this file except in compliance with # the", "Note that it uses fewer (and smaller) layers, as well as a single", "if i == 0: # append the first Linear Layer with dimensions feature_dimension", "loss weight (or set to default if not provided) weight = loss_wts['count'] if", "into the selected device (CPU or GPU) tag_labels = labels['tags'].float().to(device) # get predicted", "0.} # initialize dictionary of losses if 'malware' in labels: # if the", "use the counts as an additional target use_tags=False): # whether or not to", "nn.ReLU()) # append a Relu activation function module # sigmoid activation function self.sigmoid", "it into rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags: # if the SMART tags", "object and recursively copies the original object elements import torch # tensor library", "# update total loss loss_dict['total'] += count_loss * weight if 'tags' in labels:", "a portable way of using operating system dependent functionality from copy import deepcopy", "that pandas can convert to a DataFrame. 
Args: labels_dict: Labels (ground truth) dictionary", "points or not (default: True) use_tags: Whether to use the SMART tags for", "config.read(config_filepath) # get variables from config file device = config['general']['device'] class Net(baseNet): \"\"\"", "@staticmethod def normalize_results(labels_dict, # labels (ground truth) dictionary results_dict, # results (predicted labels)", "required by applicable law or agreed to in writing, software distributed under the", "1.0} loss_dict = {'total': 0.} # initialize dictionary of losses if 'malware' in", "designed for maximum flexibility from .generators.dataset import Dataset from .utils.Net import Net as", "weight if 'count' in labels: # if the count head is enabled #", "<NAME>. # # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "of results from the Net labels, # a dictionary of labels loss_wts=None): #", "== 'layer_norm': self.normalization_function = nn.LayerNorm elif normalization_function.lower() == 'batch_norm': self.normalization_function = nn.BatchNorm1d else:", "use_tags: # if the SMART tags additional targets are enabled for column, tag", "ground truth tag array and save it into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column])", "F # pytorch neural network functional interface from torch import nn # a", "Layer sizes (array of sizes) (default: None -> use [512, 512, 128]) dropout_p:", "extract ground truth malware label, convert it to float and allocate it into", "layers, as well as a single layer for all tag predictions, performance will", "results_dict: Results (predicted labels) dictionary use_malware: Whether to use malware/benignware labels as a", "thesis project at the TORSEC research group of the Polytechnic of Turin (Italy)", "os # provides a portable way of using operating system dependent functionality from", "= deepcopy(count_loss.item()) # update total loss loss_dict['total'] += count_loss * weight if 'tags'", "layer for all 
tag predictions, performance will suffer accordingly. \"\"\" def __init__(self, use_malware=True,", "Layer with size 64 x 64 nn.ELU(), # append an ELU activation function", "layer_sizes=None, # layer sizes (array of sizes) dropout_p=0.05, # dropout probability activation_function='elu', #", "= configparser.ConfigParser() config.read(config_filepath) # get variables from config file device = config['general']['device'] class", "additional target is enabled # normalize ground truth count array and save it", "self.activation_function = nn.ReLU else: # if the provided function is not recognised, raise", "into a single dict of 1d arrays with appropriate column names that pandas", "F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss weight (or set to default if not provided)", "create a tag multi-label classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a", "the dataset all_tags = Dataset.tags # get config file path nets_dir = os.path.dirname(os.path.abspath(__file__))", "append a Linear Layer with dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i - 1], ls))", "head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size layer_sizes[-1]", "names that pandas can convert to a DataFrame. 
Args: labels_dict: Labels (ground truth)", "# append a Linear Layer with size layer_sizes[-1] x 1 nn.ReLU()) # append", "use malware/benignware labels as a target use_count=False, # whether or not to use", "= self.model_base(data) if self.use_malware: rv['malware'] = self.malware_head(base_out) # append to return value the", "from the layers list, then apply nn.Sequential to get a sequential container #", "'prelu': self.activation_function = nn.PReLU elif activation_function.lower() == 'relu': self.activation_function = nn.ReLU else: #", "it to a default of [512, 512, 128] if layer_sizes is None: layer_sizes", "malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss weight (or set to", "config file config = configparser.ConfigParser() config.read(config_filepath) # get variables from config file device", "Normalization function to use (may be \"layer_norm\" or \"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware", "of sizes) (default: None -> use [512, 512, 128]) dropout_p: Dropout probability (default:", "n_tags nn.Sigmoid()) # append a sigmoid activation function module def forward(self, data): #", "# select normalization function to use based on the normalization_function parameter if normalization_function.lower()", "# append an ELU activation function module layers.append(nn.Dropout(dropout_p)) # append a dropout layer", "it into the selected device # (CPU or GPU) malware_labels = labels['malware'].float().to(device) #", "config['general']['device'] class Net(baseNet): \"\"\" This is a simple network loosely based on the", "column names that pandas can convert to a DataFrame. 
Args: labels_dict: Labels (ground", "supervision # of professor <NAME> and engineer <NAME> and with the support of", "# get embedding forwarding the data through the base model return {'embedding': self.model_base(data)}", "the malware head is enabled # extract ground truth malware label, convert it", "points or not use_counts=True, # whether to use the counts for the data", "Layer with dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls)) # append", "predicted labels. \"\"\" rv = {} # initialize return value # get base", "values if loss_wts is None: loss_wts = {'malware': 1.0, 'count': 0.1, 'tags': 1.0}", "data through the net and get resulting embedding. Args: data: Current batch of", "parameter if activation_function.lower() == 'elu': self.activation_function = nn.ELU elif activation_function.lower() == 'leakyrelu': self.activation_function", "Net labels: A dictionary of labels loss_wts: Weights to assign to each head", "self.use_tags and self.n_tags is None: raise ValueError(\"n_tags was None but we're trying to", "labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss weight (or set to default", "# append a sigmoid activation function module # create count poisson regression head", "= config['general']['device'] class Net(baseNet): \"\"\" This is a simple network loosely based on", "'count': 0.1, 'tags': 1.0} loss_dict = {'total': 0.} # initialize dictionary of losses", "# append the first Linear Layer with dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls))", "\"\"\" Initialize net. 
Args: use_malware: Whether to use the malicious label for the", "rv # return the return value def get_embedding(self, data): # current batch of", "but n_tags was None raise an exception if self.use_tags and self.n_tags is None:", "use_count: # if the count additional target is enabled # normalize ground truth", "import torch # tensor library like NumPy, with strong GPU support import torch.nn.functional", "counts for the data points or not (default: True) use_tags: Whether to use", "ground truth label array and save it into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) #", "a Linear Layer with dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls))", "raise error raise ValueError('Unknown activation function {}. Try \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"'", "# if the malware head is enabled # extract ground truth malware label,", "loss_dict # return the losses @staticmethod def normalize_results(labels_dict, # labels (ground truth) dictionary", "the malware head if self.use_counts: rv['count'] = self.count_head(base_out) # append to return value", "# latent space size (unused) layer_sizes=None, # layer sizes (array of sizes) dropout_p=0.05,", "to use tags but n_tags was None raise an exception if self.use_tags and", "Loss dictionary. \"\"\" # if no loss_wts were provided set some default values", "for the data points or not use_counts=True, # whether to use the counts", "respect to the ground truth tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss", "of losses if 'malware' in labels: # if the malware head is enabled", "a FD \"leak\" in the dataset generator # see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv =", "raise error raise ValueError('Unknown activation function {}. 
Try \"layer_norm\" or \"batch_norm\"' .format(activation_function)) #", "i, ls in enumerate(layer_sizes): if i == 0: # append the first Linear", "configuration language for Python programs import os # provides a portable way of", "a thesis project at the TORSEC research group of the Polytechnic of Turin", "loss_dict['total'] += count_loss * weight if 'tags' in labels: # if the tags", "# for all the tags # normalize ground truth tag array and save", "default if not provided) weight = loss_wts['tags'] if 'tags' in loss_wts else 1.0", "predicted malware label, reshape it to the same shape of malware_labels # then", "weight (or set to default if not provided) weight = loss_wts['tags'] if 'tags'", "or not (default: True) n_tags: Number of tags to predict (default: None) feature_dimension:", "through the net and get resulting embedding. Args: data: Current batch of data", "some default values if loss_wts is None: loss_wts = {'malware': 1.0, 'count': 0.1,", "object elements import torch # tensor library like NumPy, with strong GPU support", "use_malware: Whether to use malware/benignware labels as a target (default: False) use_count: Whether", "malware ground truth label array and save it into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware'])", "x ls layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls)) # append a Norm layer of", "= os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini') # instantiate config parser", "to each head of the network (if it exists); defaults to values used", "truth tags tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss weight (or set to", "then calculate binary cross entropy loss with respect to the ground truth malware", "None) feature_dimension: Dimension of the input data feature vector (default: 2381) embedding_dimension: Latent", "if not provided) weight = loss_wts['tags'] if 'tags' 
in loss_wts else 1.0 #", "rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count array and save it into rv", "of the network (if it exists); defaults to values used in the ALOHA", "loss_dict['total'] += malware_loss * weight if 'count' in labels: # if the count", "it to the same shape of malware_labels # then calculate binary cross entropy", "of count_labels # then calculate poisson loss with respect to the ground truth", "ls layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls)) # append a Norm layer of size", "rv['tags'] = self.tag_head(base_out) # append to return value the result of the tag", "if use_tags: # if the SMART tags additional targets are enabled for column,", "License. import configparser # implements a basic configuration language for Python programs import", "of the input data feature vector embedding_dimension=32, # latent space size (unused) layer_sizes=None,", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "nn.PReLU elif activation_function.lower() == 'relu': self.activation_function = nn.ReLU else: # if the provided", "not provided) weight = loss_wts['count'] if 'count' in loss_wts else 1.0 # copy", "of the malware head if self.use_counts: rv['count'] = self.count_head(base_out) # append to return", "from the dataset all_tags = Dataset.tags # get config file path nets_dir =", "space size (unused) (default: 32) layer_sizes: Layer sizes (array of sizes) (default: None", "total loss loss_dict['total'] += malware_loss * weight if 'count' in labels: # if", "= loss_wts['count'] if 'count' in loss_wts else 1.0 # copy calculated count loss", "if self.use_tags: rv['tags'] = self.tag_head(base_out) # append to return value the result of", "sequential container # -> this will be the model base self.model_base = nn.Sequential(*tuple(layers))", "tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels) # get loss weight (or set to 
default if", "of [512, 512, 128] if layer_sizes is None: layer_sizes = [512, 512, 128]", "count poisson regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer", "with respect to the ground truth malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) #", "rv['malware'] = self.malware_head(base_out) # append to return value the result of the malware", "def get_embedding(self, data): # current batch of data (features) \"\"\" Forward batch of", "# get variables from config file device = config['general']['device'] class Net(baseNet): \"\"\" This", "torch import nn # a neural networks library deeply integrated with autograd designed", "of the tag head return rv # return the return value def get_embedding(self,", "of size ls layers.append(self.activation_function()) # append an ELU activation function module layers.append(nn.Dropout(dropout_p)) #", "calculated tags loss into the loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) # update total", "to in writing, software distributed under the License is distributed on # an", "(default: \"elu\") normalization_function: Normalization function to use (may be \"layer_norm\" or \"batch_norm\") (default:", "= os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini') # instantiate config parser and read config", "loss_wts else 1.0 # copy calculated tags loss into the loss dictionary loss_dict['tags']", "= self.malware_head(base_out) # append to return value the result of the malware head", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "to float and allocate them into the selected device (CPU or GPU) tag_labels", "predicted tag array and save it into rv rv['pred_{}_tag'.format(tag)] = Net.detach_and_copy_array(results_dict['tags'][:, column]) return", "shape of malware_labels # then calculate binary cross entropy loss with 
respect to", "dictionary of labels loss_wts: Weights to assign to each head of the network", "if we set to use tags but n_tags was None raise an exception", "# an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "first Linear Layer with dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls)) else: # append", "\"\"\" This is a simple network loosely based on the one used in", "it into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize predicted tag array and", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "1], ls)) layers.append(self.normalization_function(ls)) # append a Norm layer of size ls layers.append(self.activation_function()) #", "normalization function is not recognised, raise error raise ValueError('Unknown activation function {}. Try", "self.use_malware = use_malware self.use_counts = use_counts self.use_tags = use_tags self.n_tags = n_tags #", "== 'batch_norm': self.normalization_function = nn.BatchNorm1d else: # if the provided normalization function is", "nn.ELU(), # append an ELU activation function module nn.Linear(64, 64), # append a", "# normalization function to use \"\"\" Initialize net. 
Args: use_malware: Whether to use", "the model base self.model_base = nn.Sequential(*tuple(layers)) # create malware/benign labeling head self.malware_head =", "for Python programs import os # provides a portable way of using operating", "return value the result of the count head if self.use_tags: rv['tags'] = self.tag_head(base_out)", "Args: labels_dict: Labels (ground truth) dictionary results_dict: Results (predicted labels) dictionary use_malware: Whether", "append a Linear Layer with size layer_sizes[-1] x 1 nn.Sigmoid()) # append a", "layers.append(nn.Linear(feature_dimension, ls)) else: # append a Linear Layer with dimensions layer_sizes[i-1] x ls", "of data (features) \"\"\" Forward batch of data through the net. Args: data:", "library deeply integrated with autograd designed for maximum flexibility from .generators.dataset import Dataset", "to values used in the ALOHA paper (1.0 for malware, 0.1 for count", "sizes) (default: None -> use [512, 512, 128]) dropout_p: Dropout probability (default: 0.05)", "to use normalization_function='batch_norm'): # normalization function to use \"\"\" Initialize net. 
Args: use_malware:", "None) then initialize it to a default of [512, 512, 128] if layer_sizes", "use (may be \"layer_norm\" or \"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware = use_malware self.use_counts", "malware loss into the loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) # update total loss", "= os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini') #", "dataset generator # see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} # initialize return value", ".format(activation_function)) # select normalization function to use based on the normalization_function parameter if", "A dictionary of results from the Net labels: A dictionary of labels loss_wts:", "self.use_counts: rv['count'] = self.count_head(base_out) # append to return value the result of the", "the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "layer_sizes[-1] x 64 nn.ELU(), # append an ELU activation function module nn.Linear(64, 64),", "to use based on the activation_function parameter if activation_function.lower() == 'elu': self.activation_function =", "of results dicts and break them out into a single dict of 1d", "networks library deeply integrated with autograd designed for maximum flexibility from .generators.dataset import", "shape of count_labels # then calculate poisson loss with respect to the ground", "it exists); defaults to values used in the ALOHA paper (1.0 for malware,", "use_tags=True, # whether to use the tags for the data points or not", "dropout dropout_p # create a tuple from the layers list, then apply nn.Sequential", "from config file device = config['general']['device'] class Net(baseNet): \"\"\" This is a simple", "select activation function to use based on the activation_function parameter if activation_function.lower() ==", "labels: # if the count head is 
enabled # extract ground truth count,", "(default: False) use_tags: Whether to use SMART tags as additional targets (default: False)", "generator # see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} # initialize return value dict", "<NAME> and engineer <NAME> and with the support of engineer <NAME>. # #", "array and save it into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count", "based on the activation_function parameter if activation_function.lower() == 'elu': self.activation_function = nn.ELU elif", "malware_labels) # get loss weight (or set to default if not provided) weight", "count head if self.use_tags: rv['tags'] = self.tag_head(base_out) # append to return value the", "== 'prelu': self.activation_function = nn.PReLU elif activation_function.lower() == 'relu': self.activation_function = nn.ReLU else:", "defaults to values used in the ALOHA paper (1.0 for malware, 0.1 for", "size 64 x 64 nn.ELU(), # append an ELU activation function module nn.Linear(64,", "ground truth malware label, convert it to float and allocate it into the", "# append an ELU activation function module nn.Linear(64, n_tags), # append a Linear", "loss_wts['tags'] if 'tags' in loss_wts else 1.0 # copy calculated tags loss into", "flexibility from .generators.dataset import Dataset from .utils.Net import Net as baseNet # get", "not use_counts=True, # whether to use the counts for the data points or", "of dropout dropout_p # create a tuple from the layers list, then apply", "function {}. Try \"layer_norm\" or \"batch_norm\"' .format(activation_function)) # for each layer size in", "head if self.use_counts: rv['count'] = self.count_head(base_out) # append to return value the result", "avoid a FD \"leak\" in the dataset generator # see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv", "for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). 
Note that it uses fewer (and smaller) layers, as", "{'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions, # a dictionary of results from the Net", "into the loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) # update total loss loss_dict['total'] +=", "of professor <NAME> and engineer <NAME> and with the support of engineer <NAME>.", "array # if layer_sizes was not defined (it is None) then initialize it", "counts for the data points or not use_tags=True, # whether to use the", "the activation_function parameter if activation_function.lower() == 'elu': self.activation_function = nn.ELU elif activation_function.lower() ==", "the counts as an additional target (default: False) use_tags: Whether to use SMART", "loss into the loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) # update total loss loss_dict['total']", "save it into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count: # if the count", "[512, 512, 128]) dropout_p: Dropout probability (default: 0.05) activation_function: Non-linear activation function to", "error raise ValueError('Unknown activation function {}. Try \"layer_norm\" or \"batch_norm\"' .format(activation_function)) # for", "= loss_wts['malware'] if 'malware' in loss_wts else 1.0 # copy calculated malware loss", "activation_function.lower() == 'relu': self.activation_function = nn.ReLU else: # if the provided function is", "head of the network (if it exists) \"\"\" Compute Net losses (optionally with", "GPU) tag_labels = labels['tags'].float().to(device) # get predicted tags and then calculate binary cross", "compute_loss(predictions, # a dictionary of results from the Net labels, # a dictionary", "device # (CPU or GPU) malware_labels = labels['malware'].float().to(device) # get predicted malware label,", "{}. 
Try \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function)) # select normalization function to", "= nn.LeakyReLU elif activation_function.lower() == 'prelu': self.activation_function = nn.PReLU elif activation_function.lower() == 'relu':", "update total loss loss_dict['total'] += count_loss * weight if 'tags' in labels: #", "weight return loss_dict # return the losses @staticmethod def normalize_results(labels_dict, # labels (ground", "* weight return loss_dict # return the losses @staticmethod def normalize_results(labels_dict, # labels", "in loss_wts else 1.0 # copy calculated tags loss into the loss dictionary", "engineer <NAME> and with the support of engineer <NAME>. # # Licensed under", "loss weight (or set to default if not provided) weight = loss_wts['tags'] if", "all_tags = Dataset.tags # get config file path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir =", "Weights to assign to each head of the network (if it exists); defaults", "self.activation_function = nn.ELU elif activation_function.lower() == 'leakyrelu': self.activation_function = nn.LeakyReLU elif activation_function.lower() ==", "64 nn.ELU(), # append an ELU activation function module nn.Linear(64, n_tags), # append", "labels) dictionary use_malware: Whether to use malware/benignware labels as a target (default: False)", "os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini') # instantiate", "truth count, convert it to float and allocate it into the selected device", "(may be \"layer_norm\" or \"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware = use_malware self.use_counts =", "a Linear Layer with size 64 x 64 nn.ELU(), # append an ELU", "with size layer_sizes[-1] x 1 nn.Sigmoid()) # append a sigmoid activation function module", "layers.append(self.normalization_function(ls)) # append a Norm layer of size ls 
layers.append(self.activation_function()) # append an", "value # get base result forwarding the data through the base model base_out", "data feature vector (default: 2381) embedding_dimension: Latent space size (unused) (default: 32) layer_sizes:", "and limitations under the License. import configparser # implements a basic configuration language", "dataset all_tags = Dataset.tags # get config file path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir", "\"elu\", \"leakyRelu\", \"pRelu\" or \"relu\") (default: \"elu\") normalization_function: Normalization function to use (may", "class super().__init__() layers = [] # initialize layers array # if layer_sizes was", "the count head is enabled # extract ground truth count, convert it to", "under the License. import configparser # implements a basic configuration language for Python", "not defined (it is None) then initialize it to a default of [512,", "whether to use the counts for the data points or not use_tags=True, #", "layers list, then apply nn.Sequential to get a sequential container # -> this", "tags for the data points or not (default: True) n_tags: Number of tags", "= nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size layer_sizes[-1] x 1", "layer_sizes for i, ls in enumerate(layer_sizes): if i == 0: # append the", "nn.ReLU else: # if the provided function is not recognised, raise error raise", "activation_function.lower() == 'elu': self.activation_function = nn.ELU elif activation_function.lower() == 'leakyrelu': self.activation_function = nn.LeakyReLU", "additional targets (default: False) Returns: Dictionary containing labels and predictions. \"\"\" # a", "to use SMART tags as additional targets (default: False) Returns: Dictionary containing labels", "for all tag predictions, performance will suffer accordingly. 
\"\"\" def __init__(self, use_malware=True, #", "predict feature_dimension=2381, # dimension of the input data feature vector embedding_dimension=32, # latent", "embedding forwarding the data through the base model return {'embedding': self.model_base(data)} @staticmethod def", "simple network loosely based on the one used in ALOHA: Auxiliary Loss Optimization", "tags for the data points or not n_tags=None, # number of tags to", "to return value the result of the malware head if self.use_counts: rv['count'] =", "dictionary use_malware=False, # whether or not to use malware/benignware labels as a target", "file path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath =", "agreed to in writing, software distributed under the License is distributed on #", "a default of [512, 512, 128] if layer_sizes is None: layer_sizes = [512,", "if the count head is enabled # extract ground truth count, convert it", "return value dict if use_malware: # if the malware/benign target label is enabled", "initialize it to a default of [512, 512, 128] if layer_sizes is None:", "poisson regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with", "apply nn.Sequential to get a sequential container # -> this will be the", "create malware/benign labeling head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer", "in labels: # if the malware head is enabled # extract ground truth", "the malware/benign target label is enabled # normalize malware ground truth label array", "config file path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath", "loss_wts is None: loss_wts = {'malware': 1.0, 'count': 0.1, 'tags': 1.0} loss_dict =", "nn.ELU elif activation_function.lower() == 'leakyrelu': self.activation_function = nn.LeakyReLU 
elif activation_function.lower() == 'prelu': self.activation_function", "to the ground truth malware labels malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels) # get loss", "label is enabled # normalize malware ground truth label array and save it", "be \"layer_norm\" or \"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware = use_malware self.use_counts = use_counts", "predicted label array and save it into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count:", "allocate it into the selected device (CPU or GPU) count_labels = labels['count'].float().to(device) #", "sigmoid activation function module # create count poisson regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1],", "dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls)) else: # append a Linear Layer with", "def forward(self, data): # current batch of data (features) \"\"\" Forward batch of", "each head of the network (if it exists); defaults to values used in", "append a Linear Layer with size 64 x 64 nn.ELU(), # append an", "as an additional target (default: False) use_tags: Whether to use SMART tags as", "use the counts as an additional target (default: False) use_tags: Whether to use", "data points or not use_counts=True, # whether to use the counts for the", "License, Version 2.0 (the \"License\"); you may not use this file except in", "as a single layer for all tag predictions, performance will suffer accordingly. 
\"\"\"", "# if the provided normalization function is not recognised, raise error raise ValueError('Unknown", ".generators.dataset import Dataset from .utils.Net import Net as baseNet # get tags from", "dropout_p=0.05, # dropout probability activation_function='elu', # non-linear activation function to use normalization_function='batch_norm'): #", "space size (unused) layer_sizes=None, # layer sizes (array of sizes) dropout_p=0.05, # dropout", "# then calculate binary cross entropy loss with respect to the ground truth", "vector (default: 2381) embedding_dimension: Latent space size (unused) (default: 32) layer_sizes: Layer sizes", "data points or not (default: True) use_counts: Whether to use the counts for", "count array and save it into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize predicted", "Please include n_tags\") # initialize super class super().__init__() layers = [] # initialize", "use_malware: # if the malware/benign target label is enabled # normalize malware ground", "of the network (if it exists) \"\"\" Compute Net losses (optionally with SMART", "tags loss into the loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) # update total loss", "# dropout probability activation_function='elu', # non-linear activation function to use normalization_function='batch_norm'): # normalization", "= torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss weight (or set to default if not", "is not recognised, raise error raise ValueError('Unknown activation function {}. 
Try \"elu\", \"leakyRelu\",", "regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size", "self.sigmoid = nn.Sigmoid() # create a tag multi-label classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1],", "a sequential container # -> this will be the model base self.model_base =", "Linear Layer with size 64 x n_tags nn.Sigmoid()) # append a sigmoid activation", "weight (or set to default if not provided) weight = loss_wts['count'] if 'count'", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "2381) embedding_dimension: Latent space size (unused) (default: 32) layer_sizes: Layer sizes (array of", "input data feature vector (default: 2381) embedding_dimension: Latent space size (unused) (default: 32)", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License", "# extract ground truth malware label, convert it to float and allocate it", "tags to predict (default: None) feature_dimension: Dimension of the input data feature vector", "# initialize dictionary of losses if 'malware' in labels: # if the malware", "sizes (array of sizes) (default: None -> use [512, 512, 128]) dropout_p: Dropout", "a Linear Layer with size layer_sizes[-1] x 64 nn.ELU(), # append an ELU", "fewer (and smaller) layers, as well as a single layer for all tag", "count array and save it into rv rv['pred_count'] = Net.detach_and_copy_array(results_dict['count']) if use_tags: #", "array and save it into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count: # if", "data points or not (default: True) n_tags: Number of tags to predict (default:", "default if not provided) weight = loss_wts['malware'] if 'malware' in loss_wts else 1.0", "'count' in labels: # if the count head is enabled # extract ground", "True) n_tags: Number of tags to predict (default: None) feature_dimension: Dimension of the", 
"(https://arxiv.org/abs/1903.05700). Note that it uses fewer (and smaller) layers, as well as a", "dropout_p: Dropout probability (default: 0.05) activation_function: Non-linear activation function to use (may be", "malware predicted label array and save it into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if", "forwarding the data through the base model return {'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions,", "n_tags=None, # number of tags to predict feature_dimension=2381, # dimension of the input", "this file except in compliance with # the License. You may obtain a", "data points or not (default: True) use_tags: Whether to use the SMART tags", "support of engineer <NAME>. # # Licensed under the Apache License, Version 2.0", "feature vector (default: 2381) embedding_dimension: Latent space size (unused) (default: 32) layer_sizes: Layer", "probability activation_function='elu', # non-linear activation function to use normalization_function='batch_norm'): # normalization function to", "nn.Sequential to get a sequential container # -> this will be the model", "system dependent functionality from copy import deepcopy # creates a new object and", "see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} # initialize return value dict if use_malware:", "Net losses (optionally with SMART tags and vendor detection count auxiliary losses). 
Args:", "exists) \"\"\" Compute Net losses (optionally with SMART tags and vendor detection count", "cross entropy loss with respect to the ground truth malware labels malware_loss =", "or not use_tags=True, # whether to use the tags for the data points", "# a neural networks library deeply integrated with autograd designed for maximum flexibility", "loss_wts=None): # weights to assign to each head of the network (if it", "(it is None) then initialize it to a default of [512, 512, 128]", "# a lot of deepcopies are done here to avoid a FD \"leak\"", "# initialize return value # get base result forwarding the data through the", "is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "label array and save it into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count: #", "activation function to use based on the activation_function parameter if activation_function.lower() == 'elu':", "additional target (default: False) use_tags: Whether to use SMART tags as additional targets", "size 64 x n_tags nn.Sigmoid()) # append a sigmoid activation function module def", "function to use based on the activation_function parameter if activation_function.lower() == 'elu': self.activation_function", "and save it into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize predicted tag", "each tag) Returns: Loss dictionary. 
\"\"\" # if no loss_wts were provided set", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "new object and recursively copies the original object elements import torch # tensor", "is None) then initialize it to a default of [512, 512, 128] if", "\"leak\" in the dataset generator # see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {} #", "loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) # update total loss loss_dict['total'] += tags_loss *", "rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted label array and save it into", "under the supervision # of professor <NAME> and engineer <NAME> and with the", "from the Net labels: A dictionary of labels loss_wts: Weights to assign to", "whether or not to use malware/benignware labels as a target use_count=False, # whether", "rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize predicted count array and save it into", "use the counts for the data points or not (default: True) use_tags: Whether", "normalization_function='batch_norm'): # normalization function to use \"\"\" Initialize net. Args: use_malware: Whether to", "based on the normalization_function parameter if normalization_function.lower() == 'layer_norm': self.normalization_function = nn.LayerNorm elif", "selected device # (CPU or GPU) malware_labels = labels['malware'].float().to(device) # get predicted malware", "normalization function to use \"\"\" Initialize net. 
Args: use_malware: Whether to use the", "it into rv rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware']) if use_count: # if the count additional", "SMART tags as additional targets \"\"\" Take a set of results dicts and", "deepcopy(malware_loss.item()) # update total loss loss_dict['total'] += malware_loss * weight if 'count' in", "enabled # normalize malware ground truth label array and save it into rv", "= [] # initialize layers array # if layer_sizes was not defined (it", "License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "None -> use [512, 512, 128]) dropout_p: Dropout probability (default: 0.05) activation_function: Non-linear", "an exception if self.use_tags and self.n_tags is None: raise ValueError(\"n_tags was None but", "os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath = os.path.join(src_dir, 'config.ini') # instantiate config parser and", "the selected device (CPU or GPU) tag_labels = labels['tags'].float().to(device) # get predicted tags", "self.model_base(data) if self.use_malware: rv['malware'] = self.malware_head(base_out) # append to return value the result", "into the loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) # update total loss loss_dict['total'] +=", "count auxiliary losses). Args: predictions: A dictionary of results from the Net labels:", "with size 64 x 64 nn.ELU(), # append an ELU activation function module", "loss_dict['malware'] = deepcopy(malware_loss.item()) # update total loss loss_dict['total'] += malware_loss * weight if", "# update total loss loss_dict['total'] += tags_loss * weight return loss_dict # return", "predictions, performance will suffer accordingly. 
\"\"\" def __init__(self, use_malware=True, # whether to use", "count_labels # then calculate poisson loss with respect to the ground truth count", "operating system dependent functionality from copy import deepcopy # creates a new object", "\"\"\" # a lot of deepcopies are done here to avoid a FD", "functionality from copy import deepcopy # creates a new object and recursively copies", "get loss weight (or set to default if not provided) weight = loss_wts['malware']", "to use malware/benignware labels as a target (default: False) use_count: Whether to use", "malware/benignware labels as a target (default: False) use_count: Whether to use the counts", "whether to use the tags for the data points or not n_tags=None, #", "with dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls)) else: # append a Linear Layer", "array and save it into rv rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column]) # normalize predicted", "TORSEC research group of the Polytechnic of Turin (Italy) under the supervision #", "ls)) layers.append(self.normalization_function(ls)) # append a Norm layer of size ls layers.append(self.activation_function()) # append", "provided normalization function is not recognised, raise error raise ValueError('Unknown activation function {}.", "labels) dictionary use_malware=False, # whether or not to use malware/benignware labels as a", "convert it to float and allocate it into the selected device # (CPU", "License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "# specific language governing permissions and limitations under the License. 
import configparser #", "initialize layers array # if layer_sizes was not defined (it is None) then", "strong GPU support import torch.nn.functional as F # pytorch neural network functional interface", "# create a tag multi-label classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append", "(or set to default if not provided) weight = loss_wts['tags'] if 'tags' in", "label, convert it to float and allocate it into the selected device #", "use_counts: Whether to use the counts for the data points or not (default:", "# for each layer size in layer_sizes for i, ls in enumerate(layer_sizes): if", "in ALOHA: Auxiliary Loss Optimization for Hypothesis Augmentation (https://arxiv.org/abs/1903.05700). Note that it uses", "the tags for the data points or not n_tags=None, # number of tags", "value dict if use_malware: # if the malware/benign target label is enabled #", "to default if not provided) weight = loss_wts['tags'] if 'tags' in loss_wts else", "column, tag in enumerate(all_tags): # for all the tags # normalize ground truth", "128]) dropout_p: Dropout probability (default: 0.05) activation_function: Non-linear activation function to use (may", "the SMART tags additional targets are enabled for column, tag in enumerate(all_tags): #", "= nn.Sigmoid() # create a tag multi-label classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64),", "out into a single dict of 1d arrays with appropriate column names that", "to each head of the network (if it exists) \"\"\" Compute Net losses", "x ls layers.append(nn.Linear(feature_dimension, ls)) else: # append a Linear Layer with dimensions layer_sizes[i-1]", "paper (1.0 for malware, 0.1 for count and each tag) Returns: Loss dictionary.", "(default: 32) layer_sizes: Layer sizes (array of sizes) (default: None -> use [512,", "or not to use SMART tags as additional targets \"\"\" Take a set", "a simple network loosely based on the one used in ALOHA: Auxiliary Loss", "# append a Linear 
Layer with size layer_sizes[-1] x 64 nn.ELU(), # append", "network loosely based on the one used in ALOHA: Auxiliary Loss Optimization for", "labels: A dictionary of labels loss_wts: Weights to assign to each head of", "<NAME>. # # Developed as a thesis project at the TORSEC research group", "set to default if not provided) weight = loss_wts['tags'] if 'tags' in loss_wts", "loss loss_dict['total'] += tags_loss * weight return loss_dict # return the losses @staticmethod", "the # specific language governing permissions and limitations under the License. import configparser", "= loss_wts['tags'] if 'tags' in loss_wts else 1.0 # copy calculated tags loss", "is None: layer_sizes = [512, 512, 128] # select activation function to use", "containing the resulting embedding. \"\"\" # get embedding forwarding the data through the", "= nn.LayerNorm elif normalization_function.lower() == 'batch_norm': self.normalization_function = nn.BatchNorm1d else: # if the", "{} # initialize return value # get base result forwarding the data through", "'tags': 1.0} loss_dict = {'total': 0.} # initialize dictionary of losses if 'malware'", "= Net.detach_and_copy_array(results_dict['count']) if use_tags: # if the SMART tags additional targets are enabled", "from .generators.dataset import Dataset from .utils.Net import Net as baseNet # get tags", "if the provided normalization function is not recognised, raise error raise ValueError('Unknown activation", "data): # current batch of data (features) \"\"\" Forward batch of data through", "KIND, either express or implied. 
See the License for the # specific language", "on the normalization_function parameter if normalization_function.lower() == 'layer_norm': self.normalization_function = nn.LayerNorm elif normalization_function.lower()", "tags additional targets are enabled for column, tag in enumerate(all_tags): # for all", "size layer_sizes[-1] x 1 nn.ReLU()) # append a Relu activation function module #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "64 nn.ELU(), # append an ELU activation function module nn.Linear(64, 64), # append", "dictionary of results from the Net labels, # a dictionary of labels loss_wts=None):", "as a target use_count=False, # whether or not to use the counts as", "as baseNet # get tags from the dataset all_tags = Dataset.tags # get", "deepcopy(tags_loss.item()) # update total loss loss_dict['total'] += tags_loss * weight return loss_dict #", "x 1 nn.ReLU()) # append a Relu activation function module # sigmoid activation", "additional target use_tags=False): # whether or not to use SMART tags as additional", "results (predicted labels) dictionary use_malware=False, # whether or not to use malware/benignware labels", "x n_tags nn.Sigmoid()) # append a sigmoid activation function module def forward(self, data):", "for the data points or not (default: True) n_tags: Number of tags to", "malware_labels # then calculate binary cross entropy loss with respect to the ground", "whether or not to use SMART tags as additional targets \"\"\" Take a", "it to float and allocate it into the selected device (CPU or GPU)", "# current batch of data (features) \"\"\" Forward batch of data through the", "dictionary results_dict, # results (predicted labels) dictionary use_malware=False, # whether or not to", "or \"relu\") (default: \"elu\") normalization_function: Normalization function to use (may be \"layer_norm\" or", "tensor library like NumPy, with strong GPU support import torch.nn.functional as F #", "of labels 
loss_wts=None): # weights to assign to each head of the network", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "to use the malicious label for the data points or not use_counts=True, #", "initialize super class super().__init__() layers = [] # initialize layers array # if", "class Net(baseNet): \"\"\" This is a simple network loosely based on the one", "dict of 1d arrays with appropriate column names that pandas can convert to", "value the result of the malware head if self.use_counts: rv['count'] = self.count_head(base_out) #", "to the ground truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss weight", "non-linear activation function to use normalization_function='batch_norm'): # normalization function to use \"\"\" Initialize", "was not defined (it is None) then initialize it to a default of", "language for Python programs import os # provides a portable way of using", "# tensor library like NumPy, with strong GPU support import torch.nn.functional as F", "# a dictionary of results from the Net labels, # a dictionary of", "loss_wts['count'] if 'count' in loss_wts else 1.0 # copy calculated count loss into", "loss_wts else 1.0 # copy calculated count loss into the loss dictionary loss_dict['count']", "get predicted count, reshape it to the same shape of count_labels # then", "labels loss_wts=None): # weights to assign to each head of the network (if", "the loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) # update total loss loss_dict['total'] += malware_loss", "truth count array and save it into rv rv['label_count'] = Net.detach_and_copy_array(labels_dict['count']) # normalize", "if the tags head is enabled # extract ground truth tags, convert them", "truth) dictionary results_dict: Results (predicted labels) dictionary use_malware: Whether to use malware/benignware labels", "in labels: # if the tags head is 
enabled # extract ground truth", "the data points or not use_tags=True, # whether to use the tags for", "Net.detach_and_copy_array(results_dict['count']) if use_tags: # if the SMART tags additional targets are enabled for", "additional targets \"\"\" Take a set of results dicts and break them out", "tag predictions, performance will suffer accordingly. \"\"\" def __init__(self, use_malware=True, # whether to", "through the base model base_out = self.model_base(data) if self.use_malware: rv['malware'] = self.malware_head(base_out) #", "head of the network (if it exists); defaults to values used in the", "and allocate it into the selected device # (CPU or GPU) malware_labels =", "loss_wts['malware'] if 'malware' in loss_wts else 1.0 # copy calculated malware loss into", "initialize return value dict if use_malware: # if the malware/benign target label is", "to use (may be \"layer_norm\" or \"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware = use_malware", "values used in the ALOHA paper (1.0 for malware, 0.1 for count and", "loss into the loss dictionary loss_dict['count'] = deepcopy(count_loss.item()) # update total loss loss_dict['total']", "save it into rv rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware']) # normalize malware predicted label array", "sigmoid activation function self.sigmoid = nn.Sigmoid() # create a tag multi-label classifying head", "permissions and limitations under the License. 
import configparser # implements a basic configuration", "\"pRelu\" or \"relu\"' .format(activation_function)) # select normalization function to use based on the", "or \"batch_norm\"' .format(activation_function)) # for each layer size in layer_sizes for i, ls", "to the same shape of malware_labels # then calculate binary cross entropy loss", "that it uses fewer (and smaller) layers, as well as a single layer", "activation function module # create count poisson regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1),", "tags as additional targets (default: False) Returns: Dictionary containing labels and predictions. \"\"\"", "import torch.nn.functional as F # pytorch neural network functional interface from torch import", "(unused) layer_sizes=None, # layer sizes (array of sizes) dropout_p=0.05, # dropout probability activation_function='elu',", "project at the TORSEC research group of the Polytechnic of Turin (Italy) under", "if 'count' in loss_wts else 1.0 # copy calculated count loss into the", "return the losses @staticmethod def normalize_results(labels_dict, # labels (ground truth) dictionary results_dict, #", "use_malware=False, # whether or not to use malware/benignware labels as a target use_count=False,", "layer of size ls layers.append(self.activation_function()) # append an ELU activation function module layers.append(nn.Dropout(dropout_p))", "copies the original object elements import torch # tensor library like NumPy, with", "layer size in layer_sizes for i, ls in enumerate(layer_sizes): if i == 0:", "# # Unless required by applicable law or agreed to in writing, software", "of deepcopies are done here to avoid a FD \"leak\" in the dataset", "except in compliance with # the License. 
You may obtain a copy of", "probability (default: 0.05) activation_function: Non-linear activation function to use (may be \"elu\", \"leakyRelu\",", "tags_loss * weight return loss_dict # return the losses @staticmethod def normalize_results(labels_dict, #", "a single dict of 1d arrays with appropriate column names that pandas can", "tags to predict feature_dimension=2381, # dimension of the input data feature vector embedding_dimension=32,", "or \"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware = use_malware self.use_counts = use_counts self.use_tags =", "self.n_tags is None: raise ValueError(\"n_tags was None but we're trying to predict tags.", "feature vector embedding_dimension=32, # latent space size (unused) layer_sizes=None, # layer sizes (array", "GPU) malware_labels = labels['malware'].float().to(device) # get predicted malware label, reshape it to the", "dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i - 1], ls)) layers.append(self.normalization_function(ls)) # append a Norm", "layer_sizes: Layer sizes (array of sizes) (default: None -> use [512, 512, 128])", "to use the SMART tags for the data points or not (default: True)", "FD \"leak\" in the dataset generator # see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189 rv = {}", "or not (default: True) use_tags: Whether to use the SMART tags for the", "(Italy) under the supervision # of professor <NAME> and engineer <NAME> and with", "is not recognised, raise error raise ValueError('Unknown activation function {}. 
Try \"layer_norm\" or", "assign to each head of the network (if it exists); defaults to values", "# normalize ground truth count array and save it into rv rv['label_count'] =", "append a Linear Layer with size layer_sizes[-1] x 64 nn.ELU(), # append an", "Whether to use the counts for the data points or not (default: True)", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) # update total loss loss_dict['total'] += malware_loss *", "use_tags self.n_tags = n_tags # if we set to use tags but n_tags", "convert it to float and allocate it into the selected device (CPU or", "here to avoid a FD \"leak\" in the dataset generator # see here:", "feature_dimension: Dimension of the input data feature vector (default: 2381) embedding_dimension: Latent space", "or not to use the counts as an additional target use_tags=False): # whether", "# get loss weight (or set to default if not provided) weight =", "a set of results dicts and break them out into a single dict", "them to float and allocate them into the selected device (CPU or GPU)", "if 'malware' in labels: # if the malware head is enabled # extract", "x 64 nn.ELU(), # append an ELU activation function module nn.Linear(64, n_tags), #", "probability of dropout dropout_p # create a tuple from the layers list, then", "dropout_p # create a tuple from the layers list, then apply nn.Sequential to", "\"layer_norm\" or \"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware = use_malware self.use_counts = use_counts self.use_tags", "# append a Linear Layer with dimensions layer_sizes[i-1] x ls layers.append(nn.Linear(layer_sizes[i - 1],", "to use the counts as an additional target (default: False) use_tags: Whether to", "return value def get_embedding(self, data): # current batch of data (features) \"\"\" Forward", "\"\"\" # get embedding forwarding the data through the base model return {'embedding':", "weight = 
loss_wts['count'] if 'count' in loss_wts else 1.0 # copy calculated count", "result of the count head if self.use_tags: rv['tags'] = self.tag_head(base_out) # append to", "neural networks library deeply integrated with autograd designed for maximum flexibility from .generators.dataset", "model return {'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions, # a dictionary of results from", "classifying head self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a Linear Layer with size", "device (CPU or GPU) count_labels = labels['count'].float().to(device) # get predicted count, reshape it", "\"\"\" Forward batch of data through the net and get resulting embedding. Args:", "configparser # implements a basic configuration language for Python programs import os #", "nn.Sigmoid()) # append a sigmoid activation function module def forward(self, data): # current", "= labels['count'].float().to(device) # get predicted count, reshape it to the same shape of", "normalize_results(labels_dict, # labels (ground truth) dictionary results_dict, # results (predicted labels) dictionary use_malware=False,", "truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels) # get loss weight (or set to", "current batch of data (features) \"\"\" Forward batch of data through the net", "is a simple network loosely based on the one used in ALOHA: Auxiliary", "calculate poisson loss with respect to the ground truth count count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape),", "label for the data points or not use_counts=True, # whether to use the", "truth tags, convert them to float and allocate them into the selected device", "\"batch_norm\") (default: \"batch_norm\") \"\"\" self.use_malware = use_malware self.use_counts = use_counts self.use_tags = use_tags", "of using operating system dependent functionality from copy import deepcopy # creates a", "error raise 
ValueError('Unknown activation function {}. Try \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\"' .format(activation_function))", "@staticmethod def compute_loss(predictions, # a dictionary of results from the Net labels, #", "import Net as baseNet # get tags from the dataset all_tags = Dataset.tags", "distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "Whether to use malware/benignware labels as a target (default: False) use_count: Whether to", "the selected device (CPU or GPU) count_labels = labels['count'].float().to(device) # get predicted count,", "(1.0 for malware, 0.1 for count and each tag) Returns: Loss dictionary. \"\"\"", "malware label, convert it to float and allocate it into the selected device", "Labels (ground truth) dictionary results_dict: Results (predicted labels) dictionary use_malware: Whether to use", "with SMART tags and vendor detection count auxiliary losses). Args: predictions: A dictionary", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "of tags to predict feature_dimension=2381, # dimension of the input data feature vector", "suffer accordingly. \"\"\" def __init__(self, use_malware=True, # whether to use the malicious label", "is None: loss_wts = {'malware': 1.0, 'count': 0.1, 'tags': 1.0} loss_dict = {'total':", "path nets_dir = os.path.dirname(os.path.abspath(__file__)) model_dir = os.path.dirname(nets_dir) src_dir = os.path.dirname(model_dir) config_filepath = os.path.join(src_dir,", "head self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a Linear Layer with size layer_sizes[-1]", "return the return value def get_embedding(self, data): # current batch of data (features)", "to a default of [512, 512, 128] if layer_sizes is None: layer_sizes =", "Copyright 2021, <NAME>. 
# # Developed as a thesis project at the TORSEC", "normalize predicted tag array and save it into rv rv['pred_{}_tag'.format(tag)] = Net.detach_and_copy_array(results_dict['tags'][:, column])", "not (default: True) use_counts: Whether to use the counts for the data points", "using operating system dependent functionality from copy import deepcopy # creates a new", "# create count poisson regression head self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1), # append a", "1 nn.ReLU()) # append a Relu activation function module # sigmoid activation function", "size layer_sizes[-1] x 1 nn.Sigmoid()) # append a sigmoid activation function module #", "i == 0: # append the first Linear Layer with dimensions feature_dimension x", "or GPU) malware_labels = labels['malware'].float().to(device) # get predicted malware label, reshape it to", "function to use based on the normalization_function parameter if normalization_function.lower() == 'layer_norm': self.normalization_function", "or GPU) tag_labels = labels['tags'].float().to(device) # get predicted tags and then calculate binary", "(ground truth) dictionary results_dict: Results (predicted labels) dictionary use_malware: Whether to use malware/benignware", "target label is enabled # normalize malware ground truth label array and save", "Dictionary containing labels and predictions. 
\"\"\" # a lot of deepcopies are done", "import Dataset from .utils.Net import Net as baseNet # get tags from the", "Linear Layer with size layer_sizes[-1] x 64 nn.ELU(), # append an ELU activation", "enabled # extract ground truth count, convert it to float and allocate it", "in writing, software distributed under the License is distributed on # an \"AS", "with size 64 x n_tags nn.Sigmoid()) # append a sigmoid activation function module", "copy calculated malware loss into the loss dictionary loss_dict['malware'] = deepcopy(malware_loss.item()) # update", "set of results dicts and break them out into a single dict of", "raise an exception if self.use_tags and self.n_tags is None: raise ValueError(\"n_tags was None", "value def get_embedding(self, data): # current batch of data (features) \"\"\" Forward batch", "layers = [] # initialize layers array # if layer_sizes was not defined", "targets \"\"\" Take a set of results dicts and break them out into", "and then calculate binary cross entropy loss with respect to the ground truth", "to return value the result of the count head if self.use_tags: rv['tags'] =", "(unused) (default: 32) layer_sizes: Layer sizes (array of sizes) (default: None -> use", "set some default values if loss_wts is None: loss_wts = {'malware': 1.0, 'count':", "the loss dictionary loss_dict['tags'] = deepcopy(tags_loss.item()) # update total loss loss_dict['total'] += tags_loss", "# whether to use the malicious label for the data points or not", "+= count_loss * weight if 'tags' in labels: # if the tags head", "resulting embedding. 
Args: data: Current batch of data (features) Returns: Dictionary containing the", "applicable law or agreed to in writing, software distributed under the License is", "single dict of 1d arrays with appropriate column names that pandas can convert", "and self.n_tags is None: raise ValueError(\"n_tags was None but we're trying to predict", "points or not use_tags=True, # whether to use the tags for the data", "0.1, 'tags': 1.0} loss_dict = {'total': 0.} # initialize dictionary of losses if", "{'total': 0.} # initialize dictionary of losses if 'malware' in labels: # if", "targets (default: False) Returns: Dictionary containing labels and predictions. \"\"\" # a lot", "or not to use malware/benignware labels as a target use_count=False, # whether or", "layers array # if layer_sizes was not defined (it is None) then initialize", "all tag predictions, performance will suffer accordingly. \"\"\" def __init__(self, use_malware=True, # whether", "512, 128] # select activation function to use based on the activation_function parameter", "data (features) Returns: Dictionary containing the resulting embedding. \"\"\" # get embedding forwarding", "labels and predictions. \"\"\" # a lot of deepcopies are done here to", "Norm layer of size ls layers.append(self.activation_function()) # append an ELU activation function module", "a DataFrame. 
Args: labels_dict: Labels (ground truth) dictionary results_dict: Results (predicted labels) dictionary", "loss dictionary loss_dict['count'] = deepcopy(count_loss.item()) # update total loss loss_dict['total'] += count_loss *", "use_tags: Whether to use SMART tags as additional targets (default: False) Returns: Dictionary", "Linear Layer with size 64 x 64 nn.ELU(), # append an ELU activation", "if layer_sizes was not defined (it is None) then initialize it to a", "(CPU or GPU) tag_labels = labels['tags'].float().to(device) # get predicted tags and then calculate", "function to use (may be \"elu\", \"leakyRelu\", \"pRelu\" or \"relu\") (default: \"elu\") normalization_function:", "data through the base model return {'embedding': self.model_base(data)} @staticmethod def compute_loss(predictions, # a", "Linear Layer with dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension, ls)) else: # append a", "# of professor <NAME> and engineer <NAME> and with the support of engineer", "specific language governing permissions and limitations under the License. import configparser # implements", "creates a new object and recursively copies the original object elements import torch", "activation function {}. Try \"layer_norm\" or \"batch_norm\"' .format(activation_function)) # for each layer size", "0: # append the first Linear Layer with dimensions feature_dimension x ls layers.append(nn.Linear(feature_dimension,", "the tag head return rv # return the return value def get_embedding(self, data):", "(and smaller) layers, as well as a single layer for all tag predictions,", "self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64), # append a Linear Layer with size layer_sizes[-1] x", "not recognised, raise error raise ValueError('Unknown activation function {}. Try \"layer_norm\" or \"batch_norm\"'", "value the result of the tag head return rv # return the return", "# Copyright 2021, <NAME>. 
# # Developed as a thesis project at the", "[512, 512, 128] if layer_sizes is None: layer_sizes = [512, 512, 128] #", "target is enabled # normalize ground truth count array and save it into", "None raise an exception if self.use_tags and self.n_tags is None: raise ValueError(\"n_tags was", "malware head is enabled # extract ground truth malware label, convert it to" ]
[ "as themes from validator.errorbundler import ErrorBundle from validator.constants import PACKAGE_THEME from helper import", "import _do_real_test_raw def test_theme_chrome_manifest(): \"Tests that a theme has a valid chrome manifest", "test_js_banned(): \"\"\"Test that JS is banned in themes.\"\"\" err = _do_real_test_raw(\"\"\"foo();\"\"\", detected_type=PACKAGE_THEME) print", "None def test_js_banned(): \"\"\"Test that JS is banned in themes.\"\"\" err = _do_real_test_raw(\"\"\"foo();\"\"\",", "test_theme_chrome_manifest(): \"Tests that a theme has a valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest,", "_do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests that a theme has an invalid chrome", "validator.testcases.themes as themes from validator.errorbundler import ErrorBundle from validator.constants import PACKAGE_THEME from helper", "has a valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests that", "invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests that validation is skipped", "that validation is skipped if there is no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None)", "_do_test from js_helper import _do_real_test_raw def test_theme_chrome_manifest(): \"Tests that a theme has a", "chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests that a theme has", "if there is no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is None def test_js_banned():", "an invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests 
that validation is", "def test_js_banned(): \"\"\"Test that JS is banned in themes.\"\"\" err = _do_real_test_raw(\"\"\"foo();\"\"\", detected_type=PACKAGE_THEME)", "\"\"\"Test that JS is banned in themes.\"\"\" err = _do_real_test_raw(\"\"\"foo();\"\"\", detected_type=PACKAGE_THEME) print err.print_summary(verbose=True)", "themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests that a theme has an invalid chrome manifest", "_do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests that validation is skipped if there is no", "there is no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is None def test_js_banned(): \"\"\"Test", "assert themes.test_theme_manifest(ErrorBundle(), None) is None def test_js_banned(): \"\"\"Test that JS is banned in", "a theme has a valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest():", "file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests that validation is skipped if there is", "ErrorBundle from validator.constants import PACKAGE_THEME from helper import _do_test from js_helper import _do_real_test_raw", "file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests that a theme has an invalid", "\"Tests that a theme has an invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def", "has an invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests that validation", "theme has an invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests that", "PACKAGE_THEME from helper import 
_do_test from js_helper import _do_real_test_raw def test_theme_chrome_manifest(): \"Tests that", "test_no_chrome_manifest(): \"Tests that validation is skipped if there is no chrome manifest.\" assert", "is no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is None def test_js_banned(): \"\"\"Test that", "that JS is banned in themes.\"\"\" err = _do_real_test_raw(\"\"\"foo();\"\"\", detected_type=PACKAGE_THEME) print err.print_summary(verbose=True) assert", "\"Tests that validation is skipped if there is no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(),", "test_theme_bad_chrome_manifest(): \"Tests that a theme has an invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest)", "is skipped if there is no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is None", "import PACKAGE_THEME from helper import _do_test from js_helper import _do_real_test_raw def test_theme_chrome_manifest(): \"Tests", "None) is None def test_js_banned(): \"\"\"Test that JS is banned in themes.\"\"\" err", "\"Tests that a theme has a valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False)", "valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests that a theme", "no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is None def test_js_banned(): \"\"\"Test that JS", "manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests that a theme has an", "that a theme has an invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest():", "validator.errorbundler import ErrorBundle from validator.constants import PACKAGE_THEME from helper import 
_do_test from js_helper", "themes.test_theme_manifest(ErrorBundle(), None) is None def test_js_banned(): \"\"\"Test that JS is banned in themes.\"\"\"", "validator.constants import PACKAGE_THEME from helper import _do_test from js_helper import _do_real_test_raw def test_theme_chrome_manifest():", "manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is None def test_js_banned(): \"\"\"Test that JS is banned", "themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests that validation is skipped if there is no chrome", "from js_helper import _do_real_test_raw def test_theme_chrome_manifest(): \"Tests that a theme has a valid", "chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is None def test_js_banned(): \"\"\"Test that JS is", "a theme has an invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests", "JS is banned in themes.\"\"\" err = _do_real_test_raw(\"\"\"foo();\"\"\", detected_type=PACKAGE_THEME) print err.print_summary(verbose=True) assert err.failed()", "helper import _do_test from js_helper import _do_real_test_raw def test_theme_chrome_manifest(): \"Tests that a theme", "that a theme has a valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def", "a valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests that a", "theme has a valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\", themes.test_theme_manifest, False) def test_theme_bad_chrome_manifest(): \"Tests", "is None def test_js_banned(): \"\"\"Test that JS is banned in themes.\"\"\" err =", "False) def test_theme_bad_chrome_manifest(): \"Tests that a theme has an invalid chrome manifest file.\"", "from helper import _do_test from js_helper import _do_real_test_raw def 
test_theme_chrome_manifest(): \"Tests that a", "js_helper import _do_real_test_raw def test_theme_chrome_manifest(): \"Tests that a theme has a valid chrome", "_do_real_test_raw def test_theme_chrome_manifest(): \"Tests that a theme has a valid chrome manifest file.\"", "import _do_test from js_helper import _do_real_test_raw def test_theme_chrome_manifest(): \"Tests that a theme has", "import validator.testcases.themes as themes from validator.errorbundler import ErrorBundle from validator.constants import PACKAGE_THEME from", "from validator.constants import PACKAGE_THEME from helper import _do_test from js_helper import _do_real_test_raw def", "from validator.errorbundler import ErrorBundle from validator.constants import PACKAGE_THEME from helper import _do_test from", "def test_theme_bad_chrome_manifest(): \"Tests that a theme has an invalid chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\",", "manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests that validation is skipped if there", "def test_no_chrome_manifest(): \"Tests that validation is skipped if there is no chrome manifest.\"", "validation is skipped if there is no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is", "import ErrorBundle from validator.constants import PACKAGE_THEME from helper import _do_test from js_helper import", "def test_theme_chrome_manifest(): \"Tests that a theme has a valid chrome manifest file.\" _do_test(\"tests/resources/themes/pass.jar\",", "skipped if there is no chrome manifest.\" assert themes.test_theme_manifest(ErrorBundle(), None) is None def", "chrome manifest file.\" _do_test(\"tests/resources/themes/fail.jar\", themes.test_theme_manifest) def test_no_chrome_manifest(): \"Tests that validation is skipped if", "themes from validator.errorbundler import ErrorBundle from validator.constants import PACKAGE_THEME from helper import _do_test" ]
[ "api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\")", "return make_response(render_template('index.html'), 200, headers) def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func is None:", "import CORS from resources import * import config import sys import os from", "import SSL from flask import request context = SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key", "from resources import * import config import sys import os from OpenSSL import", "\"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e): headers = {'Content-Type': 'text/html'} return", "need to update the token if it exists. SOME requests need to validate", "import os from OpenSSL import SSL from flask import request context = SSL.Context(SSL.SSLv23_METHOD)", "'/HelloWorld') # TODO remove eventually (keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule,", "the token if it exists. SOME requests need to validate the token permissions.", "import sys import os from OpenSSL import SSL from flask import request context", "requests need to update the token if it exists. 
SOME requests need to", "\"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e): headers", "\"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\")", "#!/usr/bin/env python3 from flask import Flask, render_template, make_response from common import DatabaseMigrator from", "placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\")", "that the SSL certificate exists if not run http:// if os.path.isfile(cer) and os.path.isfile(key):", "make_response(render_template('index.html'), 200, headers) def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func is None: raise", "Flask, render_template, make_response from common import DatabaseMigrator from flask_restful import Api from flask_cors", "= CORS(app) # TODO ALL requests need to update the token if it", "api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users,", "= os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api =", "static_folder='dist', template_folder='dist') api = Api(app) cors = CORS(app) # TODO ALL requests need", "need to validate the 
token permissions. api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep", "api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e): headers =", "not run http:// if os.path.isfile(cer) and os.path.isfile(key): context = (cer, key) app.run(host=config.app_settings['host'], port=config.app_settings['port'],", "it exists. SOME requests need to validate the token permissions. api.add_resource(HelloWorld, '/HelloWorld') #", "context = SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='',", "import config import sys import os from OpenSSL import SSL from flask import", "'Server shutting down...' db = DatabaseMigrator() db.migrate(False) if __name__ == \"__main__\": # Check", "OpenSSL import SSL from flask import request context = SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer'])", "def catch_all(e): headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200, headers) def shutdown_server(): func", "exists. SOME requests need to validate the token permissions. 
api.add_resource(HelloWorld, '/HelloWorld') # TODO", "\"__main__\": # Check that the SSL certificate exists if not run http:// if", "api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404)", "running with the Werkzeug Server') func() @app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server() return 'Server", "endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login,", "headers) def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not running", "'/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name api.add_resource(GameStats,", "api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster,", "import Flask, render_template, make_response from common import DatabaseMigrator from flask_restful import Api from", "certificate exists if not run http:// if os.path.isfile(cer) and os.path.isfile(key): context = (cer,", "os.path.isfile(cer) and os.path.isfile(key): context = (cer, key) app.run(host=config.app_settings['host'], port=config.app_settings['port'], ssl_context=context, debug=config.app_settings['debug']) else: app.run(host=config.app_settings['host'],", 
"api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name", "common import DatabaseMigrator from flask_restful import Api from flask_cors import CORS from resources", "os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api = Api(app) cors = CORS(app)", "'/api/tournament-schedule') # TODO placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League,", "SSL certificate exists if not run http:// if os.path.isfile(cer) and os.path.isfile(key): context =", "request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not running with the Werkzeug Server') func()", "api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\")", "make_response from common import DatabaseMigrator from flask_restful import Api from flask_cors import CORS", "api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e): headers = {'Content-Type': 'text/html'}", "\"/\") @app.errorhandler(404) def catch_all(e): headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200, headers) def", "python3 from flask import Flask, render_template, make_response from common import DatabaseMigrator from flask_restful", "remove eventually (keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') 
api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule')", "import Api from flask_cors import CORS from resources import * import config import", "from flask import Flask, render_template, make_response from common import DatabaseMigrator from flask_restful import", "methods=['POST']) def shutdown(): shutdown_server() return 'Server shutting down...' db = DatabaseMigrator() db.migrate(False) if", "Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api = Api(app) cors = CORS(app) # TODO ALL", "the SSL certificate exists if not run http:// if os.path.isfile(cer) and os.path.isfile(key): context", "token if it exists. SOME requests need to validate the token permissions. api.add_resource(HelloWorld,", "\"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\")", "\"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\")", "# Check that the SSL certificate exists if not run http:// if os.path.isfile(cer)", "= {'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200, headers) def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if", "template_folder='dist') api = Api(app) cors = CORS(app) # TODO ALL requests need to", "# TODO placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, 
\"/api/league\")", "CORS from resources import * import config import sys import os from OpenSSL", "None: raise RuntimeError('Not running with the Werkzeug Server') func() @app.route('/shutdown', methods=['POST']) def shutdown():", "resources import * import config import sys import os from OpenSSL import SSL", "to update the token if it exists. SOME requests need to validate the", "ALL requests need to update the token if it exists. SOME requests need", "from flask import request context = SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key'])", "token permissions. api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule')", "def shutdown(): shutdown_server() return 'Server shutting down...' db = DatabaseMigrator() db.migrate(False) if __name__", "Api(app) cors = CORS(app) # TODO ALL requests need to update the token", "exists if not run http:// if os.path.isfile(cer) and os.path.isfile(key): context = (cer, key)", "run http:// if os.path.isfile(cer) and os.path.isfile(key): context = (cer, key) app.run(host=config.app_settings['host'], port=config.app_settings['port'], ssl_context=context,", "api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\")", "os from OpenSSL import SSL from flask import request context = SSL.Context(SSL.SSLv23_METHOD) cer", "os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api = Api(app)", "import DatabaseMigrator from flask_restful import Api from flask_cors import CORS from resources import", "os.path.isfile(key): context = (cer, key) 
app.run(host=config.app_settings['host'], port=config.app_settings['port'], ssl_context=context, debug=config.app_settings['debug']) else: app.run(host=config.app_settings['host'], port=config.app_settings['port'], debug=config.app_settings['debug'])", "name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\")", "api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation,", "func = request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not running with the Werkzeug", "api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e): headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200, headers)", "\"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\")", "CORS(app) # TODO ALL requests need to update the token if it exists.", "api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e): headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html'),", "shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not running with the", "catch_all(e): headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200, headers) def 
shutdown_server(): func =", "import request context = SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app =", "for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder", "shutdown_server() return 'Server shutting down...' db = DatabaseMigrator() db.migrate(False) if __name__ == \"__main__\":", "func() @app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server() return 'Server shutting down...' db = DatabaseMigrator()", "flask_restful import Api from flask_cors import CORS from resources import * import config", "shutting down...' db = DatabaseMigrator() db.migrate(False) if __name__ == \"__main__\": # Check that", "config import sys import os from OpenSSL import SSL from flask import request", "if not run http:// if os.path.isfile(cer) and os.path.isfile(key): context = (cer, key) app.run(host=config.app_settings['host'],", "from common import DatabaseMigrator from flask_restful import Api from flask_cors import CORS from", "cer = os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api", "\"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\")", "DatabaseMigrator() db.migrate(False) if __name__ == \"__main__\": # Check that the SSL certificate exists", "key = os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api = Api(app) cors", "static_url_path='', 
static_folder='dist', template_folder='dist') api = Api(app) cors = CORS(app) # TODO ALL requests", "\"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\")", "the token permissions. api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep for debugging) api.add_resource(LeagueSchedule,", "Server') func() @app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server() return 'Server shutting down...' db =", "down...' db = DatabaseMigrator() db.migrate(False) if __name__ == \"__main__\": # Check that the", "http:// if os.path.isfile(cer) and os.path.isfile(key): context = (cer, key) app.run(host=config.app_settings['host'], port=config.app_settings['port'], ssl_context=context, debug=config.app_settings['debug'])", "SSL from flask import request context = SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key =", "(keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO", "if func is None: raise RuntimeError('Not running with the Werkzeug Server') func() @app.route('/shutdown',", "# TODO ALL requests need to update the token if it exists. 
SOME", "TODO placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team,", "@app.errorhandler(404) def catch_all(e): headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200, headers) def shutdown_server():", "debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint", "from OpenSSL import SSL from flask import request context = SSL.Context(SSL.SSLv23_METHOD) cer =", "\"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def", "= Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api = Api(app) cors = CORS(app) # TODO", "validate the token permissions. api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep for debugging)", "@app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server() return 'Server shutting down...' 
db = DatabaseMigrator() db.migrate(False)", "api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User,", "SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist')", "from flask_cors import CORS from resources import * import config import sys import", "eventually (keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') #", "if __name__ == \"__main__\": # Check that the SSL certificate exists if not", "= DatabaseMigrator() db.migrate(False) if __name__ == \"__main__\": # Check that the SSL certificate", "= os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api = Api(app) cors =", "def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not running with", "update the token if it exists. 
SOME requests need to validate the token", "if os.path.isfile(cer) and os.path.isfile(key): context = (cer, key) app.run(host=config.app_settings['host'], port=config.app_settings['port'], ssl_context=context, debug=config.app_settings['debug']) else:", "= request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not running with the Werkzeug Server')", "flask_cors import CORS from resources import * import config import sys import os", "request context = SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app = Flask(__name__,", "sys import os from OpenSSL import SSL from flask import request context =", "200, headers) def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not", "api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e):", "Api from flask_cors import CORS from resources import * import config import sys", "the Werkzeug Server') func() @app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server() return 'Server shutting down...'", "db = DatabaseMigrator() db.migrate(False) if __name__ == \"__main__\": # Check that the SSL", "func is None: raise RuntimeError('Not running with the Werkzeug Server') func() @app.route('/shutdown', methods=['POST'])", "== \"__main__\": # Check that the SSL certificate exists if not run http://", "if it exists. SOME requests need to validate the token permissions. 
api.add_resource(HelloWorld, '/HelloWorld')", "headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200, headers) def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown')", "flask import request context = SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app", "from flask_restful import Api from flask_cors import CORS from resources import * import", "with the Werkzeug Server') func() @app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server() return 'Server shutting", "DatabaseMigrator from flask_restful import Api from flask_cors import CORS from resources import *", "to validate the token permissions. api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep for", "\"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e): headers = {'Content-Type':", "api.add_resource(Login, \"/api/login\") api.add_resource(Register, \"/api/register\") api.add_resource(TokenValidation, \"/api/token-check\") api.add_resource(User, \"/api/user\") api.add_resource(Users, \"/api/users\") api.add_resource(GameRoster, \"/api/game-roster/<game_id>\") api.add_resource(Root,", "render_template, make_response from common import DatabaseMigrator from flask_restful import Api from flask_cors import", "Werkzeug Server') func() @app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server() return 'Server shutting down...' 
db", "'text/html'} return make_response(render_template('index.html'), 200, headers) def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func is", "flask import Flask, render_template, make_response from common import DatabaseMigrator from flask_restful import Api", "TODO remove eventually (keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule,", "RuntimeError('Not running with the Werkzeug Server') func() @app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server() return", "import * import config import sys import os from OpenSSL import SSL from", "TODO ALL requests need to update the token if it exists. SOME requests", "app = Flask(__name__, static_url_path='', static_folder='dist', template_folder='dist') api = Api(app) cors = CORS(app) #", "return 'Server shutting down...' db = DatabaseMigrator() db.migrate(False) if __name__ == \"__main__\": #", "= SSL.Context(SSL.SSLv23_METHOD) cer = os.path.join(config.ssl_config['cer']) key = os.path.join(config.ssl_config['key']) app = Flask(__name__, static_url_path='', static_folder='dist',", "db.migrate(False) if __name__ == \"__main__\": # Check that the SSL certificate exists if", "= Api(app) cors = CORS(app) # TODO ALL requests need to update the", "shutdown(): shutdown_server() return 'Server shutting down...' 
db = DatabaseMigrator() db.migrate(False) if __name__ ==", "and os.path.isfile(key): context = (cer, key) app.run(host=config.app_settings['host'], port=config.app_settings['port'], ssl_context=context, debug=config.app_settings['debug']) else: app.run(host=config.app_settings['host'], port=config.app_settings['port'],", "'/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster,", "* import config import sys import os from OpenSSL import SSL from flask", "\"/api/game-roster/<game_id>\") api.add_resource(Root, \"/\") @app.errorhandler(404) def catch_all(e): headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200,", "SOME requests need to validate the token permissions. api.add_resource(HelloWorld, '/HelloWorld') # TODO remove", "permissions. api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule,", "api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game')", "requests need to validate the token permissions. 
api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually", "raise RuntimeError('Not running with the Werkzeug Server') func() @app.route('/shutdown', methods=['POST']) def shutdown(): shutdown_server()", "is None: raise RuntimeError('Not running with the Werkzeug Server') func() @app.route('/shutdown', methods=['POST']) def", "api = Api(app) cors = CORS(app) # TODO ALL requests need to update", "{'Content-Type': 'text/html'} return make_response(render_template('index.html'), 200, headers) def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func", "api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player, \"/api/player\") api.add_resource(TeamRoster, \"/api/roster/<team_id>\") api.add_resource(League, \"/api/league\") api.add_resource(Team, \"/api/team\") api.add_resource(Login, \"/api/login\") api.add_resource(Register,", "cors = CORS(app) # TODO ALL requests need to update the token if", "Check that the SSL certificate exists if not run http:// if os.path.isfile(cer) and", "__name__ == \"__main__\": # Check that the SSL certificate exists if not run", "'/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule') api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name api.add_resource(GameStats, \"/api/game-stats/<game_id>\") api.add_resource(Player,", "# TODO remove eventually (keep for debugging) api.add_resource(LeagueSchedule, '/api/game-schedule') api.add_resource(GameSchedule, '/api/game') api.add_resource(PlayerSchedule, '/api/player-schedule')" ]
[ "parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display steps to restart FirstMile sandbox\" log = logging.getLogger(__name__)", "have to then run 'cld cloud setup' to get your cloud-specific setup.\") print(\"===============================================================================================================================\")", "class FirstMileCleanup(Command): \"Display steps to cleanup FirstMile workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server", "-a | grep firstmile | head -1 | awk '{print $1}'\" err, output", "FirstMileCleanup(Command): \"Display steps to cleanup FirstMile workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses", "take_action(self, parsed_args): self._restart() class FirstMileCleanup(Command): \"Display steps to cleanup FirstMile workspace\" def _cleanup(self):", "output.rstrip().lstrip() cp_cmd = (\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op = common.execute_shell_cmd(cp_cmd) if", "this folder.\") print(\"- Services provisioned using FirstMile are stored in services folder inside", "the entire workspace. If you do that you will have to then run", "you will have to then run 'cld cloud setup' to get your cloud-specific", "steps to cleanup FirstMile workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments as", "cleanup the workspace.\") print(\"You can also delete the entire workspace. 
If you do", "common from cliff.command import Command class FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\" log =", "you downloaded firstmile and then run following commands:\") print(\"sudo docker build -t firstmile-img", "service folders to cleanup the workspace.\") print(\"You can also delete the entire workspace.", "commands:\") print(\"sudo docker build -t firstmile-img .\") print(\"sudo docker run -u ubuntu -p", "for all deployments.\") print(\"- Any application that is deployed using FirstMile is stored", "print(\"FirstMile server uses ~/.cld/data/deployments as workspace folder for all deployments.\") print(\"- Any application", "to cleanup FirstMile workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments as workspace", "delete the entire workspace. If you do that you will have to then", "firstmile.log\").format(cont_id=output) err, op = common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile logs saved in firstmile.log\")", "folder.\") print(\"You can delete application folders or service folders to cleanup the workspace.\")", "cliff.command import Command class FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\" log = logging.getLogger(__name__) def", "services folder inside this folder.\") print(\"You can delete application folders or service folders", "err, op = common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile logs saved in firstmile.log\") def", "inside this folder.\") print(\"- Services provisioned using FirstMile are stored in services folder", "| grep firstmile | head -1 | awk '{print $1}'\" err, output =", "import Command class FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\" log = logging.getLogger(__name__) def _extract_logs(self):", "cp_cmd = (\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, 
op = common.execute_shell_cmd(cp_cmd) if not", "= \"sudo docker ps -a | grep firstmile | head -1 | awk", "FirstMileRestart(Command): \"Display steps to restart FirstMile sandbox\" log = logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\")", "build -t firstmile-img .\") print(\"sudo docker run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock", "in services folder inside this folder.\") print(\"You can delete application folders or service", "(\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op = common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile", "self._extract_logs() class FirstMileRestart(Command): \"Display steps to restart FirstMile sandbox\" log = logging.getLogger(__name__) def", "or service folders to cleanup the workspace.\") print(\"You can also delete the entire", "will have to then run 'cld cloud setup' to get your cloud-specific setup.\")", "class FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\" log = logging.getLogger(__name__) def _extract_logs(self): cmd =", "def take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display steps to restart FirstMile sandbox\" log", "FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\" log = logging.getLogger(__name__) def _extract_logs(self): cmd = \"sudo", "following commands:\") print(\"sudo docker build -t firstmile-img .\") print(\"sudo docker run -u ubuntu", "are stored in services folder inside this folder.\") print(\"You can delete application folders", "firstmile | head -1 | awk '{print $1}'\" err, output = common.execute_shell_cmd(cmd) if", "log = logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go to the directory 
where you downloaded", "print(\"You can delete application folders or service folders to cleanup the workspace.\") print(\"You", "print(\"Go to the directory where you downloaded firstmile and then run following commands:\")", "from cliff.command import Command class FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\" log = logging.getLogger(__name__)", "Services provisioned using FirstMile are stored in services folder inside this folder.\") print(\"You", "also delete the entire workspace. If you do that you will have to", "where you downloaded firstmile and then run following commands:\") print(\"sudo docker build -t", "ps -a | grep firstmile | head -1 | awk '{print $1}'\" err,", "| head -1 | awk '{print $1}'\" err, output = common.execute_shell_cmd(cmd) if output:", "in firstmile.log\") def take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display steps to restart FirstMile", "self._restart() class FirstMileCleanup(Command): \"Display steps to cleanup FirstMile workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile", "print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments as workspace folder for all deployments.\") print(\"- Any", "using FirstMile is stored in a directory inside this folder.\") print(\"- Services provisioned", "common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile logs saved in firstmile.log\") def take_action(self, parsed_args): self._extract_logs()", "directory where you downloaded firstmile and then run following commands:\") print(\"sudo docker build", "FirstMile are stored in services folder inside this folder.\") print(\"You can delete application", "def take_action(self, parsed_args): self._restart() class 
FirstMileCleanup(Command): \"Display steps to cleanup FirstMile workspace\" def", "docker build -t firstmile-img .\") print(\"sudo docker run -u ubuntu -p 5002:5002 -v", "to restart FirstMile sandbox\" log = logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go to the", "if not err: print(\"FirstMile logs saved in firstmile.log\") def take_action(self, parsed_args): self._extract_logs() class", "you do that you will have to then run 'cld cloud setup' to", "FirstMile is stored in a directory inside this folder.\") print(\"- Services provisioned using", "cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op = common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile logs saved", "delete application folders or service folders to cleanup the workspace.\") print(\"You can also", "import common from cliff.command import Command class FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\" log", "folders to cleanup the workspace.\") print(\"You can also delete the entire workspace. 
If", "\"Retrieve FirstMile sandbox logs\" log = logging.getLogger(__name__) def _extract_logs(self): cmd = \"sudo docker", "firstmile and then run following commands:\") print(\"sudo docker build -t firstmile-img .\") print(\"sudo", "firstmile-img .\") print(\"sudo docker run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu", "/var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._restart() class FirstMileCleanup(Command): \"Display", "_cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments as workspace folder for all deployments.\") print(\"-", "~/.cld/data/deployments as workspace folder for all deployments.\") print(\"- Any application that is deployed", "can delete application folders or service folders to cleanup the workspace.\") print(\"You can", "logs saved in firstmile.log\") def take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display steps to", "docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op = common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile logs", "print(\"===============================================================================================================================\") print(\"Go to the directory where you downloaded firstmile and then run following", "application that is deployed using FirstMile is stored in a directory inside this", "saved in firstmile.log\") def take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display steps to restart", "folder for all deployments.\") print(\"- Any application that is deployed 
using FirstMile is", "directory inside this folder.\") print(\"- Services provisioned using FirstMile are stored in services", "to then run 'cld cloud setup' to get your cloud-specific setup.\") print(\"===============================================================================================================================\") def", "op = common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile logs saved in firstmile.log\") def take_action(self,", "Any application that is deployed using FirstMile is stored in a directory inside", "all deployments.\") print(\"- Any application that is deployed using FirstMile is stored in", "output = output.rstrip().lstrip() cp_cmd = (\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op =", "not err: print(\"FirstMile logs saved in firstmile.log\") def take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command):", "take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display steps to restart FirstMile sandbox\" log =", "log = logging.getLogger(__name__) def _extract_logs(self): cmd = \"sudo docker ps -a | grep", "| awk '{print $1}'\" err, output = common.execute_shell_cmd(cmd) if output: output = output.rstrip().lstrip()", ".\") print(\"sudo docker run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d", "inside this folder.\") print(\"You can delete application folders or service folders to cleanup", "workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments as workspace folder for all", "print(\"sudo docker run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\")", "parsed_args): self._restart() class FirstMileCleanup(Command): \"Display steps to cleanup FirstMile 
workspace\" def _cleanup(self): print(\"===============================================================================================================================\")", "-u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self,", "If you do that you will have to then run 'cld cloud setup'", "folder.\") print(\"- Services provisioned using FirstMile are stored in services folder inside this", "import logging import common from cliff.command import Command class FirstMileLogs(Command): \"Retrieve FirstMile sandbox", "folder inside this folder.\") print(\"You can delete application folders or service folders to", "FirstMile workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments as workspace folder for", "restart FirstMile sandbox\" log = logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go to the directory", "firstmile.log\") def take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display steps to restart FirstMile sandbox\"", "= common.execute_shell_cmd(cmd) if output: output = output.rstrip().lstrip() cp_cmd = (\"sudo docker cp {cont_id}:/src/cld.log", "firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._restart() class FirstMileCleanup(Command): \"Display steps to cleanup FirstMile", "can also delete the entire workspace. 
If you do that you will have", "to cleanup the workspace.\") print(\"You can also delete the entire workspace. If you", "-v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._restart() class FirstMileCleanup(Command): \"Display steps", "workspace folder for all deployments.\") print(\"- Any application that is deployed using FirstMile", "is deployed using FirstMile is stored in a directory inside this folder.\") print(\"-", "using FirstMile are stored in services folder inside this folder.\") print(\"You can delete", "logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go to the directory where you downloaded firstmile and", "logs\" log = logging.getLogger(__name__) def _extract_logs(self): cmd = \"sudo docker ps -a |", "Command class FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\" log = logging.getLogger(__name__) def _extract_logs(self): cmd", "$1}'\" err, output = common.execute_shell_cmd(cmd) if output: output = output.rstrip().lstrip() cp_cmd = (\"sudo", "sandbox\" log = logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go to the directory where you", "print(\"You can also delete the entire workspace. 
If you do that you will", "cmd = \"sudo docker ps -a | grep firstmile | head -1 |", "-d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._restart() class FirstMileCleanup(Command): \"Display steps to cleanup", "print(\"- Any application that is deployed using FirstMile is stored in a directory", "print(\"sudo docker build -t firstmile-img .\") print(\"sudo docker run -u ubuntu -p 5002:5002", "sandbox logs\" log = logging.getLogger(__name__) def _extract_logs(self): cmd = \"sudo docker ps -a", "as workspace folder for all deployments.\") print(\"- Any application that is deployed using", "$HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._restart() class FirstMileCleanup(Command): \"Display steps to", "awk '{print $1}'\" err, output = common.execute_shell_cmd(cmd) if output: output = output.rstrip().lstrip() cp_cmd", "err, output = common.execute_shell_cmd(cmd) if output: output = output.rstrip().lstrip() cp_cmd = (\"sudo docker", "docker run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\")", "that is deployed using FirstMile is stored in a directory inside this folder.\")", "a directory inside this folder.\") print(\"- Services provisioned using FirstMile are stored in", "-p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): 
self._restart()", "stored in services folder inside this folder.\") print(\"You can delete application folders or", "then run 'cld cloud setup' to get your cloud-specific setup.\") print(\"===============================================================================================================================\") def take_action(self,", "steps to restart FirstMile sandbox\" log = logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go to", "output: output = output.rstrip().lstrip() cp_cmd = (\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op", "to the directory where you downloaded firstmile and then run following commands:\") print(\"sudo", "'{print $1}'\" err, output = common.execute_shell_cmd(cmd) if output: output = output.rstrip().lstrip() cp_cmd =", "workspace.\") print(\"You can also delete the entire workspace. 
If you do that you", "def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments as workspace folder for all deployments.\")", "= logging.getLogger(__name__) def _extract_logs(self): cmd = \"sudo docker ps -a | grep firstmile", "provisioned using FirstMile are stored in services folder inside this folder.\") print(\"You can", "def _extract_logs(self): cmd = \"sudo docker ps -a | grep firstmile | head", "the directory where you downloaded firstmile and then run following commands:\") print(\"sudo docker", "5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._restart() class", "that you will have to then run 'cld cloud setup' to get your", "workspace. 
If you do that you will have to then run 'cld cloud", "print(\"- Services provisioned using FirstMile are stored in services folder inside this folder.\")", "cleanup FirstMile workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments as workspace folder", "-t firstmile-img .\") print(\"sudo docker run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v", "ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args):", "stored in a directory inside this folder.\") print(\"- Services provisioned using FirstMile are", "= (\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op = common.execute_shell_cmd(cp_cmd) if not err:", "downloaded firstmile and then run following commands:\") print(\"sudo docker build -t firstmile-img .\")", "logging.getLogger(__name__) def _extract_logs(self): cmd = \"sudo docker ps -a | grep firstmile |", "\"Display steps to restart FirstMile sandbox\" log = logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go", "and then run following commands:\") print(\"sudo docker build -t firstmile-img .\") print(\"sudo docker", "_restart(self): print(\"===============================================================================================================================\") print(\"Go to the directory where you downloaded firstmile and then run", "this folder.\") print(\"You can delete application folders or service folders to cleanup the", "_extract_logs(self): cmd = \"sudo 
docker ps -a | grep firstmile | head -1", "FirstMile sandbox logs\" log = logging.getLogger(__name__) def _extract_logs(self): cmd = \"sudo docker ps", "= logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go to the directory where you downloaded firstmile", "application folders or service folders to cleanup the workspace.\") print(\"You can also delete", "output = common.execute_shell_cmd(cmd) if output: output = output.rstrip().lstrip() cp_cmd = (\"sudo docker cp", "folders or service folders to cleanup the workspace.\") print(\"You can also delete the", "logging import common from cliff.command import Command class FirstMileLogs(Command): \"Retrieve FirstMile sandbox logs\"", "docker ps -a | grep firstmile | head -1 | awk '{print $1}'\"", "deployments.\") print(\"- Any application that is deployed using FirstMile is stored in a", "do that you will have to then run 'cld cloud setup' to get", "run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def", "in a directory inside this folder.\") print(\"- Services provisioned using FirstMile are stored", "server uses ~/.cld/data/deployments as workspace folder for all deployments.\") print(\"- Any application that", "print(\"FirstMile logs saved in firstmile.log\") def take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display steps", "-v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._restart() class FirstMileCleanup(Command):", "= 
common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile logs saved in firstmile.log\") def take_action(self, parsed_args):", "print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._restart() class FirstMileCleanup(Command): \"Display steps to cleanup FirstMile workspace\"", "uses ~/.cld/data/deployments as workspace folder for all deployments.\") print(\"- Any application that is", "err: print(\"FirstMile logs saved in firstmile.log\") def take_action(self, parsed_args): self._extract_logs() class FirstMileRestart(Command): \"Display", "{cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op = common.execute_shell_cmd(cp_cmd) if not err: print(\"FirstMile logs saved in", "run following commands:\") print(\"sudo docker build -t firstmile-img .\") print(\"sudo docker run -u", "is stored in a directory inside this folder.\") print(\"- Services provisioned using FirstMile", "if output: output = output.rstrip().lstrip() cp_cmd = (\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err,", "run 'cld cloud setup' to get your cloud-specific setup.\") print(\"===============================================================================================================================\") def take_action(self, parsed_args):", "common.execute_shell_cmd(cmd) if output: output = output.rstrip().lstrip() cp_cmd = (\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output)", "FirstMile sandbox\" log = logging.getLogger(__name__) def _restart(self): print(\"===============================================================================================================================\") print(\"Go to the directory where", "then run following commands:\") print(\"sudo docker build -t firstmile-img .\") print(\"sudo docker run", "head -1 | awk '{print $1}'\" err, output = 
common.execute_shell_cmd(cmd) if output: output", "grep firstmile | head -1 | awk '{print $1}'\" err, output = common.execute_shell_cmd(cmd)", "deployed using FirstMile is stored in a directory inside this folder.\") print(\"- Services", "the workspace.\") print(\"You can also delete the entire workspace. If you do that", "= output.rstrip().lstrip() cp_cmd = (\"sudo docker cp {cont_id}:/src/cld.log firstmile.log\").format(cont_id=output) err, op = common.execute_shell_cmd(cp_cmd)", "\"sudo docker ps -a | grep firstmile | head -1 | awk '{print", "class FirstMileRestart(Command): \"Display steps to restart FirstMile sandbox\" log = logging.getLogger(__name__) def _restart(self):", "-1 | awk '{print $1}'\" err, output = common.execute_shell_cmd(cmd) if output: output =", "'cld cloud setup' to get your cloud-specific setup.\") print(\"===============================================================================================================================\") def take_action(self, parsed_args): self._cleanup()", "\"Display steps to cleanup FirstMile workspace\" def _cleanup(self): print(\"===============================================================================================================================\") print(\"FirstMile server uses ~/.cld/data/deployments", "entire workspace. If you do that you will have to then run 'cld", "def _restart(self): print(\"===============================================================================================================================\") print(\"Go to the directory where you downloaded firstmile and then" ]
[ "indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from sklearn.multiclass import OneVsRestClassifier from sklearn.svm", "# non deep learning on bag of words # load pickles and libraries", "learning on bag of words # load pickles and libraries from src.utils.eval_metrics import", "with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) #### predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs", "import * from src.utils.initialize import * from sklearn.model_selection import train_test_split import pickle with", "SVC ######### from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.model_selection import", "X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ######", "print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file to show as a result", "1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) classif = OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test)", "# save to file to show as a result with open('models/classifier_svc.pkl','wb') as f:", "words # load pickles and libraries from src.utils.eval_metrics import * from src.utils.initialize import", "#### predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions)", "predstfidf, target_names=genre_names)) # save to file to show as a result with open('models/classifier_svc.pkl','wb')", "Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC #########", "as f: pickle.dump(classif,f) #### predictions = 
generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs = precsc_recs(test_movies,", "import train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f:", "from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from", "X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y,", "open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f:", "f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) # Feature Selection and Test/Train Split", "scoring=make_scorer(f1_score, average='micro')) classif = OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names)) #", "test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from sklearn.multiclass", "f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) #", "f: Y=pickle.load(f) # Feature Selection and Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f)", "predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean =", "target_names=genre_names)) # save to file to show as a result with open('models/classifier_svc.pkl','wb') as", "f: pickle.dump(classif,f) #### predictions = 
generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews,", "open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) # Feature Selection and", "as f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f)", "train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from sklearn.multiclass import OneVsRestClassifier", "src.utils.eval_metrics import * from src.utils.initialize import * from sklearn.model_selection import train_test_split import pickle", "parameters, scoring=make_scorer(f1_score, average='micro')) classif = OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names))", "import GridSearchCV from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import", "from sklearn.metrics import make_scorer from sklearn.metrics import classification_report parameters = {'kernel':['linear'], 'C':[0.01, 0.1,", "Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from sklearn.multiclass import OneVsRestClassifier from", "save to file to show as a result with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f)", "= np.mean(np.asarray(recs)) import json with open('dominostats.json', 'w') as f: f.write(json.dumps({\"Precision\": prec_mean, \"Recall\": rec_mean}))", "import pickle with open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with", "parameters = {'kernel':['linear'], 
'C':[0.01, 0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) classif", "and Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train, X_test,", "and libraries from src.utils.eval_metrics import * from src.utils.initialize import * from sklearn.model_selection import", "make_scorer from sklearn.metrics import classification_report parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]} gridCV =", "of words # load pickles and libraries from src.utils.eval_metrics import * from src.utils.initialize", "from sklearn.model_selection import train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb')", "load pickles and libraries from src.utils.eval_metrics import * from src.utils.initialize import * from", "= range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20,", "random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC", "generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs))", "= train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from sklearn.multiclass import", "predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean", "= generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) 
prec_mean =", "SVC from sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score from sklearn.metrics import make_scorer", "predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file to show as a", "import * from sklearn.model_selection import train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f)", "sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import classification_report parameters =", "train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f)", "sklearn.model_selection import train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as", "Feature Selection and Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies = range(len(movies_with_overviews))", "as f: X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies =", "import SVC from sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score from sklearn.metrics import", "import OneVsRestClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from sklearn.metrics import", "np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) import json with open('dominostats.json', 'w') as f: f.write(json.dumps({\"Precision\": prec_mean,", "= np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) import json with open('dominostats.json', 'w') as f: f.write(json.dumps({\"Precision\":", "with open('data/processed/movies_with_overviews.pkl','rb') as f: 
movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as", "Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) import json with open('dominostats.json', 'w')", "recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) import", "f1_score from sklearn.metrics import make_scorer from sklearn.metrics import classification_report parameters = {'kernel':['linear'], 'C':[0.01,", "f: X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X,", "show as a result with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) #### predictions = generate_predictions(Genre_ID_to_name,", "sklearn.metrics import classification_report parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters,", "range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42)", "Selection and Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train,", "classif = OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save to", "predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) import json with open('dominostats.json', 'w') as", "* from sklearn.model_selection import train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with", "on bag of words # load pickles and libraries from 
src.utils.eval_metrics import *", "= {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) classif =", "from sklearn.metrics import classification_report parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'),", "result with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) #### predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs,", "from sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from", "libraries from src.utils.eval_metrics import * from src.utils.initialize import * from sklearn.model_selection import train_test_split", "* from src.utils.initialize import * from sklearn.model_selection import train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb')", "from src.utils.eval_metrics import * from src.utils.initialize import * from sklearn.model_selection import train_test_split import", "train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from", "import make_scorer from sklearn.metrics import classification_report parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]} gridCV", "= OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file", "pickle with open('data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb')", "{'kernel':['linear'], 'C':[0.01, 0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) 
classif = OneVsRestClassifier(gridCV)", "= precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) import json", "non deep learning on bag of words # load pickles and libraries from", "bag of words # load pickles and libraries from src.utils.eval_metrics import * from", "X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values())", "import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import classification_report parameters = {'kernel':['linear'],", "# load pickles and libraries from src.utils.eval_metrics import * from src.utils.initialize import *", "sklearn.metrics import make_scorer from sklearn.metrics import classification_report parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]}", "OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file to", "movies_with_overviews=pickle.load(f) with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) # Feature", "from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score from", "prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) import json with open('dominostats.json', 'w') as f:", "import classification_report parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score,", "to show as a result with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) #### predictions =", "precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) 
rec_mean = np.mean(np.asarray(recs)) import json with", "GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) classif = OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf,", "from src.utils.initialize import * from sklearn.model_selection import train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb') as", "pickle.dump(classif,f) #### predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name,", "= GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) classif = OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test,", "from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import classification_report parameters", "with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies,", "src.utils.initialize import * from sklearn.model_selection import train_test_split import pickle with open('data/processed/movies_with_overviews.pkl','rb') as f:", "gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) classif = OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print", "open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies", "Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC", "Y_train) predstfidf=classif.predict(X_test) print 
(classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file to show as", "sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score from sklearn.metrics", "with open('data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) # Feature Selection", "indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies,", "with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) # Feature Selection and Test/Train Split with open('data/processed/X_tfidf.pkl','rb')", "file to show as a result with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) #### predictions", "classification_report parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro'))", "deep learning on bag of words # load pickles and libraries from src.utils.eval_metrics", "# Feature Selection and Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies =", "Y=pickle.load(f) # Feature Selection and Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies", "Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train,", "pickles and libraries from src.utils.eval_metrics import * from src.utils.initialize import * from sklearn.model_selection", "'C':[0.01, 0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) classif = OneVsRestClassifier(gridCV) classif.fit(X_train,", "0.1, 1.0]} gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro')) classif = 
OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train)", "Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) # Feature Selection and Test/Train Split with", "GridSearchCV from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import classification_report", "OneVsRestClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score", "open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) # Feature Selection and Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as", "classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file to show", "Split with open('data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test,", "a result with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) #### predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf)", "open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) #### predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs =", "as a result with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) #### predictions = generate_predictions(Genre_ID_to_name, X_test,", "as f: Y=pickle.load(f) # Feature Selection and Test/Train Split with open('data/processed/X_tfidf.pkl','rb') as f:", "average='micro')) classif = OneVsRestClassifier(gridCV) classif.fit(X_train, Y_train) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save", "(classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file to show as a result with", "X_test, predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = 
np.mean(np.asarray(precs)) rec_mean", "as f: Genre_ID_to_name=pickle.load(f) with open('data/processed/Y.pkl','rb') as f: Y=pickle.load(f) # Feature Selection and Test/Train", "sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from sklearn.metrics", "movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) import json with open('dominostats.json',", "genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from", "###### SVC ######### from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.model_selection", "precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs))", "test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) ###### SVC ######### from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import", "sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics", "to file to show as a result with open('models/classifier_svc.pkl','wb') as f: pickle.dump(classif,f) ####", "rec_mean = np.mean(np.asarray(recs)) import json with open('dominostats.json', 'w') as f: f.write(json.dumps({\"Precision\": prec_mean, \"Recall\":", "######### from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV" ]
[ "ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def has_object_permission(self, request, view, point): if", "request, view): if request.method == \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return", "ippon.point.serializers as pts import ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view):", "pts import ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view): if request.method", "\"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def has_object_permission(self, request, view,", "ip.get_tournament_from_fight) return True def has_object_permission(self, request, view, point): if request and request.method in", "ippon.models.tournament as tm import ippon.point.serializers as pts import ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission):", "ippon.models import ippon.models.fight import ippon.models.tournament as tm import ippon.point.serializers as pts import ippon.utils.permissions", "return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def has_object_permission(self, request, view, point):", "<filename>ippon/point/permissions.py from rest_framework import permissions import ippon.models import ippon.models.fight import ippon.models.tournament as tm", "class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view): if request.method == \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer,", "def has_permission(self, request, 
view): if request.method == \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight,", "as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view): if request.method == \"POST\": return", "pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def has_object_permission(self, request, view, point): if request", "request.method == \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def has_object_permission(self,", "has_object_permission(self, request, view, point): if request and request.method in permissions.SAFE_METHODS: return True return", "ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view): if request.method == \"POST\":", "view): if request.method == \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True", "IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view): if request.method == \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\",", "import ippon.models.tournament as tm import ippon.point.serializers as pts import ippon.utils.permissions as ip class", "from rest_framework import permissions import ippon.models import ippon.models.fight import ippon.models.tournament as tm import", "if request and request.method in permissions.SAFE_METHODS: return True return tm.TournamentAdmin.objects.filter(tournament=point.fight.team_fight.tournament, user=request.user).count() > 0", "import ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view): if request.method ==", "as pts import 
ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view): if", "tm import ippon.point.serializers as pts import ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self,", "import permissions import ippon.models import ippon.models.fight import ippon.models.tournament as tm import ippon.point.serializers as", "import ippon.models.fight import ippon.models.tournament as tm import ippon.point.serializers as pts import ippon.utils.permissions as", "request, view, point): if request and request.method in permissions.SAFE_METHODS: return True return tm.TournamentAdmin.objects.filter(tournament=point.fight.team_fight.tournament,", "as tm import ippon.point.serializers as pts import ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def", "return True def has_object_permission(self, request, view, point): if request and request.method in permissions.SAFE_METHODS:", "if request.method == \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def", "\"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def has_object_permission(self, request, view, point): if request and", "def has_object_permission(self, request, view, point): if request and request.method in permissions.SAFE_METHODS: return True", "view, point): if request and request.method in permissions.SAFE_METHODS: return True return tm.TournamentAdmin.objects.filter(tournament=point.fight.team_fight.tournament, user=request.user).count()", "point): if request and request.method in permissions.SAFE_METHODS: return True return tm.TournamentAdmin.objects.filter(tournament=point.fight.team_fight.tournament, user=request.user).count() >", "rest_framework import permissions import ippon.models import ippon.models.fight import 
ippon.models.tournament as tm import ippon.point.serializers", "ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request, view): if request.method == \"POST\": return ip.has_object_creation_permission(request,", "== \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def has_object_permission(self, request,", "True def has_object_permission(self, request, view, point): if request and request.method in permissions.SAFE_METHODS: return", "permissions import ippon.models import ippon.models.fight import ippon.models.tournament as tm import ippon.point.serializers as pts", "import ippon.models import ippon.models.fight import ippon.models.tournament as tm import ippon.point.serializers as pts import", "has_permission(self, request, view): if request.method == \"POST\": return ip.has_object_creation_permission(request, pts.PointSerializer, \"fight\", ippon.models.fight.Fight, ip.get_tournament_from_fight)", "ippon.models.fight import ippon.models.tournament as tm import ippon.point.serializers as pts import ippon.utils.permissions as ip", "import ippon.point.serializers as pts import ippon.utils.permissions as ip class IsPointOwnerOrReadOnly(permissions.BasePermission): def has_permission(self, request,", "ippon.models.fight.Fight, ip.get_tournament_from_fight) return True def has_object_permission(self, request, view, point): if request and request.method" ]
[ "kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes bestehend aus Zeitraum, Wert und Statusinformationen.", "check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall endet", "class and corresponding marshmallow schema for de-/serialization \"\"\" from datetime import datetime import", "ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von # pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\"", "import check_bis_is_later_than_von # pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes", "return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization of Zeitreihenwert. \"\"\" class_name =", "self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime: \"\"\"return the exclusive end (used in the validator)\"\"\"", "(used in the validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization of", "Sekunden an dem das Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime),", "datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung Sekunden", "\"\"\" Schema for de-/serialization of Zeitreihenwert. \"\"\" class_name = Zeitreihenwert # type:ignore[assignment] #", "eines Zeitreihenwertes bestehend aus Zeitraum, Wert und Statusinformationen. .. HINT:: `Zeitreihenwert JSON Schema", "Zeitraum, Wert und Statusinformationen. .. 
HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required", "<gh_stars>1-10 \"\"\" Contains Zeitreihenwert class and corresponding marshmallow schema for de-/serialization \"\"\" from", "`Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required attributes datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime),", "dem das Messintervall endet (exklusiv) def _get_inclusive_start(self) -> datetime: \"\"\"return the inclusive start", "Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von # pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt):", "end (used in the validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization", "# pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes bestehend aus", "import attr from marshmallow import fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators", "marshmallow import fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von #", "from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von # pylint: disable=too-few-public-methods @attr.s(auto_attribs=True,", "and corresponding marshmallow schema for de-/serialization \"\"\" from datetime import datetime import attr", 
") #: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall begonnen wurde", "= attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung Sekunden an dem", "mit Auflösung Sekunden an dem das Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime =", "datetime import attr from marshmallow import fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from", "attributes datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung", "_get_exclusive_end(self) -> datetime: \"\"\"return the exclusive end (used in the validator)\"\"\" return self.datum_uhrzeit_bis", "\"\"\"return the inclusive start (used in the validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self) ->", "Auflösung Sekunden an dem das Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime = attr.ib(", "validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime: \"\"\"return the exclusive end (used in", "class_name = Zeitreihenwert # type:ignore[assignment] # required attributes datum_uhrzeit_von = fields.DateTime(data_key=\"datumUhrzeitVon\") datum_uhrzeit_bis =", "\"\"\" Abbildung eines Zeitreihenwertes bestehend aus Zeitraum, Wert und Statusinformationen. .. HINT:: `Zeitreihenwert", "class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes bestehend aus Zeitraum, Wert und Statusinformationen. ..", "of Zeitreihenwert. 
\"\"\" class_name = Zeitreihenwert # type:ignore[assignment] # required attributes datum_uhrzeit_von =", "#: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall begonnen wurde (inklusiv)", "the inclusive start (used in the validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime:", "the validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization of Zeitreihenwert. \"\"\"", "datetime import datetime import attr from marshmallow import fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt,", "attr from marshmallow import fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import", "an dem das Messintervall endet (exklusiv) def _get_inclusive_start(self) -> datetime: \"\"\"return the inclusive", "pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes bestehend aus Zeitraum,", "Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes bestehend aus Zeitraum, Wert und Statusinformationen. .. HINT::", "und Statusinformationen. .. 
HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required attributes datum_uhrzeit_von:", "def _get_inclusive_start(self) -> datetime: \"\"\"return the inclusive start (used in the validator)\"\"\" return", "datetime: \"\"\"return the inclusive start (used in the validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self)", "in the validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime: \"\"\"return the exclusive end", "for de-/serialization \"\"\" from datetime import datetime import attr from marshmallow import fields", "@attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes bestehend aus Zeitraum, Wert und", "validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall", "start (used in the validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime: \"\"\"return the", "= Zeitreihenwert # type:ignore[assignment] # required attributes datum_uhrzeit_von = fields.DateTime(data_key=\"datumUhrzeitVon\") datum_uhrzeit_bis = fields.DateTime(data_key=\"datumUhrzeitBis\")", "Uhrzeit mit Auflösung Sekunden an dem das Messintervall endet (exklusiv) def _get_inclusive_start(self) ->", "#: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall endet (exklusiv) def", "import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von # pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class", "Abbildung eines Zeitreihenwertes bestehend aus Zeitraum, Wert und Statusinformationen. .. 
HINT:: `Zeitreihenwert JSON", "Sekunden an dem das Messintervall endet (exklusiv) def _get_inclusive_start(self) -> datetime: \"\"\"return the", "Zeitreihenwert. \"\"\" class_name = Zeitreihenwert # type:ignore[assignment] # required attributes datum_uhrzeit_von = fields.DateTime(data_key=\"datumUhrzeitVon\")", "datetime: \"\"\"return the exclusive end (used in the validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema):", "corresponding marshmallow schema for de-/serialization \"\"\" from datetime import datetime import attr from", "required attributes datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit", "\"\"\" # required attributes datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum", "-> datetime: \"\"\"return the exclusive end (used in the validator)\"\"\" return self.datum_uhrzeit_bis class", "bo4e.validators import check_bis_is_later_than_von # pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines", "Auflösung Sekunden an dem das Messintervall endet (exklusiv) def _get_inclusive_start(self) -> datetime: \"\"\"return", "\"\"\" Contains Zeitreihenwert class and corresponding marshmallow schema for de-/serialization \"\"\" from datetime", "self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization of Zeitreihenwert. 
\"\"\" class_name = Zeitreihenwert", "import fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von # pylint:", "(used in the validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime: \"\"\"return the exclusive", "class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization of Zeitreihenwert. \"\"\" class_name = Zeitreihenwert #", "de-/serialization \"\"\" from datetime import datetime import attr from marshmallow import fields from", "disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes bestehend aus Zeitraum, Wert", "Uhrzeit mit Auflösung Sekunden an dem das Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime", "JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required attributes datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von]", "aus Zeitraum, Wert und Statusinformationen. .. HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" #", "de-/serialization of Zeitreihenwert. 
\"\"\" class_name = Zeitreihenwert # type:ignore[assignment] # required attributes datum_uhrzeit_von", "das Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #:", "(inklusiv) datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung", "begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit", "\"\"\" from datetime import datetime import attr from marshmallow import fields from bo4e.com.zeitreihenwertkompakt", "<https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required attributes datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #:", "Wert und Statusinformationen. .. HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required attributes", "from datetime import datetime import attr from marshmallow import fields from bo4e.com.zeitreihenwertkompakt import", "def _get_exclusive_end(self) -> datetime: \"\"\"return the exclusive end (used in the validator)\"\"\" return", "an dem das Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von]", "Schema for de-/serialization of Zeitreihenwert. \"\"\" class_name = Zeitreihenwert # type:ignore[assignment] # required", "Zeitreihenwertes bestehend aus Zeitraum, Wert und Statusinformationen. .. 
HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_", "Statusinformationen. .. HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required attributes datum_uhrzeit_von: datetime", "mit Auflösung Sekunden an dem das Messintervall endet (exklusiv) def _get_inclusive_start(self) -> datetime:", "datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung Sekunden an", "Contains Zeitreihenwert class and corresponding marshmallow schema for de-/serialization \"\"\" from datetime import", "Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall endet (exklusiv) def _get_inclusive_start(self)", "attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung Sekunden an dem das", "\"\"\" class_name = Zeitreihenwert # type:ignore[assignment] # required attributes datum_uhrzeit_von = fields.DateTime(data_key=\"datumUhrzeitVon\") datum_uhrzeit_bis", "Zeitreihenwert class and corresponding marshmallow schema for de-/serialization \"\"\" from datetime import datetime", "the exclusive end (used in the validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema", "(exklusiv) def _get_inclusive_start(self) -> datetime: \"\"\"return the inclusive start (used in the validator)\"\"\"", "Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum", "Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required 
attributes datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] )", "# required attributes datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit", "return self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime: \"\"\"return the exclusive end (used in the", "bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von # pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True)", "_get_inclusive_start(self) -> datetime: \"\"\"return the inclusive start (used in the validator)\"\"\" return self.datum_uhrzeit_von", "wurde (inklusiv) datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit", "-> datetime: \"\"\"return the inclusive start (used in the validator)\"\"\" return self.datum_uhrzeit_von def", "check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall begonnen", "marshmallow schema for de-/serialization \"\"\" from datetime import datetime import attr from marshmallow", "fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von # pylint: disable=too-few-public-methods", "the validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime: \"\"\"return the exclusive end (used", "datum_uhrzeit_von: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] ) #: Datum Uhrzeit mit Auflösung Sekunden", "dem das Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis: datetime = attr.ib( validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von] )", "from bo4e.validators import check_bis_is_later_than_von # 
pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung", "\"\"\"return the exclusive end (used in the validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\"", "in the validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization of Zeitreihenwert.", "schema for de-/serialization \"\"\" from datetime import datetime import attr from marshmallow import", "Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall begonnen wurde (inklusiv) datum_uhrzeit_bis:", "check_bis_is_later_than_von # pylint: disable=too-few-public-methods @attr.s(auto_attribs=True, kw_only=True) class Zeitreihenwert(Zeitreihenwertkompakt): \"\"\" Abbildung eines Zeitreihenwertes bestehend", "das Messintervall endet (exklusiv) def _get_inclusive_start(self) -> datetime: \"\"\"return the inclusive start (used", "exclusive end (used in the validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for", "from marshmallow import fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema from bo4e.validators import check_bis_is_later_than_von", "HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required attributes datum_uhrzeit_von: datetime = attr.ib(", "import datetime import attr from marshmallow import fields from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema", ".. 
HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\" # required attributes datum_uhrzeit_von: datetime =", "Messintervall endet (exklusiv) def _get_inclusive_start(self) -> datetime: \"\"\"return the inclusive start (used in", "for de-/serialization of Zeitreihenwert. \"\"\" class_name = Zeitreihenwert # type:ignore[assignment] # required attributes", ") #: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall endet (exklusiv)", "bestehend aus Zeitraum, Wert und Statusinformationen. .. HINT:: `Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_ \"\"\"", "endet (exklusiv) def _get_inclusive_start(self) -> datetime: \"\"\"return the inclusive start (used in the", "ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization of Zeitreihenwert. \"\"\" class_name = Zeitreihenwert # type:ignore[assignment]", "inclusive start (used in the validator)\"\"\" return self.datum_uhrzeit_von def _get_exclusive_end(self) -> datetime: \"\"\"return", "validator)\"\"\" return self.datum_uhrzeit_bis class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema): \"\"\" Schema for de-/serialization of Zeitreihenwert. \"\"\" class_name" ]
[ "this_line.startswith('import') and ' as ' in this_line: name_of_file = this_line.replace('import ','').split(' as ')[0].strip()", "True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment = False if inside_multiline_comment: #print('inside multiline comment:", "imports per file ====') for py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples) # for", "False for this_line in list_of_lines: line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces == '': #print('empty", "look for functions that are defined within that file print('==== local function calls", "' is not local') print('==== imports per file ====') for py_file, import_tuples in", "= dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples: print(origin_py_file, this_tup) for this_line in origin_list_of_lines: if", "in dict_of_functions_per_file.items(): print(\" subgraph cluster_\" + py_file.replace('.py','') + \"{\") for func in func_list:", "for this_line in origin_list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*',", "for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func in called_func: print(' \"'", "print('==== function calls across modules ====') dict_of_funcs_called_from_module = {} for origin_py_file, origin_list_of_lines in", "= False if inside_multiline_comment: #print('inside multiline comment: ',this_line) pass else: if not this_line.strip()", "= (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') print('==== imports", "this_line and func_in_file != which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in", "and ' as ' in this_line: name_of_file = this_line.replace('import 
','').split(' as ')[0].strip() if", "contains all the code sans comments dict_of_functions_per_file = {} for py_file, list_of_lines in", "for this_line in list_of_lines: if this_line.startswith('import') and ' as ' not in this_line:", "+ '.' + func + '\" --> \"' + py_file.replace(\".py\",\"\") + '.' +", "elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment = False if inside_multiline_comment: #print('inside multiline comment: ',this_line)", "== '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains all the code sans comments", "import glob import re list_of_py_files = glob.glob('*.py') py_dict = {} for py_file in", "each file, look for functions that call local functions from other local files", "local function calls ====') dict_of_funcs_called_per_func_per_file = {} for py_file, list_of_lines in py_code_dict.items(): print(py_file)", "this_line.startswith('import') and ' as ' not in this_line: name_of_file = this_line.replace('import ','').rstrip() if", "defined within that file print('==== local function calls ====') dict_of_funcs_called_per_func_per_file = {} for", "====') dict_of_funcs_called_from_module = {} for origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples", "for functions that call local functions from other local files print('==== function calls", "dict_of_functions_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for this_line", "py_file.replace(\".py\",\"\") + '.' 
+ func + '\";') print(\" }\") dict_of_imports_per_file = {} for", "if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] #", "= {} for py_file in list_of_py_files: #print(py_file) with open(py_file) as fil: py_content =", "py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples) # for each file, look for functions", "+ '(' in this_line and func_in_file != which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for", "not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] =", "per file ====') for py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples) # for each", "= {} for py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for this_line in", "' is not local') elif this_line.startswith('import') and ' as ' in this_line: name_of_file", "[] for this_line in list_of_lines: if this_line.startswith('import') and ' as ' not in", "= re.sub('#.*', '', this_line).rstrip() # print('line_without_comments = ',line_without_comments) if line_without_comments == '': #print('line", "'.' + func + '\" --> \"' + py_file.replace(\".py\",\"\") + '.' + func", "dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1] in this_line: called_func = re.sub('\\(.*', '', this_line) called_func", "in func_list: print(' \"' + py_file.replace(\".py\",\"\") + '.' 
+ func + '\";') print(\"", "py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('def", "the code sans comments dict_of_functions_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file]", "this_line.replace('import ','').split(' as ')[0].strip() if name_of_file + '.py' in list_of_py_files: import_alias = this_line.replace('import", "','') tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local')", "print(\" subgraph cluster_\" + py_file.replace('.py','') + \"{\") for func in func_list: print(' \"'", "'': #print('empty line') pass else: # line is not empty # print('this_line =", "py_file in list_of_py_files: #print(py_file) with open(py_file) as fil: py_content = fil.readlines() py_dict[py_file] =", "+ '\";') print(\" }\") dict_of_imports_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file]", "print('==== functions per file ====') for py_file, func_list in dict_of_functions_per_file.items(): print(\" subgraph cluster_\"", "within that file print('==== local function calls ====') dict_of_funcs_called_per_func_per_file = {} for py_file,", "'\";') # for each file, look for functions that call local functions from", "func_list: print(' \"' + py_file.replace(\".py\",\"\") + '.' + func + '\";') print(\" }\")", "\"' + py_file.replace(\".py\",\"\") + '.' 
+ func + '\";') print(\" }\") dict_of_imports_per_file =", "'': #print('line is only comment:', this_line) pass else: # line has content if", "+ '\";') # for each file, look for functions that call local functions", "called_func = re.sub('\\(.*', '', this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1],", "{} for this_line in list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func =", "as fil: py_content = fil.readlines() py_dict[py_file] = py_content py_code_dict = {} for py_file,", "dict_of_funcs_called_per_func_per_file = {} for py_file, list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for", "which_func) for func_in_file in dict_of_functions_per_file[py_file]: if func_in_file + '(' in this_line and func_in_file", "+ func + '\";') print(\" }\") dict_of_imports_per_file = {} for py_file, list_of_lines in", "dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line in list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '):", "print(\" }\") dict_of_imports_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] = []", "inside_multiline_comment = False if inside_multiline_comment: #print('inside multiline comment: ',this_line) pass else: if not", "is not empty # print('this_line = ', this_line) line_without_comments = re.sub('#.*', '', this_line).rstrip()", "origin_list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ',''))", "this_line in list_of_lines: line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces == '': #print('empty line') pass", "in list_of_lines: line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces == '': #print('empty 
line') pass else:", "{} for origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples = dict_of_imports_per_file[origin_py_file] for", "subgraph cluster_\" + py_file.replace('.py','') + \"{\") for func in func_list: print(' \"' +", "'', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func =', which_func) for func_in_file in", "py_dict[py_file] = py_content py_code_dict = {} for py_file, list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file]", "+ py_file.replace('.py','') + \"{\") for func in func_list: print(' \"' + py_file.replace(\".py\",\"\") +", "as ')[1].strip() tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not", "py_code_dict = {} for py_file, list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file] = [] inside_multiline_comment", "py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('import')", "else: # line has content if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: inside_multiline_comment = True", "' not in this_line: name_of_file = this_line.replace('import ','').rstrip() if name_of_file+'.py' in list_of_py_files: import_alias", "= py_content py_code_dict = {} for py_file, list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file] =", "list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ',''))", "if this_tup[1] in this_line: called_func = re.sub('\\(.*', '', this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1],", "dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') elif this_line.startswith('import') and ' as", "'.' 
+ which_func + '\" --> \"' + called_func + '\";') # EOF", "= [] for this_line in list_of_lines: if this_line.startswith('import') and ' as ' not", "+ '.' + func + '\";') print(\" }\") dict_of_imports_per_file = {} for py_file,", "# for each file, look for functions that are defined within that file", "dict_of_functions_per_file.items(): print(\" subgraph cluster_\" + py_file.replace('.py','') + \"{\") for func in func_list: print('", "re list_of_py_files = glob.glob('*.py') py_dict = {} for py_file in list_of_py_files: #print(py_file) with", "py_file.replace('.py','') + \"{\") for func in func_list: print(' \"' + py_file.replace(\".py\",\"\") + '.'", "}\") dict_of_imports_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for", "not empty # print('this_line = ', this_line) line_without_comments = re.sub('#.*', '', this_line).rstrip() #", "in this_line: name_of_file = this_line.replace('import ','').rstrip() if name_of_file+'.py' in list_of_py_files: import_alias = this_line.replace('import", "dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func in called_func: print(' \"' + py_file.replace(\".py\",\"\") + '.'", "functions that call local functions from other local files print('==== function calls across", "list_of_py_files = glob.glob('*.py') py_dict = {} for py_file in list_of_py_files: #print(py_file) with open(py_file)", "for each file, look for functions that call local functions from other local", "code sans comments dict_of_functions_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] =", "+ '.' 
+ which_func + '\" --> \"' + called_func + '\";') #", "not in this_line: name_of_file = this_line.replace('import ','').rstrip() if name_of_file+'.py' in list_of_py_files: import_alias =", "for origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples = dict_of_imports_per_file[origin_py_file] for this_tup", "dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples: print(origin_py_file, this_tup) for this_line in origin_list_of_lines: if not", "tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') elif", "= {} for this_line in list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func", "# for each file, look for functions that call local functions from other", "line') pass else: # line is not empty # print('this_line = ', this_line)", "import_alias = this_line.replace('import ','').split(' as ')[1].strip() tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file", "',this_line) pass else: if not this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now", "'.' 
+ func + '\";') # for each file, look for functions that", "empty # print('this_line = ', this_line) line_without_comments = re.sub('#.*', '', this_line).rstrip() # print('line_without_comments", "this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func", "local files print('==== function calls across modules ====') dict_of_funcs_called_from_module = {} for origin_py_file,", "# line has content if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: inside_multiline_comment = True elif", "name_of_file = this_line.replace('import ','').split(' as ')[0].strip() if name_of_file + '.py' in list_of_py_files: import_alias", "pass else: # line is not empty # print('this_line = ', this_line) line_without_comments", "# print('line_without_comments = ',line_without_comments) if line_without_comments == '': #print('line is only comment:', this_line)", "as ' not in this_line: name_of_file = this_line.replace('import ','').rstrip() if name_of_file+'.py' in list_of_py_files:", "list_of_lines: if this_line.startswith('def '): #print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('====", "comment: ',this_line) pass else: if not this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict", "in list_of_py_files: import_alias = this_line.replace('import ','').split(' as ')[1].strip() tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup)", "print('==== local function calls ====') dict_of_funcs_called_per_func_per_file = {} for py_file, list_of_lines in py_code_dict.items():", "= (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') elif this_line.startswith('import')", 
"for func in called_func: print(' \"' + py_file.replace(\".py\",\"\") + '.' + func +", "for this_line in list_of_lines: line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces == '': #print('empty line')", "'(' in this_line and func_in_file != which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func,", "print('this_line = ', this_line) line_without_comments = re.sub('#.*', '', this_line).rstrip() # print('line_without_comments = ',line_without_comments)", "dict_of_funcs_called_from_module = {} for origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples =", "else: # line is not empty # print('this_line = ', this_line) line_without_comments =", "re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func =', which_func) for func_in_file", "this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1] in this_line: called_func = re.sub('\\(.*', '',", "'\";') print(\" }\") dict_of_imports_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] =", "list_of_py_files: import_alias = this_line.replace('import ','') tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file +", "list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('def '):", "[] for this_line in list_of_lines: if this_line.startswith('def '): #print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*',", "this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1], called_func) print(' \"' +", "# print(func_in_file, 
this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func", "inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment = False if inside_multiline_comment: #print('inside", "{} for py_file, list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file] = [] inside_multiline_comment = False", "this_line: name_of_file = this_line.replace('import ','').rstrip() if name_of_file+'.py' in list_of_py_files: import_alias = this_line.replace('import ','')", "other local files print('==== function calls across modules ====') dict_of_funcs_called_from_module = {} for", "func in func_list: print(' \"' + py_file.replace(\".py\",\"\") + '.' + func + '\";')", "inside_multiline_comment: inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment = False if inside_multiline_comment:", "all the code sans comments dict_of_functions_per_file = {} for py_file, list_of_lines in py_code_dict.items():", "dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func in called_func: print('", "call local functions from other local files print('==== function calls across modules ====')", "fil.readlines() py_dict[py_file] = py_content py_code_dict = {} for py_file, list_of_lines in py_dict.items(): #print(py_file)", "dict_of_functions_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('def '): #print(re.sub('\\(.*', '', this_line.replace('def", "in this_line: called_func = re.sub('\\(.*', '', this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file,", "print('==== imports per file ====') for 
py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples) #", "in py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('import') and '", "if name_of_file+'.py' in list_of_py_files: import_alias = this_line.replace('import ','') tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup)", "False if inside_multiline_comment: #print('inside multiline comment: ',this_line) pass else: if not this_line.strip() ==", "for py_file, list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file] = [] inside_multiline_comment = False for", "print(' \"' + py_file.replace(\".py\",\"\") + '.' + func + '\";') print(\" }\") dict_of_imports_per_file", "dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') print('==== imports per file ====')", "is not local') print('==== imports per file ====') for py_file, import_tuples in dict_of_imports_per_file.items():", "and inside_multiline_comment: inside_multiline_comment = False if inside_multiline_comment: #print('inside multiline comment: ',this_line) pass else:", "+ func + '\" --> \"' + py_file.replace(\".py\",\"\") + '.' 
+ func +", "content if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment:", "','').split(' as ')[1].strip() tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is", "functions that are defined within that file print('==== local function calls ====') dict_of_funcs_called_per_func_per_file", "#print(py_file) py_code_dict[py_file] = [] inside_multiline_comment = False for this_line in list_of_lines: line_without_trailing_spaces =", "this_tup) for this_line in origin_list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func =", "print(name_of_file + ' is not local') print('==== imports per file ====') for py_file,", "modules ====') dict_of_funcs_called_from_module = {} for origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {}", "this_line).rstrip() # print('line_without_comments = ',line_without_comments) if line_without_comments == '': #print('line is only comment:',", "this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1], called_func) print(' \"' + origin_py_file.replace(\".py\",\"\") + '.' 
+", "re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1] in this_line: called_func =", "sans comments dict_of_functions_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] = []", "py_dict = {} for py_file in list_of_py_files: #print(py_file) with open(py_file) as fil: py_content", "this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment = False if inside_multiline_comment: #print('inside multiline comment: ',this_line) pass", "local functions from other local files print('==== function calls across modules ====') dict_of_funcs_called_from_module", "for func in func_list: print(' \"' + py_file.replace(\".py\",\"\") + '.' + func +", "for this_line in list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*',", "file ====') for py_file, func_list in dict_of_functions_per_file.items(): print(\" subgraph cluster_\" + py_file.replace('.py','') +", "print('which_func =', which_func) for func_in_file in dict_of_functions_per_file[py_file]: if func_in_file + '(' in this_line", "= re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1], called_func) print(' \"' + origin_py_file.replace(\".py\",\"\") +", "py_file.replace(\".py\",\"\") + '.' + func + '\" --> \"' + py_file.replace(\".py\",\"\") + '.'", "for each file, look for functions that are defined within that file print('====", "called_func) print(' \"' + origin_py_file.replace(\".py\",\"\") + '.' 
+ which_func + '\" --> \"'", "this_line: name_of_file = this_line.replace('import ','').split(' as ')[0].strip() if name_of_file + '.py' in list_of_py_files:", "= fil.readlines() py_dict[py_file] = py_content py_code_dict = {} for py_file, list_of_lines in py_dict.items():", "'): #print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('==== functions per file", "this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func =', which_func) for func_in_file in dict_of_functions_per_file[py_file]:", "= {} for py_file, list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file] = [] inside_multiline_comment =", "list_of_py_files: import_alias = this_line.replace('import ','').split(' as ')[1].strip() tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else:", "file print('==== local function calls ====') dict_of_funcs_called_per_func_per_file = {} for py_file, list_of_lines in", "is only comment:', this_line) pass else: # line has content if this_line.strip().startswith('\"\"\"') and", "for this_line in list_of_lines: if this_line.startswith('def '): #print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '',", "in this_line: name_of_file = this_line.replace('import ','').split(' as ')[0].strip() if name_of_file + '.py' in", "import_tuples = dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples: print(origin_py_file, this_tup) for this_line in origin_list_of_lines:", "= this_line.replace('import ','').rstrip() if name_of_file+'.py' in list_of_py_files: import_alias = this_line.replace('import ','') tup =", "= {} import_tuples = dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples: print(origin_py_file, this_tup) for this_line", "in list_of_lines: if this_line.startswith('def '): 
#print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ','')))", "and ' as ' not in this_line: name_of_file = this_line.replace('import ','').rstrip() if name_of_file+'.py'", "'): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func =',", "#print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('==== functions per file ====')", "origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples = dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples:", "print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line in list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def", "else: print(name_of_file + ' is not local') print('==== imports per file ====') for", "in import_tuples: print(origin_py_file, this_tup) for this_line in origin_list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def", "# print('this_line = ', this_line) line_without_comments = re.sub('#.*', '', this_line).rstrip() # print('line_without_comments =", "dict_of_imports_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for this_line", "this_line in list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '',", "not this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains all the code", "in origin_list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', 
this_line.replace('def", "in dict_of_imports_per_file.items(): print(py_file, import_tuples) # for each file, look for functions that are", "====') for py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples) # for each file, look", "in list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def", "')[0].strip() if name_of_file + '.py' in list_of_py_files: import_alias = this_line.replace('import ','').split(' as ')[1].strip()", "multiline comment: ',this_line) pass else: if not this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) #", "called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func in called_func: print(' \"' + py_file.replace(\".py\",\"\")", "this_line) pass else: # line has content if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: inside_multiline_comment", "this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1]", "only comment:', this_line) pass else: # line has content if this_line.strip().startswith('\"\"\"') and not", "func_in_file != which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if", "#print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains all the code sans comments dict_of_functions_per_file =", "list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line in list_of_lines: if not", "if line_without_trailing_spaces == '': #print('empty line') pass else: # line is not empty", 
"cluster_\" + py_file.replace('.py','') + \"{\") for func in func_list: print(' \"' + py_file.replace(\".py\",\"\")", "= [] inside_multiline_comment = False for this_line in list_of_lines: line_without_trailing_spaces = this_line.rstrip() if", "in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line in list_of_lines: if not this_line.lstrip().startswith('@'):", "not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] =", "this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = []", "====') for py_file, func_list in dict_of_functions_per_file.items(): print(\" subgraph cluster_\" + py_file.replace('.py','') + \"{\")", "name_of_file+'.py' in list_of_py_files: import_alias = this_line.replace('import ','') tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else:", "this_tup[1], called_func) print(' \"' + origin_py_file.replace(\".py\",\"\") + '.' 
+ which_func + '\" -->", "#print('inside multiline comment: ',this_line) pass else: if not this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip())", "for this_tup in import_tuples: print(origin_py_file, this_tup) for this_line in origin_list_of_lines: if not this_line.lstrip().startswith('@'):", "= this_line.replace('import ','').split(' as ')[1].strip() tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file +", "import_tuples: print(origin_py_file, this_tup) for this_line in origin_list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '):", "line_without_trailing_spaces == '': #print('empty line') pass else: # line is not empty #", "if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment", "files print('==== function calls across modules ====') dict_of_funcs_called_from_module = {} for origin_py_file, origin_list_of_lines", "called_func: print(' \"' + py_file.replace(\".py\",\"\") + '.' 
+ func + '\" --> \"'", "dict_of_imports_per_file.items(): print(py_file, import_tuples) # for each file, look for functions that are defined", "this_line in list_of_lines: if this_line.startswith('import') and ' as ' not in this_line: name_of_file", "====') dict_of_funcs_called_per_func_per_file = {} for py_file, list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {}", "this_tup[1] in this_line: called_func = re.sub('\\(.*', '', this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func)", "dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func =', which_func) for func_in_file in dict_of_functions_per_file[py_file]: if func_in_file", "functions per file ====') for py_file, func_list in dict_of_functions_per_file.items(): print(\" subgraph cluster_\" +", "as ' in this_line: name_of_file = this_line.replace('import ','').split(' as ')[0].strip() if name_of_file +", "= ', this_line) line_without_comments = re.sub('#.*', '', this_line).rstrip() # print('line_without_comments = ',line_without_comments) if", "with open(py_file) as fil: py_content = fil.readlines() py_dict[py_file] = py_content py_code_dict = {}", "which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1] in this_line:", "if inside_multiline_comment: #print('inside multiline comment: ',this_line) pass else: if not this_line.strip() == '\"\"\"':", "py_file, list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line in list_of_lines: if", "{} for py_file, list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line in", "func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func in called_func: print(' \"' +", 
"dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples = dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples: print(origin_py_file, this_tup) for", "'', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1] in this_line: called_func = re.sub('\\(.*',", "= this_line.rstrip() if line_without_trailing_spaces == '': #print('empty line') pass else: # line is", "this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('==== functions per file ====') for py_file,", "local') print('==== imports per file ====') for py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples)", "py_content py_code_dict = {} for py_file, list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file] = []", "= glob.glob('*.py') py_dict = {} for py_file in list_of_py_files: #print(py_file) with open(py_file) as", "line_without_comments == '': #print('line is only comment:', this_line) pass else: # line has", "',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('==== functions per file ====') for py_file, func_list", "in py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('def '): #print(re.sub('\\(.*',", "in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func in called_func: print(' \"' + py_file.replace(\".py\",\"\") +", "list_of_py_files: #print(py_file) with open(py_file) as fil: py_content = fil.readlines() py_dict[py_file] = py_content py_code_dict", "this_line: called_func = re.sub('\\(.*', '', this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func,", "in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples = 
dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples: print(origin_py_file,", "= {} for py_file, list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line", "func_list in dict_of_functions_per_file.items(): print(\" subgraph cluster_\" + py_file.replace('.py','') + \"{\") for func in", "func + '\";') # for each file, look for functions that call local", "if len(called_func)>0: for func in called_func: print(' \"' + py_file.replace(\".py\",\"\") + '.' +", "local') elif this_line.startswith('import') and ' as ' in this_line: name_of_file = this_line.replace('import ','').split('", "len(called_func)>0: for func in called_func: print(' \"' + py_file.replace(\".py\",\"\") + '.' + func", "= [] if this_tup[1] in this_line: called_func = re.sub('\\(.*', '', this_line) called_func =", "#print(py_file) with open(py_file) as fil: py_content = fil.readlines() py_dict[py_file] = py_content py_code_dict =", "if not this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains all the", "func in called_func: print(' \"' + py_file.replace(\".py\",\"\") + '.' + func + '\"", "this_line.replace('import ','') tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not", "','').split(' as ')[0].strip() if name_of_file + '.py' in list_of_py_files: import_alias = this_line.replace('import ','').split('", "',''))) print('==== functions per file ====') for py_file, func_list in dict_of_functions_per_file.items(): print(\" subgraph", "+ \"{\") for func in func_list: print(' \"' + py_file.replace(\".py\",\"\") + '.' 
+", "function calls across modules ====') dict_of_funcs_called_from_module = {} for origin_py_file, origin_list_of_lines in py_code_dict.items():", "list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file] = [] inside_multiline_comment = False for this_line in", "func_in_file + '(' in this_line and func_in_file != which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file)", "origin_py_file.replace(\".py\",\"\") + '.' + which_func + '\" --> \"' + called_func + '\";')", "if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func]", "print(' \"' + py_file.replace(\".py\",\"\") + '.' + func + '\" --> \"' +", "print(name_of_file + ' is not local') elif this_line.startswith('import') and ' as ' in", "py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('import') and ' as", "= {} for py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for this_line in", "import_alias = this_line.replace('import ','') tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + '", "print('line_without_comments = ',line_without_comments) if line_without_comments == '': #print('line is only comment:', this_line) pass", "+ '.py' in list_of_py_files: import_alias = this_line.replace('import ','').split(' as ')[1].strip() tup = (name_of_file,", "this_line) line_without_comments = re.sub('#.*', '', this_line).rstrip() # print('line_without_comments = ',line_without_comments) if line_without_comments ==", "'', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('==== functions per file ====') for", "which_func, this_tup[1], called_func) print(' 
\"' + origin_py_file.replace(\".py\",\"\") + '.' + which_func + '\"", "print(origin_py_file, this_tup) for this_line in origin_list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func", "if this_line.startswith('def '): #print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('==== functions", "this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment =", "if name_of_file + '.py' in list_of_py_files: import_alias = this_line.replace('import ','').split(' as ')[1].strip() tup", "this_line.replace('def ',''))) print('==== functions per file ====') for py_file, func_list in dict_of_functions_per_file.items(): print(\"", "func_in_file in dict_of_functions_per_file[py_file]: if func_in_file + '(' in this_line and func_in_file != which_func:", "line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces == '': #print('empty line') pass else: # line", "python3 import glob import re list_of_py_files = glob.glob('*.py') py_dict = {} for py_file", "has content if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"') and", "'', this_line).rstrip() # print('line_without_comments = ',line_without_comments) if line_without_comments == '': #print('line is only", "py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains all the code sans comments dict_of_functions_per_file = {}", "this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = []", "elif this_line.startswith('import') and ' as ' in 
this_line: name_of_file = this_line.replace('import ','').split(' as", "that are defined within that file print('==== local function calls ====') dict_of_funcs_called_per_func_per_file =", "','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func =', which_func) for func_in_file in dict_of_functions_per_file[py_file]: if", "(name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') elif this_line.startswith('import') and", "!= which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0:", "(name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') print('==== imports per", "','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1] in this_line: called_func = re.sub('\\(.*', '', this_line)", "= False for this_line in list_of_lines: line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces == '':", "that call local functions from other local files print('==== function calls across modules", "re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1], called_func) print(' \"' + origin_py_file.replace(\".py\",\"\") + '.'", "pass else: if not this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains", "+ ' is not local') print('==== imports per file ====') for py_file, import_tuples", "list_of_lines: if this_line.startswith('import') and ' as ' not in this_line: name_of_file = this_line.replace('import", "dict_of_functions_per_file[py_file]: if func_in_file + '(' in this_line and func_in_file != which_func: # print(func_in_file,", "functions from other local files 
print('==== function calls across modules ====') dict_of_funcs_called_from_module =", "not inside_multiline_comment: inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment = False if", "comments dict_of_functions_per_file = {} for py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for", "\"' + origin_py_file.replace(\".py\",\"\") + '.' + which_func + '\" --> \"' + called_func", "py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line in list_of_lines: if not this_line.lstrip().startswith('@'): if", "line is not empty # print('this_line = ', this_line) line_without_comments = re.sub('#.*', '',", "=', which_func) for func_in_file in dict_of_functions_per_file[py_file]: if func_in_file + '(' in this_line and", "calls ====') dict_of_funcs_called_per_func_per_file = {} for py_file, list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] =", "which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func =', which_func)", "in py_dict.items(): #print(py_file) py_code_dict[py_file] = [] inside_multiline_comment = False for this_line in list_of_lines:", "{} for py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for this_line in list_of_lines:", "are defined within that file print('==== local function calls ====') dict_of_funcs_called_per_func_per_file = {}", "# py_code_dict now contains all the code sans comments dict_of_functions_per_file = {} for", "called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1], called_func) print(' \"' + origin_py_file.replace(\".py\",\"\")", "pass else: # line has content if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: 
inside_multiline_comment =", "else: if not this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains all", "if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if", "and func_in_file != which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items():", "+ '.' + func + '\";') # for each file, look for functions", "is not local') elif this_line.startswith('import') and ' as ' in this_line: name_of_file =", "not local') print('==== imports per file ====') for py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file,", "tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') print('====", "for py_file in list_of_py_files: #print(py_file) with open(py_file) as fil: py_content = fil.readlines() py_dict[py_file]", "#print(origin_py_file, which_func, this_tup[1], called_func) print(' \"' + origin_py_file.replace(\".py\",\"\") + '.' 
+ which_func +", "for functions that are defined within that file print('==== local function calls ====')", "file, look for functions that call local functions from other local files print('====", "re.sub('\\(.*', '', this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1], called_func) print('", "# line is not empty # print('this_line = ', this_line) line_without_comments = re.sub('#.*',", "{} import_tuples = dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples: print(origin_py_file, this_tup) for this_line in", "',line_without_comments) if line_without_comments == '': #print('line is only comment:', this_line) pass else: #", "look for functions that call local functions from other local files print('==== function", "[] inside_multiline_comment = False for this_line in list_of_lines: line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces", "function calls ====') dict_of_funcs_called_per_func_per_file = {} for py_file, list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file]", "'', this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1], called_func) print(' \"'", "#print('empty line') pass else: # line is not empty # print('this_line = ',", "func + '\";') print(\" }\") dict_of_imports_per_file = {} for py_file, list_of_lines in py_code_dict.items():", "# print('which_func =', which_func) for func_in_file in dict_of_functions_per_file[py_file]: if func_in_file + '(' in", "+ py_file.replace(\".py\",\"\") + '.' 
+ func + '\";') # for each file, look", "' as ' in this_line: name_of_file = this_line.replace('import ','').split(' as ')[0].strip() if name_of_file", "if line_without_comments == '': #print('line is only comment:', this_line) pass else: # line", "= this_line.replace('import ','') tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is", "'): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1] in", "in called_func: print(' \"' + py_file.replace(\".py\",\"\") + '.' + func + '\" -->", "+ py_file.replace(\".py\",\"\") + '.' + func + '\" --> \"' + py_file.replace(\".py\",\"\") +", "= this_line.replace('import ','').split(' as ')[0].strip() if name_of_file + '.py' in list_of_py_files: import_alias =", "this_line.replace('import ','').rstrip() if name_of_file+'.py' in list_of_py_files: import_alias = this_line.replace('import ','') tup = (name_of_file,", "for py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for this_line in list_of_lines: if", "for py_file, list_of_lines in py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for this_line in list_of_lines: if", "fil: py_content = fil.readlines() py_dict[py_file] = py_content py_code_dict = {} for py_file, list_of_lines", "= {} for origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples = dict_of_imports_per_file[origin_py_file]", "not local') elif this_line.startswith('import') and ' as ' in this_line: name_of_file = this_line.replace('import", "+ '\" --> \"' + py_file.replace(\".py\",\"\") + '.' 
+ func + '\";') #", "glob import re list_of_py_files = glob.glob('*.py') py_dict = {} for py_file in list_of_py_files:", "for py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples) # for each file, look for", "py_code_dict[py_file] = [] inside_multiline_comment = False for this_line in list_of_lines: line_without_trailing_spaces = this_line.rstrip()", "dict_of_imports_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('import') and ' as '", "that file print('==== local function calls ====') dict_of_funcs_called_per_func_per_file = {} for py_file, list_of_lines", "for py_file, list_of_lines in py_code_dict.items(): print(py_file) dict_of_funcs_called_per_func_per_file[py_file] = {} for this_line in list_of_lines:", "glob.glob('*.py') py_dict = {} for py_file in list_of_py_files: #print(py_file) with open(py_file) as fil:", "#!/usr/bin/env python3 import glob import re list_of_py_files = glob.glob('*.py') py_dict = {} for", "py_code_dict now contains all the code sans comments dict_of_functions_per_file = {} for py_file,", "in list_of_py_files: import_alias = this_line.replace('import ','') tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file", "inside_multiline_comment = False for this_line in list_of_lines: line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces ==", "')[1].strip() tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local')", "py_dict.items(): #print(py_file) py_code_dict[py_file] = [] inside_multiline_comment = False for this_line in list_of_lines: line_without_trailing_spaces", "this_line in origin_list_of_lines: if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '',", "\"' + py_file.replace(\".py\",\"\") + '.' 
+ func + '\";') # for each file,", "= ',line_without_comments) if line_without_comments == '': #print('line is only comment:', this_line) pass else:", "as ')[0].strip() if name_of_file + '.py' in list_of_py_files: import_alias = this_line.replace('import ','').split(' as", "= [] # print('which_func =', which_func) for func_in_file in dict_of_functions_per_file[py_file]: if func_in_file +", "calls across modules ====') dict_of_funcs_called_from_module = {} for origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file]", "inside_multiline_comment: #print('inside multiline comment: ',this_line) pass else: if not this_line.strip() == '\"\"\"': #print(this_line.rstrip())", "'\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains all the code sans comments dict_of_functions_per_file", "which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for", "for func_in_file in dict_of_functions_per_file[py_file]: if func_in_file + '(' in this_line and func_in_file !=", "' in this_line: name_of_file = this_line.replace('import ','').split(' as ')[0].strip() if name_of_file + '.py'", "[] # print('which_func =', which_func) for func_in_file in dict_of_functions_per_file[py_file]: if func_in_file + '('", "py_file, list_of_lines in py_dict.items(): #print(py_file) py_code_dict[py_file] = [] inside_multiline_comment = False for this_line", "{} for py_file in list_of_py_files: #print(py_file) with open(py_file) as fil: py_content = fil.readlines()", "line has content if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment: inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"')", "import re list_of_py_files = glob.glob('*.py') py_dict = {} for py_file in 
list_of_py_files: #print(py_file)", "name_of_file = this_line.replace('import ','').rstrip() if name_of_file+'.py' in list_of_py_files: import_alias = this_line.replace('import ','') tup", "from other local files print('==== function calls across modules ====') dict_of_funcs_called_from_module = {}", "list_of_lines: line_without_trailing_spaces = this_line.rstrip() if line_without_trailing_spaces == '': #print('empty line') pass else: #", "each file, look for functions that are defined within that file print('==== local", "for py_file, func_list in dict_of_functions_per_file.items(): print(\" subgraph cluster_\" + py_file.replace('.py','') + \"{\") for", "+ func + '\";') # for each file, look for functions that call", "--> \"' + py_file.replace(\".py\",\"\") + '.' + func + '\";') # for each", "','').rstrip() if name_of_file+'.py' in list_of_py_files: import_alias = this_line.replace('import ','') tup = (name_of_file, import_alias)", "file ====') for py_file, import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples) # for each file,", "if not this_line.lstrip().startswith('@'): if this_line.lstrip().startswith('def '): which_func = re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func]", "called_func) #print(origin_py_file, which_func, this_tup[1], called_func) print(' \"' + origin_py_file.replace(\".py\",\"\") + '.' + which_func", "'\" --> \"' + py_file.replace(\".py\",\"\") + '.' + func + '\";') # for", "+ origin_py_file.replace(\".py\",\"\") + '.' 
+ which_func + '\" --> \"' + called_func +", "dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('==== functions per file ====') for py_file, func_list in", "this_line in list_of_lines: if this_line.startswith('def '): #print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def", "= re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_per_func_per_file[py_file][which_func] = [] # print('which_func =', which_func) for", "', this_line) line_without_comments = re.sub('#.*', '', this_line).rstrip() # print('line_without_comments = ',line_without_comments) if line_without_comments", "== '': #print('empty line') pass else: # line is not empty # print('this_line", "print(py_file, import_tuples) # for each file, look for functions that are defined within", "origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples = dict_of_imports_per_file[origin_py_file] for this_tup in", "= [] for this_line in list_of_lines: if this_line.startswith('def '): #print(re.sub('\\(.*', '', this_line.replace('def ','')))", "+ ' is not local') elif this_line.startswith('import') and ' as ' in this_line:", "py_file.replace(\".py\",\"\") + '.' 
+ func + '\";') # for each file, look for", "in dict_of_functions_per_file[py_file]: if func_in_file + '(' in this_line and func_in_file != which_func: #", "this_tup in import_tuples: print(origin_py_file, this_tup) for this_line in origin_list_of_lines: if not this_line.lstrip().startswith('@'): if", "print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func in", "'.py' in list_of_py_files: import_alias = this_line.replace('import ','').split(' as ')[1].strip() tup = (name_of_file, import_alias)", "py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] = {} import_tuples = dict_of_imports_per_file[origin_py_file] for this_tup in import_tuples: print(origin_py_file, this_tup)", "'', this_line.replace('def ',''))) print('==== functions per file ====') for py_file, func_list in dict_of_functions_per_file.items():", "== '': #print('line is only comment:', this_line) pass else: # line has content", "print(' \"' + origin_py_file.replace(\".py\",\"\") + '.' 
+ which_func + '\" --> \"' +", "this_line.strip() == '\"\"\"': #print(this_line.rstrip()) py_code_dict[py_file].append(line_without_comments.rstrip()) # py_code_dict now contains all the code sans", "import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') elif this_line.startswith('import') and '", "import_tuples in dict_of_imports_per_file.items(): print(py_file, import_tuples) # for each file, look for functions that", "[] if this_tup[1] in this_line: called_func = re.sub('\\(.*', '', this_line) called_func = re.sub('.*'+this_tup[1],", "open(py_file) as fil: py_content = fil.readlines() py_dict[py_file] = py_content py_code_dict = {} for", "name_of_file + '.py' in list_of_py_files: import_alias = this_line.replace('import ','').split(' as ')[1].strip() tup =", "comment:', this_line) pass else: # line has content if this_line.strip().startswith('\"\"\"') and not inside_multiline_comment:", "inside_multiline_comment: inside_multiline_comment = False if inside_multiline_comment: #print('inside multiline comment: ',this_line) pass else: if", "{} for py_file, list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for this_line in list_of_lines:", "= True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment = False if inside_multiline_comment: #print('inside multiline", "in list_of_py_files: #print(py_file) with open(py_file) as fil: py_content = fil.readlines() py_dict[py_file] = py_content", "#print('line is only comment:', this_line) pass else: # line has content if this_line.strip().startswith('\"\"\"')", "if this_line.startswith('import') and ' as ' not in this_line: name_of_file = this_line.replace('import ','').rstrip()", "py_content = fil.readlines() py_dict[py_file] = py_content py_code_dict = {} for py_file, list_of_lines in", "' as ' not in this_line: name_of_file = this_line.replace('import ','').rstrip() if name_of_file+'.py' in", "func + 
'\" --> \"' + py_file.replace(\".py\",\"\") + '.' + func + '\";')", "import_tuples) # for each file, look for functions that are defined within that", "this_line.replace('import ','').split(' as ')[1].strip() tup = (name_of_file, import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + '", "import_alias) dict_of_imports_per_file[py_file].append(tup) else: print(name_of_file + ' is not local') print('==== imports per file", "if func_in_file + '(' in this_line and func_in_file != which_func: # print(func_in_file, this_line)", "line_without_comments = re.sub('#.*', '', this_line).rstrip() # print('line_without_comments = ',line_without_comments) if line_without_comments == '':", "else: print(name_of_file + ' is not local') elif this_line.startswith('import') and ' as '", "and not inside_multiline_comment: inside_multiline_comment = True elif this_line.strip().startswith('\"\"\"') and inside_multiline_comment: inside_multiline_comment = False", "in this_line and func_in_file != which_func: # print(func_in_file, this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func", "re.sub('#.*', '', this_line).rstrip() # print('line_without_comments = ',line_without_comments) if line_without_comments == '': #print('line is", "this_line.startswith('def '): #print(re.sub('\\(.*', '', this_line.replace('def ',''))) dict_of_functions_per_file[py_file].append(re.sub('\\(.*', '', this_line.replace('def ',''))) print('==== functions per", "py_code_dict.items(): dict_of_functions_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('def '): #print(re.sub('\\(.*', '',", "= re.sub('\\(.*', '', this_line) called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func) #print(origin_py_file, which_func, this_tup[1], called_func)", "\"' + py_file.replace(\".py\",\"\") + '.' + func + '\" --> \"' + py_file.replace(\".py\",\"\")", "+ py_file.replace(\".py\",\"\") + '.' 
+ func + '\";') print(\" }\") dict_of_imports_per_file = {}", "list_of_lines in py_code_dict.items(): dict_of_imports_per_file[py_file] = [] for this_line in list_of_lines: if this_line.startswith('import') and", "across modules ====') dict_of_funcs_called_from_module = {} for origin_py_file, origin_list_of_lines in py_code_dict.items(): dict_of_funcs_called_from_module[origin_py_file] =", "this_line) dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file) for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items(): if len(called_func)>0: for func in called_func:", "file, look for functions that are defined within that file print('==== local function", "per file ====') for py_file, func_list in dict_of_functions_per_file.items(): print(\" subgraph cluster_\" + py_file.replace('.py','')", "this_line.rstrip() if line_without_trailing_spaces == '': #print('empty line') pass else: # line is not", "now contains all the code sans comments dict_of_functions_per_file = {} for py_file, list_of_lines", "\"{\") for func in func_list: print(' \"' + py_file.replace(\".py\",\"\") + '.' + func", "py_file, func_list in dict_of_functions_per_file.items(): print(\" subgraph cluster_\" + py_file.replace('.py','') + \"{\") for func", "= re.sub('\\(.*', '', this_line.replace('def ','')) dict_of_funcs_called_from_module[origin_py_file][which_func] = [] if this_tup[1] in this_line: called_func", "'.' + func + '\";') print(\" }\") dict_of_imports_per_file = {} for py_file, list_of_lines", "in list_of_lines: if this_line.startswith('import') and ' as ' not in this_line: name_of_file =" ]
[ "'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE:", "sp_auth_url, enabled, description) changed = True else: if _needs_update(module, service_provider): service_provider = cloud.update_service_provider(", "KIND, either express or implied. # See the License for the specific language", "def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud", "Unless required by applicable law or agreed to in writing, software # distributed", "service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description]) if state == 'absent': if service_provider: cloud.delete_service_provider(sp_id) changed", "= module.params['state'] try: cloud = _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if module.check_mode: changed =", "provider on the keystone identity provider. options: service_provider_id: description: - A globally unique", "Exception as e: module.fail_json(msg=\"service provider failed: %s\" % str(e)) # this is magic,", "'present': if not service_provider: return True return _needs_update(module, service_provider) if state == 'absent'", "this file except in compliance with the License. 
# You may obtain a", "== 'absent': if service_provider: cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed) except Exception as e:", "is used to authenticate with the identity provider This URL should be available", "return cloud class ShadePlaceholder(object): def __init__(self, keystone_client): self.client = keystone_client def get_service_provider(self, sp_id):", "ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- author: <NAME> module: keystone_service_provider short_description:", "cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object): def __init__(self, keystone_client): self.client = keystone_client", "keystone idp description: - This module registers a keystone service provider on the", "service_provider): \"\"\"Check for differences in the updatable values. Note: Names cannot be updated.", "def create_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url,", "required: true service_provider_url: description: - URL that is found in the service provider's", "is usually found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description: - URL", ") module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade", "ANY KIND, either express or implied. # See the License for the specific", "the updatable values. Note: Names cannot be updated. 
\"\"\" params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url',", "in self.client.federation.service_providers.list(): if getattr(sp, 'id') == sp_id: return sp return None def create_service_provider(", "type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module =", "description=description) return service_provider def update_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider =", "description: - URL that is found in the service provider's metadata (Which is", "service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs =", "module.exit_json(changed=changed) except Exception as e: module.fail_json(msg=\"service provider failed: %s\" % str(e)) # this", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "%s\" % str(e)) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import *", "provider's metadata (Which is usually found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url:", "the specific language governing permissions and # limitations under the License. 
# try:", "= self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id)", "from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()", "provider This URL should be available once the idp registered on the sp", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "the sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description: - A value of", "state == 'present': if not service_provider: return True return _needs_update(module, service_provider) if state", "cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object): def __init__(self, keystone_client):", "OF ANY KIND, either express or implied. 
# See the License for the", "shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object): def __init__(self,", "service_provider_url: description: - URL that is found in the service provider's metadata (Which", "= AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module')", "see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ ==", "except Exception as e: module.fail_json(msg=\"service provider failed: %s\" % str(e)) # this is", "None) if module_val != getattr(service_provider, sp_attr, None): return True return False def _system_state_change(module,", "sp return None def create_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider =", "argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent',", "disables it. default: True description: description: The description of the service provider. state:", "module.params['service_provider_auth_url'] enabled = module.params['enabled'] description = module.params['description'] state = module.params['state'] try: cloud =", "License. # try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False", "This module registers a keystone service provider on the keystone identity provider. 
options:", "sp_id, sp_url, sp_auth_url, enabled, description) changed = True else: if _needs_update(module, service_provider): service_provider", "True else: if _needs_update(module, service_provider): service_provider = cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled, description)", "module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled = module.params['enabled'] description = module.params['description']", "enabled, description]) if state == 'absent': if service_provider: cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed)", "provider. state: description: - Indicate desired state of the resource choices: ['present', 'absent']", "\"\"\"Check for differences in the updatable values. Note: Names cannot be updated. \"\"\"", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def update_service_provider( self, sp_id, sp_url, sp_auth_url, enabled,", "-https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description: - URL that is used to authenticate with", "desired state of the resource choices: ['present', 'absent'] default: present ''' def _needs_update(module,", "specific language governing permissions and # limitations under the License. # try: import", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "identity provider. 
options: service_provider_id: description: - A globally unique id to identify the", "state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if", "resource choices: ['present', 'absent'] default: present ''' def _needs_update(module, service_provider): \"\"\"Check for differences", "enabled=enabled, description=description) return service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec = openstack_full_argument_spec(", "HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- author:", "service_provider): state = module.params['state'] if state == 'present': if not service_provider: return True", "description) changed = True else: if _needs_update(module, service_provider): service_provider = cloud.update_service_provider( sp_id, sp_url,", "if not service_provider: return True return _needs_update(module, service_provider) if state == 'absent' and", "URL that is found in the service provider's metadata (Which is usually found", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description)", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "module_attr in params_dict.items(): module_val = module.params.get(module_attr, None) if module_val != getattr(service_provider, sp_attr, None):", "HAS_SHADE = False DOCUMENTATION = ''' --- author: <NAME> module: keystone_service_provider short_description: register", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# -*- coding: utf-8 -*- # Copyright 2016, IBM # 
Licensed under the", "values. Note: Names cannot be updated. \"\"\" params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description')", "sp_auth_url, enabled, description) changed = True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description])", "DOCUMENTATION = ''' --- author: <NAME> module: keystone_service_provider short_description: register sp on keystone", "is required for this module') sp_id = module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url =", "service_provider def update_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.update( service_provider=sp_id,", "the service provider's metadata (Which is usually found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required:", "required by applicable law or agreed to in writing, software # distributed under", "description) changed = True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description]) if state", "be updated. 
\"\"\" params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr, module_attr in", "Copyright 2016, IBM # Licensed under the Apache License, Version 2.0 (the \"License\");", "openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for", "id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def update_service_provider( self, sp_id, sp_url, sp_auth_url,", "applicable law or agreed to in writing, software # distributed under the License", "in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description: - URL that is used", "the License. # try: import shade HAS_SHADE = True except ImportError: HAS_SHADE =", "sp_id: return sp return None def create_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description):", "= False if state == 'present': if not service_provider: service_provider = cloud.create_service_provider( sp_id,", "enabled='enabled', description='description') for sp_attr, module_attr in params_dict.items(): module_val = module.params.get(module_attr, None) if module_val", "found in the service provider's metadata (Which is usually found in https://keystone.sp/Shibboleth.sso/metadata) example", "sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description: - A value of True", "or agreed to in writing, software # distributed under the License is distributed", "= False DOCUMENTATION = ''' --- author: <NAME> module: keystone_service_provider short_description: register sp", "A globally unique id to identify the service provider example -sp.id required: true", "\"\"\" params_dict = dict(sp_url='service_provider_url', 
auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr, module_attr in params_dict.items(): module_val", "coding: utf-8 -*- # Copyright 2016, IBM # Licensed under the Apache License,", "usually found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description: - URL that", "try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION =", "enabled, description): service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "def __init__(self, keystone_client): self.client = keystone_client def get_service_provider(self, sp_id): for sp in self.client.federation.service_providers.list():", "= cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True else: if _needs_update(module,", "cloud = _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if module.check_mode: changed = _system_state_change(module, service_provider) module.exit_json(changed=changed)", "keystone_service_provider short_description: register sp on keystone idp description: - This module registers a", "HAS_SHADE: module.fail_json(msg='shade is required for this module') sp_id = module.params['service_provider_id'] sp_url = module.params['service_provider_url']", "service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs()", "service_provider_id: description: - A globally unique id to identify the service provider example", "if state == 'present': if not service_provider: return True 
return _needs_update(module, service_provider) if", "try: cloud = _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if module.check_mode: changed = _system_state_change(module, service_provider)", "['present', 'absent'] default: present ''' def _needs_update(module, service_provider): \"\"\"Check for differences in the", "'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description: - A value of True enables the service", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "enabled = module.params['enabled'] description = module.params['description'] state = module.params['state'] try: cloud = _get_cloud(**module.params)", "writing, software # distributed under the License is distributed on an \"AS IS\"", "'absent'] default: present ''' def _needs_update(module, service_provider): \"\"\"Check for differences in the updatable", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "get_service_provider(self, sp_id): for sp in self.client.federation.service_providers.list(): if getattr(sp, 'id') == sp_id: return sp", "= module.params['service_provider_auth_url'] enabled = module.params['enabled'] description = module.params['description'] state = module.params['state'] try: cloud", "License. # You may obtain a copy of the License at # #", "that is used to authenticate with the identity provider This URL should be", "module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is", "getattr(service_provider, sp_attr, None): return True return False def _system_state_change(module, service_provider): state = module.params['state']", "compliance with the License. 
# You may obtain a copy of the License", "module.fail_json(msg='shade is required for this module') sp_id = module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url", "enables the service provider and False disables it. default: True description: description: The", "-*- coding: utf-8 -*- # Copyright 2016, IBM # Licensed under the Apache", "language governing permissions and # limitations under the License. # try: import shade", "self.client.federation.service_providers.list(): if getattr(sp, 'id') == sp_id: return sp return None def create_service_provider( self,", "return None def create_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.create(", "with the identity provider This URL should be available once the idp registered", "module.params['enabled'] description = module.params['description'] state = module.params['state'] try: cloud = _get_cloud(**module.params) service_provider =", "- URL that is used to authenticate with the identity provider This URL", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "== 'present': if not service_provider: return True return _needs_update(module, service_provider) if state ==", "of the resource choices: ['present', 'absent'] default: present ''' def _needs_update(module, service_provider): \"\"\"Check", "_get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud class", "sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider", "_system_state_change(module, service_provider): state = module.params['state'] if state == 'present': if not service_provider: 
return", "sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider", "a keystone service provider on the keystone identity provider. options: service_provider_id: description: -", "self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def", "is found in the service provider's metadata (Which is usually found in https://keystone.sp/Shibboleth.sso/metadata)", "= module.params['description'] state = module.params['state'] try: cloud = _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if", "not use this file except in compliance with the License. # You may", "lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__':", "state = module.params['state'] try: cloud = _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if module.check_mode: changed", "default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs)", "'absent' and service_provider: return True return False def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version']", "provider and False disables it. 
default: True description: description: The description of the", "except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- author: <NAME> module: keystone_service_provider", "License, Version 2.0 (the \"License\"); # you may not use this file except", "enabled=enabled, description=description) return service_provider def update_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider", "for sp_attr, module_attr in params_dict.items(): module_val = module.params.get(module_attr, None) if module_val != getattr(service_provider,", "provider example -sp.id required: true service_provider_url: description: - URL that is found in", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "module.exit_json(changed=changed) changed = False if state == 'present': if not service_provider: service_provider =", "% str(e)) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from", "changed = True else: if _needs_update(module, service_provider): service_provider = cloud.update_service_provider( sp_id, sp_url, sp_auth_url,", "= True module.exit_json(changed=changed) except Exception as e: module.fail_json(msg=\"service provider failed: %s\" % str(e))", "sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec", "module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled = module.params['enabled'] description = module.params['description'] state = module.params['state']", "URL should be available once the idp registered on the sp example -'http://keystone.sp/v3/OS-FEDERATION/'", "if module_val != getattr(service_provider, sp_attr, None): return True return False def _system_state_change(module, service_provider):", "sp_url, sp_auth_url, enabled, 
description) changed = True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled,", "# you may not use this file except in compliance with the License.", "module.params['state'] if state == 'present': if not service_provider: return True return _needs_update(module, service_provider)", "example -sp.id required: true service_provider_url: description: - URL that is found in the", "agreed to in writing, software # distributed under the License is distributed on", "differences in the updatable values. Note: Names cannot be updated. \"\"\" params_dict =", "enabled, description) changed = True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description]) if", "required: true enabled: description: - A value of True enables the service provider", "module_val = module.params.get(module_attr, None) if module_val != getattr(service_provider, sp_attr, None): return True return", "(the \"License\"); # you may not use this file except in compliance with", "in the service provider's metadata (Which is usually found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP", "value of True enables the service provider and False disables it. default: True", "metadata (Which is usually found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description:", "of the service provider. 
state: description: - Indicate desired state of the resource", "True return False def _system_state_change(module, service_provider): state = module.params['state'] if state == 'present':", "# Unless required by applicable law or agreed to in writing, software #", "cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object):", "= True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description]) if state == 'absent':", "by applicable law or agreed to in writing, software # distributed under the", "globally unique id to identify the service provider example -sp.id required: true service_provider_url:", "if state == 'absent': if service_provider: cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed) except Exception", "Note: Names cannot be updated. \"\"\" params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description') for", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True),", "service_provider) module.exit_json(changed=changed) changed = False if state == 'present': if not service_provider: service_provider", "True enables the service provider and False disables it. 
default: True description: description:", "enabled: description: - A value of True enables the service provider and False", "False if state == 'present': if not service_provider: service_provider = cloud.create_service_provider( sp_id, sp_url,", "choices: ['present', 'absent'] default: present ''' def _needs_update(module, service_provider): \"\"\"Check for differences in", "= dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr, module_attr in params_dict.items(): module_val = module.params.get(module_attr,", "== 'absent' and service_provider: return True return False def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs)", "'absent': if service_provider: cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed) except Exception as e: module.fail_json(msg=\"service", "service_provider: cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed) except Exception as e: module.fail_json(msg=\"service provider failed:", "the service provider. state: description: - Indicate desired state of the resource choices:", "= cloud.get_service_provider(sp_id) if module.check_mode: changed = _system_state_change(module, service_provider) module.exit_json(changed=changed) changed = False if", "sp_attr, None): return True return False def _system_state_change(module, service_provider): state = module.params['state'] if", "= module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled = module.params['enabled'] description =", "file except in compliance with the License. # You may obtain a copy", "provider. 
options: service_provider_id: description: - A globally unique id to identify the service", "import shade HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION = '''", "changed = True module.exit_json(changed=changed) except Exception as e: module.fail_json(msg=\"service provider failed: %s\" %", "2016, IBM # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "changed = True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description]) if state ==", "module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description]) if state == 'absent': if service_provider:", "License for the specific language governing permissions and # limitations under the License.", "registers a keystone service provider on the keystone identity provider. options: service_provider_id: description:", "service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def update_service_provider( self,", "to in writing, software # distributed under the License is distributed on an", "- URL that is found in the service provider's metadata (Which is usually", "implied. 
# See the License for the specific language governing permissions and #", "return _needs_update(module, service_provider) if state == 'absent' and service_provider: return True return False", "self.client = keystone_client def get_service_provider(self, sp_id): for sp in self.client.federation.service_providers.list(): if getattr(sp, 'id')", "\"License\"); # you may not use this file except in compliance with the", "sp_attr, module_attr in params_dict.items(): module_val = module.params.get(module_attr, None) if module_val != getattr(service_provider, sp_attr,", "params_dict.items(): module_val = module.params.get(module_attr, None) if module_val != getattr(service_provider, sp_attr, None): return True", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "sp on keystone idp description: - This module registers a keystone service provider", "description: - A globally unique id to identify the service provider example -sp.id", "not HAS_SHADE: module.fail_json(msg='shade is required for this module') sp_id = module.params['service_provider_id'] sp_url =", "sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description)", "module: keystone_service_provider short_description: register sp on keystone idp description: - This module registers", "the idp registered on the sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description:", "ShadePlaceholder(object): def __init__(self, keystone_client): self.client = keystone_client def get_service_provider(self, sp_id): for sp in", "return True return False def _system_state_change(module, service_provider): state = module.params['state'] if state ==", "# limitations under the License. 
# try: import shade HAS_SHADE = True except", "return False def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud = ShadePlaceholder(cloud_shade.keystone_client)", "sp in self.client.federation.service_providers.list(): if getattr(sp, 'id') == sp_id: return sp return None def", "default: present ''' def _needs_update(module, service_provider): \"\"\"Check for differences in the updatable values.", "or implied. # See the License for the specific language governing permissions and", "service_provider = cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True else: if", "description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True,", "<reponame>pgraziano/ursula<gh_stars>100-1000 #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2016, IBM # Licensed", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "service_provider = cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True module.exit_json( changed=changed,", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "for differences in the updatable values. Note: Names cannot be updated. 
\"\"\" params_dict", "update_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "'present': if not service_provider: service_provider = cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed", "in writing, software # distributed under the License is distributed on an \"AS", "service_provider): service_provider = cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True module.exit_json(", "if not HAS_SHADE: module.fail_json(msg='shade is required for this module') sp_id = module.params['service_provider_id'] sp_url", "changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description]) if state == 'absent': if service_provider: cloud.delete_service_provider(sp_id)", "= ''' --- author: <NAME> module: keystone_service_provider short_description: register sp on keystone idp", "= True else: if _needs_update(module, service_provider): service_provider = cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled,", "description): service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def delete_service_provider(self,", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "keystone_client def get_service_provider(self, sp_id): for sp in self.client.federation.service_providers.list(): if getattr(sp, 'id') == sp_id:", "= cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True 
module.exit_json( changed=changed, service_provider=[service_provider.id,", "def main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None),", "= module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled = module.params['enabled'] description = module.params['description'] state =", "== 'present': if not service_provider: service_provider = cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled, description)", "state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default:", "sp_url = module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled = module.params['enabled'] description = module.params['description'] state", "_needs_update(module, service_provider) if state == 'absent' and service_provider: return True return False def", "= _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if module.check_mode: changed = _system_state_change(module, service_provider) module.exit_json(changed=changed) changed", "service provider. state: description: - Indicate desired state of the resource choices: ['present',", "as e: module.fail_json(msg=\"service provider failed: %s\" % str(e)) # this is magic, see", "-sp.id required: true service_provider_url: description: - URL that is found in the service", "Indicate desired state of the resource choices: ['present', 'absent'] default: present ''' def", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. 
#", "service_provider: return True return False def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3'", "None): return True return False def _system_state_change(module, service_provider): state = module.params['state'] if state", "return service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True),", "is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if", "description of the service provider. state: description: - Indicate desired state of the", "- A globally unique id to identify the service provider example -sp.id required:", "'3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object): def __init__(self, keystone_client): self.client =", "if getattr(sp, 'id') == sp_id: return sp return None def create_service_provider( self, sp_id,", "service_provider_auth_url: description: - URL that is used to authenticate with the identity provider", "required: true service_provider_auth_url: description: - URL that is used to authenticate with the", "class ShadePlaceholder(object): def __init__(self, keystone_client): self.client = keystone_client def get_service_provider(self, sp_id): for sp", "_system_state_change(module, service_provider) module.exit_json(changed=changed) changed = False if state == 'present': if not service_provider:", "use this file except in compliance with the License. 
# You may obtain", "description: - A value of True enables the service provider and False disables", "description): service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def update_service_provider(", "updated. \"\"\" params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr, module_attr in params_dict.items():", "updatable values. Note: Names cannot be updated. \"\"\" params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled',", "A value of True enables the service provider and False disables it. default:", "_needs_update(module, service_provider): \"\"\"Check for differences in the updatable values. Note: Names cannot be", "description='description') for sp_attr, module_attr in params_dict.items(): module_val = module.params.get(module_attr, None) if module_val !=", "service provider example -sp.id required: true service_provider_url: description: - URL that is found", "module.params.get(module_attr, None) if module_val != getattr(service_provider, sp_attr, None): return True return False def", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "service_provider: return True return _needs_update(module, service_provider) if state == 'absent' and service_provider: return", "ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object): def __init__(self, keystone_client): self.client = keystone_client def get_service_provider(self,", "delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), 
enabled=dict(required=False, type='bool',", "default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec,", "_needs_update(module, service_provider): service_provider = cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True", "2.0 (the \"License\"); # you may not use this file except in compliance", "for this module') sp_id = module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled", "description: description: The description of the service provider. state: description: - Indicate desired", "enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module", "for the specific language governing permissions and # limitations under the License. #", "auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr, module_attr in params_dict.items(): module_val = module.params.get(module_attr, None) if", "self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled,", "URL that is used to authenticate with the identity provider This URL should", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "False disables it. 
default: True description: description: The description of the service provider.", "module.params['state'] try: cloud = _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if module.check_mode: changed = _system_state_change(module,", "= True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- author: <NAME>", "required for this module') sp_id = module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url']", "# # Unless required by applicable law or agreed to in writing, software", "should be available once the idp registered on the sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth'", "the service provider and False disables it. default: True description: description: The description", "this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import *", "express or implied. # See the License for the specific language governing permissions", "and service_provider: return True return False def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] =", "register sp on keystone idp description: - This module registers a keystone service", "description: The description of the service provider. state: description: - Indicate desired state", "either express or implied. 
# See the License for the specific language governing", "state == 'absent' and service_provider: return True return False def _get_cloud(**kwargs): cloud_shade =", "id to identify the service provider example -sp.id required: true service_provider_url: description: -", "'id') == sp_id: return sp return None def create_service_provider( self, sp_id, sp_url, sp_auth_url,", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "that is found in the service provider's metadata (Which is usually found in", "to identify the service provider example -sp.id required: true service_provider_url: description: - URL", "cannot be updated. \"\"\" params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr, module_attr", "example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description: - A value of True enables", "idp registered on the sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description: -", "service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main():", "present ''' def _needs_update(module, service_provider): \"\"\"Check for differences in the updatable values. 
Note:", "False def _system_state_change(module, service_provider): state = module.params['state'] if state == 'present': if not", "state == 'absent': if service_provider: cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed) except Exception as", "authenticate with the identity provider This URL should be available once the idp", "create_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url,", "used to authenticate with the identity provider This URL should be available once", "module') sp_id = module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled = module.params['enabled']", "registered on the sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description: - A", "the License. # You may obtain a copy of the License at #", "on the keystone identity provider. 
options: service_provider_id: description: - A globally unique id", "provider failed: %s\" % str(e)) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic", "module.fail_json(msg=\"service provider failed: %s\" % str(e)) # this is magic, see lib/ansible/module_common.py from", "service_provider = cloud.get_service_provider(sp_id) if module.check_mode: changed = _system_state_change(module, service_provider) module.exit_json(changed=changed) changed = False", "example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description: - URL that is used to authenticate", "# try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION", "params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr, module_attr in params_dict.items(): module_val =", "found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description: - URL that is", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "--- author: <NAME> module: keystone_service_provider short_description: register sp on keystone idp description: -", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Names cannot be updated. 
\"\"\" params_dict = dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr,", "description]) if state == 'absent': if service_provider: cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed) except", "= _system_state_change(module, service_provider) module.exit_json(changed=changed) changed = False if state == 'present': if not", "AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') sp_id", "return False def _system_state_change(module, service_provider): state = module.params['state'] if state == 'present': if", "# Copyright 2016, IBM # Licensed under the Apache License, Version 2.0 (the", "= module.params['state'] if state == 'present': if not service_provider: return True return _needs_update(module,", "if state == 'present': if not service_provider: service_provider = cloud.create_service_provider( sp_id, sp_url, sp_auth_url,", "service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True),", "enabled, description): service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def", "description: - This module registers a keystone service provider on the keystone identity", "service_provider.auth_url, enabled, description]) if state == 'absent': if service_provider: cloud.delete_service_provider(sp_id) changed = True", "description: - URL that is used to authenticate with the identity provider This", "__init__(self, keystone_client): self.client = keystone_client def 
get_service_provider(self, sp_id): for sp in self.client.federation.service_providers.list(): if", "it. default: True description: description: The description of the service provider. state: description:", "description = module.params['description'] state = module.params['state'] try: cloud = _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id)", "str(e)) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack", "return sp return None def create_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider", "true service_provider_url: description: - URL that is found in the service provider's metadata", "idp description: - This module registers a keystone service provider on the keystone", "service provider and False disables it. default: True description: description: The description of", "with the License. # You may obtain a copy of the License at", "service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs", "this module') sp_id = module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled =", "options: service_provider_id: description: - A globally unique id to identify the service provider", "sp_url, sp_auth_url, enabled, description) changed = True else: if _needs_update(module, service_provider): service_provider =", "''' --- author: <NAME> module: keystone_service_provider short_description: register sp on keystone idp description:", "<NAME> module: keystone_service_provider short_description: register sp on keystone idp description: - This module", "once the idp registered on the sp example 
-'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled:", "description=description) return service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True),", "to authenticate with the identity provider This URL should be available once the", "service_provider.sp_url, service_provider.auth_url, enabled, description]) if state == 'absent': if service_provider: cloud.delete_service_provider(sp_id) changed =", "state == 'present': if not service_provider: service_provider = cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled,", "= ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object): def __init__(self, keystone_client): self.client = keystone_client def", "true enabled: description: - A value of True enables the service provider and", "This URL should be available once the idp registered on the sp example", "not service_provider: service_provider = cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True", "= openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required", "_get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if module.check_mode: changed = _system_state_change(module, service_provider) module.exit_json(changed=changed) changed =", "def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False,", "law or agreed to in writing, software # distributed 
under the License is", "the License for the specific language governing permissions and # limitations under the", "identify the service provider example -sp.id required: true service_provider_url: description: - URL that", "if state == 'absent' and service_provider: return True return False def _get_cloud(**kwargs): cloud_shade", "identity provider This URL should be available once the idp registered on the", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "def update_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url,", "- This module registers a keystone service provider on the keystone identity provider.", "supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') sp_id =", "and False disables it. default: True description: description: The description of the service", "sp_id): for sp in self.client.federation.service_providers.list(): if getattr(sp, 'id') == sp_id: return sp return", "module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this", "True return _needs_update(module, service_provider) if state == 'absent' and service_provider: return True return", "''' def _needs_update(module, service_provider): \"\"\"Check for differences in the updatable values. 
Note: Names", "False def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return", "cloud.get_service_provider(sp_id) if module.check_mode: changed = _system_state_change(module, service_provider) module.exit_json(changed=changed) changed = False if state", "service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def delete_service_provider(self, sp_id):", "keystone identity provider. options: service_provider_id: description: - A globally unique id to identify", "module.params['description'] state = module.params['state'] try: cloud = _get_cloud(**module.params) service_provider = cloud.get_service_provider(sp_id) if module.check_mode:", "on keystone idp description: - This module registers a keystone service provider on", "if module.check_mode: changed = _system_state_change(module, service_provider) module.exit_json(changed=changed) changed = False if state ==", "getattr(sp, 'id') == sp_id: return sp return None def create_service_provider( self, sp_id, sp_url,", "keystone service provider on the keystone identity provider. options: service_provider_id: description: - A", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "== sp_id: return sp return None def create_service_provider( self, sp_id, sp_url, sp_auth_url, enabled,", "module.check_mode: changed = _system_state_change(module, service_provider) module.exit_json(changed=changed) changed = False if state == 'present':", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "The description of the service provider. state: description: - Indicate desired state of", "cloud class ShadePlaceholder(object): def __init__(self, keystone_client): self.client = keystone_client def get_service_provider(self, sp_id): for", "available once the idp registered on the sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true", "True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- author: <NAME> module:", "#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2016, IBM # Licensed under", "dict(sp_url='service_provider_url', auth_url='service_provider_auth_url', enabled='enabled', description='description') for sp_attr, module_attr in params_dict.items(): module_val = module.params.get(module_attr, None)", "main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present',", "See the License for the specific language governing permissions and # limitations under", "state of the resource choices: ['present', 'absent'] default: present ''' def _needs_update(module, service_provider):", "service_provider: service_provider = cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True else:", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "https://keystone.sp/Shibboleth.sso/metadata) example 
-https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description: - URL that is used to", "service_provider) if state == 'absent' and service_provider: return True return False def _get_cloud(**kwargs):", "cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed) except Exception as e: module.fail_json(msg=\"service provider failed: %s\"", "False DOCUMENTATION = ''' --- author: <NAME> module: keystone_service_provider short_description: register sp on", "and # limitations under the License. # try: import shade HAS_SHADE = True", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "(Which is usually found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true service_provider_auth_url: description: -", "of True enables the service provider and False disables it. default: True description:", "the keystone identity provider. options: service_provider_id: description: - A globally unique id to", "in the updatable values. Note: Names cannot be updated. 
\"\"\" params_dict = dict(sp_url='service_provider_url',", "cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True else: if _needs_update(module, service_provider):", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "utf-8 -*- # Copyright 2016, IBM # Licensed under the Apache License, Version", "return True return False def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud", "the identity provider This URL should be available once the idp registered on", "else: if _needs_update(module, service_provider): service_provider = cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed", "- A value of True enables the service provider and False disables it.", "self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled,", "-*- # Copyright 2016, IBM # Licensed under the Apache License, Version 2.0", "= openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']),", "under the License. 
# try: import shade HAS_SHADE = True except ImportError: HAS_SHADE", "failed: %s\" % str(e)) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import", "magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__", "choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not", "changed = _system_state_change(module, service_provider) module.exit_json(changed=changed) changed = False if state == 'present': if", "self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def update_service_provider( self, sp_id, sp_url,", "True description: description: The description of the service provider. state: description: - Indicate", "sp_auth_url = module.params['service_provider_auth_url'] enabled = module.params['enabled'] description = module.params['description'] state = module.params['state'] try:", "the service provider example -sp.id required: true service_provider_url: description: - URL that is", "short_description: register sp on keystone idp description: - This module registers a keystone", "None def create_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.create( id=sp_id,", "- Indicate desired state of the resource choices: ['present', 'absent'] default: present '''", "cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed = True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url,", "def _needs_update(module, service_provider): \"\"\"Check for differences in the updatable values. 
Note: Names cannot", "true service_provider_auth_url: description: - URL that is used to authenticate with the identity", "!= getattr(service_provider, sp_attr, None): return True return False def _system_state_change(module, service_provider): state =", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "return service_provider def update_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.update(", "def _system_state_change(module, service_provider): state = module.params['state'] if state == 'present': if not service_provider:", "sp_id = module.params['service_provider_id'] sp_url = module.params['service_provider_url'] sp_auth_url = module.params['service_provider_auth_url'] enabled = module.params['enabled'] description", "service provider's metadata (Which is usually found in https://keystone.sp/Shibboleth.sso/metadata) example -https://keystone.sp/Shibboleth.sso/SAML2/ECP required: true", "service provider on the keystone identity provider. options: service_provider_id: description: - A globally", "auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def update_service_provider( self, sp_id, sp_url, sp_auth_url, enabled, description):", "True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url, enabled, description]) if state == 'absent': if", "# this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import", "= shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object): def", "permissions and # limitations under the License. 
# try: import shade HAS_SHADE =", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "author: <NAME> module: keystone_service_provider short_description: register sp on keystone idp description: - This", "auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def delete_service_provider(self, sp_id): self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec =", "be available once the idp registered on the sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required:", "shade HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' ---", "-'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description: - A value of True enables the", "sp_url, sp_auth_url, enabled, description): service_provider = self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return", "IBM # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present", "module registers a keystone service provider on the keystone identity provider. options: service_provider_id:", "default: True description: description: The description of the service provider. state: description: -", "governing permissions and # limitations under the License. 
# try: import shade HAS_SHADE", "sp_id, sp_url, sp_auth_url, enabled, description) changed = True module.exit_json( changed=changed, service_provider=[service_provider.id, service_provider.sp_url, service_provider.auth_url,", "**module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') sp_id = module.params['service_provider_id']", "if service_provider: cloud.delete_service_provider(sp_id) changed = True module.exit_json(changed=changed) except Exception as e: module.fail_json(msg=\"service provider", "limitations under the License. # try: import shade HAS_SHADE = True except ImportError:", "module_val != getattr(service_provider, sp_attr, None): return True return False def _system_state_change(module, service_provider): state", "True module.exit_json(changed=changed) except Exception as e: module.fail_json(msg=\"service provider failed: %s\" % str(e)) #", "if not service_provider: service_provider = cloud.create_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed =", "on the sp example -'http://keystone.sp/v3/OS-FEDERATION/' 'identity_providers/keystone-idp/protocols/saml2/auth' required: true enabled: description: - A value", "= keystone_client def get_service_provider(self, sp_id): for sp in self.client.federation.service_providers.list(): if getattr(sp, 'id') ==", "enabled, description) changed = True else: if _needs_update(module, service_provider): service_provider = cloud.update_service_provider( sp_id,", "changed = False if state == 'present': if not service_provider: service_provider = cloud.create_service_provider(", "for sp in self.client.federation.service_providers.list(): if getattr(sp, 'id') == sp_id: return sp return None", "state = module.params['state'] if state == 'present': if not service_provider: return True return", "self.client.federation.service_providers.delete(service_provider=sp_id) def main(): argument_spec = openstack_full_argument_spec( service_provider_id=dict(required=True), 
service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False,", "e: module.fail_json(msg=\"service provider failed: %s\" % str(e)) # this is magic, see lib/ansible/module_common.py", "unique id to identify the service provider example -sp.id required: true service_provider_url: description:", "def get_service_provider(self, sp_id): for sp in self.client.federation.service_providers.list(): if getattr(sp, 'id') == sp_id: return", "= self.client.federation.service_providers.create( id=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return service_provider def update_service_provider( self, sp_id,", "openstack_full_argument_spec( service_provider_id=dict(required=True), service_provider_url=dict(required=True), service_provider_auth_url=dict(required=True), enabled=dict(required=False, type='bool', default=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), )", "= module.params['enabled'] description = module.params['description'] state = module.params['state'] try: cloud = _get_cloud(**module.params) service_provider", "in params_dict.items(): module_val = module.params.get(module_attr, None) if module_val != getattr(service_provider, sp_attr, None): return", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "keystone_client): self.client = keystone_client def get_service_provider(self, sp_id): for sp in self.client.federation.service_providers.list(): if getattr(sp,", "= '3' cloud = ShadePlaceholder(cloud_shade.keystone_client) return cloud class ShadePlaceholder(object): def __init__(self, keystone_client): self.client", "if _needs_update(module, service_provider): service_provider = cloud.update_service_provider( sp_id, sp_url, sp_auth_url, enabled, description) changed =", "sp_url, sp_auth_url, enabled, description): 
service_provider = self.client.federation.service_providers.update( service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url, enabled=enabled, description=description) return", "True return False def _get_cloud(**kwargs): cloud_shade = shade.openstack_cloud(**kwargs) cloud_shade.cloud_config.config['identity_api_version'] = '3' cloud =", "not service_provider: return True return _needs_update(module, service_provider) if state == 'absent' and service_provider:", "= module.params.get(module_attr, None) if module_val != getattr(service_provider, sp_attr, None): return True return False", "return True return _needs_update(module, service_provider) if state == 'absent' and service_provider: return True", "the resource choices: ['present', 'absent'] default: present ''' def _needs_update(module, service_provider): \"\"\"Check for" ]
[ "init_project from sacred import Experiment from flow import FlowProject ex = Experiment() project", "job: 'bar' not in job.sp) # only run for non-branched @SacredProject.post(lambda job: 'weights'", "in job.sp) # only run for non-branched @SacredProject.post(lambda job: 'weights' in job.doc) @SacredProject.operation", "@SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in job.doc) def stage2(job): job.doc.result = func(parent.doc.weights, bar) for", "flow import FlowProject ex = Experiment() project = init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture", "only run for non-branched @SacredProject.post(lambda job: 'weights' in job.doc) @SacredProject.operation def stage1(job): job.doc.weights", "@SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in job.doc) def stage2(job): job.doc.result = func(parent.doc.weights, bar)", "class SacredProject(FlowProject): pass @ex.capture def func(weights, bar): return None @ex.capture @SacredProject.pre(lambda job: 'bar'", "def stage2(job): job.doc.result = func(parent.doc.weights, bar) for foo in 8, 15, 16, 23,", "42: setup_stage2(foo=foo) for bar in (True, False): project.open_job(dict(foo=foo, bar=bar)).init() if __name__ == '__main__':", "from signac import init_project from sacred import Experiment from flow import FlowProject ex", "= Experiment() project = init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture def func(weights, bar): return", "func(parent.doc.weights, bar) for foo in 8, 15, 16, 23, 42: setup_stage2(foo=foo) for bar", "job.doc.weights = ['1.0'] * job.sp.foo def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1)", "'result' in job.doc) def stage2(job): job.doc.result = func(parent.doc.weights, bar) for foo in 8,", "16, 
23, 42: setup_stage2(foo=foo) for bar in (True, False): project.open_job(dict(foo=foo, bar=bar)).init() if __name__", "@SacredProject.pre(lambda job: 'bar' not in job.sp) # only run for non-branched @SacredProject.post(lambda job:", "= project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in job.doc) def stage2(job): job.doc.result", "job.sp) # only run for non-branched @SacredProject.post(lambda job: 'weights' in job.doc) @SacredProject.operation def", "FlowProject ex = Experiment() project = init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture def func(weights,", "project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in job.doc) def stage2(job): job.doc.result =", "in job.doc) @SacredProject.operation def stage1(job): job.doc.weights = ['1.0'] * job.sp.foo def setup_stage2(foo): parent", "import Experiment from flow import FlowProject ex = Experiment() project = init_project('signac-sacred-integration') class", "23, 42: setup_stage2(foo=foo) for bar in (True, False): project.open_job(dict(foo=foo, bar=bar)).init() if __name__ ==", "from flow import FlowProject ex = Experiment() project = init_project('signac-sacred-integration') class SacredProject(FlowProject): pass", "'bar' not in job.sp) # only run for non-branched @SacredProject.post(lambda job: 'weights' in", "= func(parent.doc.weights, bar) for foo in 8, 15, 16, 23, 42: setup_stage2(foo=foo) for", "for non-branched @SacredProject.post(lambda job: 'weights' in job.doc) @SacredProject.operation def stage1(job): job.doc.weights = ['1.0']", "job.doc) @SacredProject.operation def stage1(job): job.doc.weights = ['1.0'] * job.sp.foo def setup_stage2(foo): parent =", "@SacredProject.operation def stage1(job): job.doc.weights = 
['1.0'] * job.sp.foo def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init()", "job: 'weights' in job.doc) @SacredProject.operation def stage1(job): job.doc.weights = ['1.0'] * job.sp.foo def", "for foo in 8, 15, 16, 23, 42: setup_stage2(foo=foo) for bar in (True,", "bar): return None @ex.capture @SacredProject.pre(lambda job: 'bar' not in job.sp) # only run", "stage1(job): job.doc.weights = ['1.0'] * job.sp.foo def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent))", "def func(weights, bar): return None @ex.capture @SacredProject.pre(lambda job: 'bar' not in job.sp) #", "8, 15, 16, 23, 42: setup_stage2(foo=foo) for bar in (True, False): project.open_job(dict(foo=foo, bar=bar)).init()", "= init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture def func(weights, bar): return None @ex.capture @SacredProject.pre(lambda", "def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in job.doc)", "job.sp.foo def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in", "['1.0'] * job.sp.foo def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job:", "'weights' in job.doc) @SacredProject.operation def stage1(job): job.doc.weights = ['1.0'] * job.sp.foo def setup_stage2(foo):", "# only run for non-branched @SacredProject.post(lambda job: 'weights' in job.doc) @SacredProject.operation def stage1(job):", "* job.sp.foo def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture 
@SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result'", "setup_stage2(foo=foo) for bar in (True, False): project.open_job(dict(foo=foo, bar=bar)).init() if __name__ == '__main__': SacredProject().main()", "@ex.capture def func(weights, bar): return None @ex.capture @SacredProject.pre(lambda job: 'bar' not in job.sp)", "bar) for foo in 8, 15, 16, 23, 42: setup_stage2(foo=foo) for bar in", "= ['1.0'] * job.sp.foo def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda", "job: 'result' in job.doc) def stage2(job): job.doc.result = func(parent.doc.weights, bar) for foo in", "not in job.sp) # only run for non-branched @SacredProject.post(lambda job: 'weights' in job.doc)", "setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in job.doc) def", "import init_project from sacred import Experiment from flow import FlowProject ex = Experiment()", "Experiment from flow import FlowProject ex = Experiment() project = init_project('signac-sacred-integration') class SacredProject(FlowProject):", "Experiment() project = init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture def func(weights, bar): return None", "@SacredProject.post(lambda job: 'weights' in job.doc) @SacredProject.operation def stage1(job): job.doc.weights = ['1.0'] * job.sp.foo", "15, 16, 23, 42: setup_stage2(foo=foo) for bar in (True, False): project.open_job(dict(foo=foo, bar=bar)).init() if", "init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture def func(weights, bar): return None @ex.capture @SacredProject.pre(lambda job:", "job.doc) def stage2(job): job.doc.result = 
func(parent.doc.weights, bar) for foo in 8, 15, 16,", "import FlowProject ex = Experiment() project = init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture def", "parent = project.open_job(dict(foo=foo)).init() @ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) @SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in job.doc) def stage2(job):", "signac import init_project from sacred import Experiment from flow import FlowProject ex =", "in job.doc) def stage2(job): job.doc.result = func(parent.doc.weights, bar) for foo in 8, 15,", "pass @ex.capture def func(weights, bar): return None @ex.capture @SacredProject.pre(lambda job: 'bar' not in", "return None @ex.capture @SacredProject.pre(lambda job: 'bar' not in job.sp) # only run for", "non-branched @SacredProject.post(lambda job: 'weights' in job.doc) @SacredProject.operation def stage1(job): job.doc.weights = ['1.0'] *", "project = init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture def func(weights, bar): return None @ex.capture", "job.doc.result = func(parent.doc.weights, bar) for foo in 8, 15, 16, 23, 42: setup_stage2(foo=foo)", "SacredProject(FlowProject): pass @ex.capture def func(weights, bar): return None @ex.capture @SacredProject.pre(lambda job: 'bar' not", "def stage1(job): job.doc.weights = ['1.0'] * job.sp.foo def setup_stage2(foo): parent = project.open_job(dict(foo=foo)).init() @ex.capture", "@SacredProject.post(lambda job: 'result' in job.doc) def stage2(job): job.doc.result = func(parent.doc.weights, bar) for foo", "from sacred import Experiment from flow import FlowProject ex = Experiment() project =", "stage2(job): job.doc.result = func(parent.doc.weights, bar) for foo in 8, 15, 16, 23, 42:", "in 8, 15, 16, 23, 42: setup_stage2(foo=foo) for bar in (True, False): project.open_job(dict(foo=foo,", "@ex.capture @SacredProject.operation('stage2[{}]'.format(parent)) 
@SacredProject.pre.after(stage1) @SacredProject.post(lambda job: 'result' in job.doc) def stage2(job): job.doc.result = func(parent.doc.weights,", "@ex.capture @SacredProject.pre(lambda job: 'bar' not in job.sp) # only run for non-branched @SacredProject.post(lambda", "sacred import Experiment from flow import FlowProject ex = Experiment() project = init_project('signac-sacred-integration')", "foo in 8, 15, 16, 23, 42: setup_stage2(foo=foo) for bar in (True, False):", "None @ex.capture @SacredProject.pre(lambda job: 'bar' not in job.sp) # only run for non-branched", "func(weights, bar): return None @ex.capture @SacredProject.pre(lambda job: 'bar' not in job.sp) # only", "run for non-branched @SacredProject.post(lambda job: 'weights' in job.doc) @SacredProject.operation def stage1(job): job.doc.weights =", "ex = Experiment() project = init_project('signac-sacred-integration') class SacredProject(FlowProject): pass @ex.capture def func(weights, bar):" ]
[ "# Loading TensorFlow from diplomacy_research.utils.tensorflow import tf output_types = self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes", ":param session: The session used to initialize the init op :type session: tensorflow.python.client.session.Session", "= self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes # Making sure itertor is", "self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\" Save current status to", "self.do_infinite_training: self.steps_in_current_mode += 1 if self.training_mode == TrainingMode.TRAINING: self.training_progress = (self.training_progress + 1.", "int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating directory and saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True)", "fallback return None @property def is_done(self): \"\"\" Returns True if the end of", "self.output_features = features # Otherwise, we create a brand new iterator else: self.iterator", "self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\" Initializes the current iterator :param session: The session", "epochs when doing infinite training if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) #", "init_op # If session is wrapped, executing it without hooks init_op = {TrainingMode.TRAINING:", "but no need to shuffle if self.cluster_config and self.num_shards > 1: shard_fn =", "fallback in fallbacks: if os.path.exists(fallback): return fallback return None @property def is_done(self): \"\"\"", "self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, 
padded_shapes=self.dataset_builder.padded_shapes) # Building a list of generic", "is not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if features: self.output_features", "'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)) # Otherwise, we just delete", "return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property def chief_status_path(self): \"\"\" Path to the", "dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building the datasets self.build() @property def can_support_iterator(self): \"\"\"", "the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or #", "we load our status, otherwise we use the chief use_own_status = ((status['training_mode'] ==", "if not self.status_path: return # Trying to load from primary path if os.path.exists(self.status_path)", ":param cluster_config: Optional. If set, the cluster configuration will be used for distributed", "step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0 def start_training_mode(self, session): \"\"\"", "sure all workers can loop on the dataset at all times else: if", "\"request_id\" field.' 
# Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug (batch) mode", "nb_total_steps_per_epoch(self): \"\"\" Returns the total number of training and validation steps per epoch", "resume where we were \"\"\" status = {} status_loaded = False # Not", "output_shapes = self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes # Making sure itertor is on the", "server). :param features: If an iterator_resource is specified, this corresponds to the output", "0 # Nb of batches to skip self.steps_in_current_mode = 0 # Step count", "self._iterator_initialized @property def status_path(self): \"\"\" Path to the status file on disk (where", "the proto-fields and generation methods :param checkpoint_dir: The directory where the status is", "os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)) # Otherwise, we just delete the worker status", "= {} status_loaded = False # Not loading status if checkpoint_dir is None.", "is_done(self): \"\"\" Returns True if the end of file has been reached \"\"\"", "it without hooks init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda", "def batch_size(self): \"\"\" Getter for batch_size \"\"\" return self._batch_size @batch_size.setter def batch_size(self, value):", "to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" # We haven't created", "# Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted, free", "pickle.load(chief_status) else: chief_status = status # We couldn't find a status file to", "obtaining # a copy of this software and associated documentation files (the \"Software\"),", "self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session) def 
get_progress(self): \"\"\" Returns the number", "return True @property def batch_size(self): \"\"\" Getter for batch_size \"\"\" return self._batch_size @batch_size.setter", "Setter for batch_size \"\"\" if self.num_shards is not None: raise RuntimeError('You cannot change", "to permit persons to whom the Software is # furnished to do so,", "training point if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self):", "self.output_features = self.iterator.get_next() # Generating init op for each dataset # Using different", "using shards') self._batch_size = value @property def num_shards(self): \"\"\" Returns the number of", "the output of iterator.get_next() :return: Nothing, but sets the self.iterator, self.features, and dataset", "dataset in training mode :param session: The session used to initialize the init", "def save_status(self): \"\"\" Save current status to file to be able to resume", "training if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating directory and saving", "0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in fallbacks: if os.path.exists(fallback): return fallback return None", "chief does not match. 
Resuming using chief status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs =", "counter \"\"\" if not self.is_done or self.do_infinite_training: self.steps_in_current_mode += 1 if self.training_mode ==", "Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but no need to shuffle", "mark_as_done(self): \"\"\" Marks the dataset as having reached the end of the file\"\"\"", "= 0 self.total_nb_items_valid_proto = 0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done =", "self.training_init_op = None self.validation_init_op = None self.output_features = None # This represents iterator.get_next()", "SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops iterating the dataset \"\"\" self.is_closing = True self.training_dataset", "accessed.') raise RuntimeError('Invalid training mode specified.') self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session)", "to validate that we have the same training_mode and nb_epochs if self.cluster_config and", "= do_infinite_training self.is_closing = False self.session = None # Creating empty datasets self.training_dataset", "otherwise if not self.no_iterator: self.create_iterator() def create_iterator(self, iterator_resource=None, shared_name=None, features=None): \"\"\" Creates an", "loop on the dataset at all times else: if self.cluster_config and self.num_shards >", "not status_loaded and self.fallback_status_path: try: with open(self.fallback_status_path, 'rb') as status: status = pickle.load(status)", "the same name self._iterator_initialized = False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def", "an iterator yet if self.iterator is None: return # Loading TensorFlow from 
diplomacy_research.utils.tensorflow", "the status folder if self.cluster_config and self.cluster_config.is_chief: for status_ix in range(self.num_shards, status['num_shards']): if", ":type session: tensorflow.python.client.session.Session \"\"\" # We haven't created an iterator yet if self.iterator", "disk and resume where we were \"\"\" status = {} status_loaded = False", "# Regular mode # Otherwise, sharding and shuffling the dataset # Repeating to", "= True self._dataset_is_done = False # For validation set, we can reset the", "and self.cluster_config.is_chief: for status_ix in range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)):", "queue # Steps self.nb_batches_to_skip = 0 # Nb of batches to skip self.steps_in_current_mode", "responsible for using a training and validation dataset to feed data to the", "worker status file else: os.unlink(self.status_path) # We load the fallback status if not", "\"\"\" Setter for batch_size \"\"\" if self.num_shards is not None: raise RuntimeError('You cannot", "with the same name self._iterator_initialized = False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset)", "so we use it if iterator_resource is not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None,", "from diplomacy_research.utils.tensorflow import tf # Running init_op # If session is wrapped, executing", "from queue # Steps self.nb_batches_to_skip = 0 # Nb of batches to skip", "Constructor :param batch_size: The size of a batch per tower :param dataset_builder: An", "self.training_progress = chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # If we", "restriction, including without limitation the # rights to use, copy, modify, merge, publish,", "self.num_shards} with 
open(self.status_path, 'wb') as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads", "dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building the datasets", "% 1 def mark_as_done(self): \"\"\" Marks the dataset as having reached the end", "return self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\" Save current status to file to be", "close(self): \"\"\" Stops iterating the dataset \"\"\" self.is_closing = True self.training_dataset = None", "be shared under the given name across multiple sessions that share the same", "cluster_config: Optional. If set, the cluster configuration will be used for distributed training.", "else: session.run(init_op) self._iterator_initialized = True self._dataset_is_done = False # For validation set, we", "\"\"\" if self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\"", "be used as default if features are missing from queue # Steps self.nb_batches_to_skip", "return fallback return None @property def is_done(self): \"\"\" Returns True if the end", "to make sure all workers can loop on the dataset at all times", "`BaseBuilder` containing the proto-fields and generation methods :param checkpoint_dir: The directory where the", "'status-%03d.pkl' % self.cluster_config.task_id) @property def chief_status_path(self): \"\"\" Path to the chief status path", "batch forever if self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) # Regular mode", "not None and not self.no_iterator: LOGGER.error('An iterator resource can only be set if", "not self.no_iterator: self.create_iterator() def create_iterator(self, 
iterator_resource=None, shared_name=None, features=None): \"\"\" Creates an iterator object", "the training set can be accessed.') raise RuntimeError('Invalid training mode specified.') self.training_mode =", "to %d', status['num_shards'], self.num_shards) # If we are chief, we do a cleanup", "processed items if self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping %d batches in the training", "class SupervisedDataset(): \"\"\" This object is responsible for generating entries to feed the", "= no_iterator self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training =", "cluster config is set), otherwise None \"\"\" return self.cluster_config.num_shards if self.cluster_config else 1", "shards. Current shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset =", "the file\"\"\" self._dataset_is_done = True def build(self): \"\"\" Builds the TensorFlow datasets \"\"\"", "TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\" Determine if the iterator", "Returns the number of completed epochs, and the current % of the epoch", "= pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building the datasets self.build()", "per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\"", "2.5% train, valid, 2.5% train, ...) 
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\"", "status_loaded = True else: LOGGER.info('Number of shards has changed from %d to %d',", "used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset", "iterator object (optionally using a shared name and a specific iterator resource) :param", "Otherwise, we just delete the worker status file else: os.unlink(self.status_path) # We load", "'rb') as chief_status: chief_status = pickle.load(chief_status) else: chief_status = status # We couldn't", "and not self.no_iterator: LOGGER.error('An iterator resource can only be set if the dataset", "status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized = False if use_own_status: self.training_mode =", "\"\"\" return self.cluster_config.num_shards if self.cluster_config else 1 @property def nb_training_steps_per_epoch(self): \"\"\" Returns the", "Building the datasets self.build() @property def can_support_iterator(self): \"\"\" Determines if the dataset can", "the validation set. 
:param perc_epoch_for_training: If set, the training epoch will be for", "and to permit persons to whom the Software is # furnished to do", "# furnished to do so, subject to the following conditions: # # The", "end of file has been reached \"\"\" if self.do_infinite_training: return False return self._dataset_is_done", "the Software, and to permit persons to whom the Software is # furnished", "self.num_shards))) @property def nb_validation_steps_per_epoch(self): \"\"\" Returns the number of validation steps per epoch", "in validation mode :param session: The session used to initialize the init op", "initialized \"\"\" return self._iterator_initialized @property def status_path(self): \"\"\" Path to the status file", "* self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\"", "if os.path.exists(fallback): return fallback return None @property def is_done(self): \"\"\" Returns True if", "mode # Only taking one batch and looping over that batch forever if", "items remaining in epoch self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto = 0 self.training_mode = TrainingMode.TRAINING", "number of training steps per epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto return", "containing the proto-fields and generation methods :param checkpoint_dir: The directory where the status", "in current mode self.training_progress = 0. 
# Number of items remaining in epoch", "Returns the number of validation steps per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size", "with tf.device(self.cluster_config.iterator_device if self.cluster_config else None): # We have an iterator resource, so", "Steps self.nb_batches_to_skip = 0 # Nb of batches to skip self.steps_in_current_mode = 0", "Marks the dataset as having reached the end of the file\"\"\" self._dataset_is_done =", "of steps per epoch in the current mode (Training / Validation) \"\"\" if", "LOGGER.info('Number of shards has changed from %d to %d', status['num_shards'], self.num_shards) # If", "None = disabled self.cluster_config = cluster_config self.debug_batch = debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training", "dataset. There are %d shards. Current shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn =", "are chief, we do a cleanup on the status folder if self.cluster_config and", "number of batches # to get to the same training point if self.training_mode", "pickle import numpy as np from diplomacy_research.settings import WORKING_DIR # Constants LOGGER =", "validation mode :param session: The session used to initialize the init op :type", "'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards} with open(self.status_path, 'wb') as file: pickle.dump(status, file,", "\"\"\" from enum import Enum import logging import os import math import multiprocessing", "use the chief use_own_status = ((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) #", "/ (self.batch_size * self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\" Returns the total number of", "was created with the \"no_iterator\" flag.') raise RuntimeError(\"Cannot create new iterator\") if iterator_resource", "raise 
RuntimeError(\"Cannot create new iterator\") if iterator_resource is not None and features is", "status = {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards} with", "the current iterator :param session: The session used to initialize the init op", "on the status folder if self.cluster_config and self.cluster_config.is_chief: for status_ix in range(self.num_shards, status['num_shards']):", "num_shards(self): \"\"\" Returns the number of shards (if a cluster config is set),", "can loop on the dataset at all times else: if self.cluster_config and self.num_shards", "Current shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn)", "# Creating directory and saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode':", "do_infinite_training: If set, supervised training will loop over the training set forever and", "tf.Tensor representing the iterator. :param shared_name: Optional. 
If non-empty, this iterator will be", "documentation files (the \"Software\"), # to deal in the Software without restriction, including", "datasets self.training_dataset = None self.validation_dataset = None self.feedable_dataset = None # Creating iterator", "(self.batch_size * self.num_shards))) @property def nb_validation_steps_per_epoch(self): \"\"\" Returns the number of validation steps", "if not self.cluster_config: return None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0) @property def", "if that's the case if self.num_shards == status['num_shards']: status_loaded = True else: LOGGER.info('Number", "of items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index:", "import tf output_types = self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes # Making", "sure itertor is on the right device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config else None):", "certain number of batches # to get to the same training point if", "current status to file to be able to resume later \"\"\" # Not", "We load the chief status to validate that we have the same training_mode", "distribute, sublicense, and/or # sell copies of the Software, and to permit persons", "else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next() # Generating init", "self.nb_completed_epochs = 0 self._dataset_is_done = False # Loading number of items remaining if", "@batch_size.setter def batch_size(self, value): \"\"\" Setter for batch_size \"\"\" if self.num_shards is not", "self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # 
If we were training the train", "over the training set forever and will not switch to the validation set.", "or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\" Increments the local step counter \"\"\"", "self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset =", "when using shards') self._batch_size = value @property def num_shards(self): \"\"\" Returns the number", "self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but no need to shuffle if self.cluster_config", "hooks init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op))", "fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in fallbacks:", "\"\"\" Path to the status file on disk (where progress is saved) \"\"\"", "validation steps per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))) @property def", "= self.validation_dataset.apply(shard_fn) # Batching with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20", "if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized = True self._dataset_is_done =", "status path (to validate our status) 
\"\"\" if not self.cluster_config: return None return", "chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # If we were training", "0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done = False # Loading number", "cluster_config self.debug_batch = debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training = 1.00 if do_infinite_training else", "of generic default values from the output types and output shapes self.default_features =", "os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path, 'rb') as status: status = pickle.load(status) # Detecting", "\"\"\" Marks the dataset as having reached the end of the file\"\"\" self._dataset_is_done", "batch over-and-over to debug our model :param no_iterator: Boolean flag that indicates to", "os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property def chief_status_path(self): \"\"\" Path", "self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building", "features: self.output_features = features # Otherwise, we create a brand new iterator else:", "enum import Enum import logging import os import math import multiprocessing import pickle", "of already processed items if self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping %d batches in", "created with the \"no_iterator\" flag.') raise RuntimeError(\"Cannot create new iterator\") if iterator_resource is", "fallback_status_path(self): \"\"\" Path to an alternate status file if the primary is not", "file to load, aborting if not status_loaded: 
return # If we have the", "Batching with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset", "batches # to get to the same training point if self.training_mode == TrainingMode.TRAINING:", "# NOTICE: Permission is hereby granted, free of charge, to any person obtaining", "self.cluster_config else 1 @property def nb_training_steps_per_epoch(self): \"\"\" Returns the number of training steps", "output of iterator.get_next() :return: Nothing, but sets the self.iterator, self.features, and dataset init_ops", "\"\"\" Path to an alternate status file if the primary is not available", "having reached the end of the file\"\"\" self._dataset_is_done = True def build(self): \"\"\"", "we can't define initializers with the same name self._iterator_initialized = False self.training_init_op =", "False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\" Initializes the", "modes \"\"\" TRAINING = 'train' VALIDATION = 'valid' class SupervisedDataset(): \"\"\" This object", "self.session = None # Creating empty datasets self.training_dataset = None self.validation_dataset = None", "\"\"\" Increments the local step counter \"\"\" if not self.is_done or self.do_infinite_training: self.steps_in_current_mode", "the iterator has been initialized \"\"\" return self._iterator_initialized @property def status_path(self): \"\"\" Path", "iterator features are required when reloading a saved iterator.') raise ValueError() # Loading", "if checkpoint_dir is None. 
if not self.status_path: return # Trying to load from", "# Trying to load from primary path if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path,", "we have the same value as the chief, we load our status, otherwise", "\"\"\" if iterator_resource is not None and not self.no_iterator: LOGGER.error('An iterator resource can", "and this permission notice shall be included in all # copies or substantial", "Debug (batch) mode # Only taking one batch and looping over that batch", "Returns the total number of training and validation steps per epoch \"\"\" return", "= chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 #", "(where progress is saved) \"\"\" if not self.checkpoint_dir: return None if not self.cluster_config:", "batch_size \"\"\" if self.num_shards is not None: raise RuntimeError('You cannot change the batch_size", "if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode,", "/ (self.batch_size * self.num_shards))) @property def nb_validation_steps_per_epoch(self): \"\"\" Returns the number of validation", "Builds the TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow import tf assert 'request_id' in self.dataset_builder.get_proto_fields(),", "prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size,", "saving status if checkpoint_dir is None if not self.status_path: return # Recomputing nb", "status_loaded and 
self.fallback_status_path: try: with open(self.fallback_status_path, 'rb') as status: status = pickle.load(status) status_loaded", "used for distributed training. :param debug_batch: Boolean flag to indicate to return the", "status_loaded = False # Not loading status if checkpoint_dir is None. if not", "@property def nb_total_steps_per_epoch(self): \"\"\" Returns the total number of training and validation steps", "'rb') as status: status = pickle.load(status) # Detecting num of shards change and", "False if use_own_status: self.training_mode = status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress", "+ 1. / self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self): \"\"\" Marks the dataset as", "the same batch over-and-over to debug our model :param no_iterator: Boolean flag that", "file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads dataset status from disk and", "of training and validation steps per epoch \"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property", "to file to be able to resume later \"\"\" # Not saving status", "TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else: LOGGER.warning('Status between worker and chief does not match.", "available steps before running another evaluation epoch (e.g. 
2.5% train, valid, 2.5% train,", ":param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods :param", "= self.training_dataset.repeat(count=-1) # Regular mode # Otherwise, sharding and shuffling the dataset #", "make_session_run_hook(self): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow import", "((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized = False", "current iterator :param session: The session used to initialize the init op :type", "True self._dataset_is_done = False # For validation set, we can reset the steps", "epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards)))", "deleting file if that's the case if self.num_shards == status['num_shards']: status_loaded = True", "self.training_mode = status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress = status['training_progress'] if", "and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset']", "Loading status self._iterator_initialized = False if use_own_status: self.training_mode = status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs']", "int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\" Returns the total number", "full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))) @property def 
nb_validation_steps_per_epoch(self): \"\"\"", "'status-%03d.pkl' % status_ix)) # Otherwise, we just delete the worker status file else:", "set. :param perc_epoch_for_training: If set, the training epoch will be for this percentage", "be accessed.') raise RuntimeError('Invalid training mode specified.') self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode = 0", "import tf # Running init_op # If session is wrapped, executing it without", "copies of the Software, and to permit persons to whom the Software is", "'status.pkl')] for fallback in fallbacks: if os.path.exists(fallback): return fallback return None @property def", "for distributed training. :param debug_batch: Boolean flag to indicate to return the same", "status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress = status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode =", "the batch_size when using shards') self._batch_size = value @property def num_shards(self): \"\"\" Returns", "/ self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\" Save current status to file", "for _ in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id'])", "if checkpoint_dir is None if not self.status_path: return # Recomputing nb of completed", "file\"\"\" self._dataset_is_done = True def build(self): \"\"\" Builds the TensorFlow datasets \"\"\" from", "if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating directory and saving if", "min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing = False self.session = None # Creating", "or substantial portions 
of the Software. # ============================================================================== \"\"\" Supervised Dataset - Class", "self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes)", "to have a \"request_id\" field.' # Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') #", "shared_name: Optional. If non-empty, this iterator will be shared under the given name", "Only taking one batch and looping over that batch forever if self.debug_batch: self.training_dataset", "share the same devices (e.g. when using a remote server). :param features: If", "of iterator.get_next() :return: Nothing, but sets the self.iterator, self.features, and dataset init_ops \"\"\"", "checkpoint_dir != '' else WORKING_DIR # None = disabled self.cluster_config = cluster_config self.debug_batch", "self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size) # Batching with prefetching", "cleanup on the status folder if self.cluster_config and self.cluster_config.is_chief: for status_ix in range(self.num_shards,", "chief_status = pickle.load(chief_status) else: chief_status = status # We couldn't find a status", "None if not self.status_path: return # Recomputing nb of completed epochs when doing", "same training_mode and nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb')", "shall be included in all # copies or substantial portions of the Software.", "a shared name and a specific iterator resource) :param 
iterator_resource: A tf.resource scalar", "dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods :param checkpoint_dir:", "shuffle if self.cluster_config and self.num_shards > 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset =", "is responsible for generating entries to feed the model (using the tf.data.dataset API)", "as np from diplomacy_research.settings import WORKING_DIR # Constants LOGGER = logging.getLogger(__name__) class TrainingMode(Enum):", "os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as chief_status: chief_status = pickle.load(chief_status) else: chief_status = status", "import math import multiprocessing import pickle import numpy as np from diplomacy_research.settings import", "be set if the dataset was created with the \"no_iterator\" flag.') raise RuntimeError(\"Cannot", "if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing = False self.session", "(Training / Validation) \"\"\" if self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property", "dataset at all times else: if self.cluster_config and self.num_shards > 1: LOGGER.info('Sharding dataset.", "= self.training_dataset.output_classes # Making sure itertor is on the right device/worker with tf.device(self.cluster_config.iterator_device", "self._iterator_initialized = False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\"", "= True else: LOGGER.info('Number of shards has changed from %d to %d', status['num_shards'],", "load the chief status to validate that we have the same training_mode 
and", "status = pickle.load(status) # Detecting num of shards change and deleting file if", "loop over the training set forever and will not switch to the validation", "a copy of this software and associated documentation files (the \"Software\"), # to", "TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done = False # Loading number of items remaining", "\"\"\" Determine if the iterator has been initialized \"\"\" return self._iterator_initialized @property def", "been reached \"\"\" if self.do_infinite_training: return False return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode", "\"\"\" if not self.is_done or self.do_infinite_training: self.steps_in_current_mode += 1 if self.training_mode == TrainingMode.TRAINING:", "Resuming by skipping a certain number of already processed items if self.nb_batches_to_skip: LOGGER.info('Resuming", "iterator resource, so we use it if iterator_resource is not None: self.iterator =", "status if not status_loaded and self.fallback_status_path: try: with open(self.fallback_status_path, 'rb') as status: status", "\"\"\" Path to the chief status path (to validate our status) \"\"\" if", "over-and-over to debug our model :param no_iterator: Boolean flag that indicates to not", "= int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession", "the model (using the tf.data.dataset API) \"\"\" # pylint: disable=too-many-instance-attributes def __init__(self, batch_size,", "self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size) # Batching with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function,", "sell copies of the Software, and to permit persons to whom the Software", "def create_iterator(self, iterator_resource=None, shared_name=None, features=None): \"\"\" Creates an 
iterator object (optionally using a", "%d', status['num_shards'], self.num_shards) # If we are chief, we do a cleanup on", "% 0) @property def fallback_status_path(self): \"\"\" Path to an alternate status file if", "validate that we have the same training_mode and nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path)", "is None. if not self.status_path: return # Trying to load from primary path", "'training_progress': self.training_progress, 'num_shards': self.num_shards} with open(self.status_path, 'wb') as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def", "when doing infinite training if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating", "step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized = True self._dataset_is_done = False # For validation set,", "if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index = pickle.load(dataset_index)", "self.iterator, self.features, and dataset init_ops \"\"\" if iterator_resource is not None and not", "self.num_shards == status['num_shards']: status_loaded = True else: LOGGER.info('Number of shards has changed from", "= debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1.,", "the local step counter \"\"\" if not self.is_done or self.do_infinite_training: self.steps_in_current_mode += 1", "status_ix)) # Otherwise, we just delete the worker status file else: os.unlink(self.status_path) #", "pickle.load(status) status_loaded = True except EOFError: pass # We load the chief status", "TRAINING = 'train' VALIDATION = 'valid' class SupervisedDataset(): \"\"\" This object is responsible", "= {} for feature_name, feature_shape in 
self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] =", "(if a cluster config is set), otherwise None \"\"\" return self.cluster_config.num_shards if self.cluster_config", "/ self.nb_training_steps_per_full_epoch) # Creating directory and saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status", "default if features are missing from queue # Steps self.nb_batches_to_skip = 0 #", "= True def build(self): \"\"\" Builds the TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow import", "* self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with a new iterator_resource),", "not self.no_iterator: LOGGER.error('An iterator resource can only be set if the dataset was", "object \"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops iterating", "empty datasets self.training_dataset = None self.validation_dataset = None self.feedable_dataset = None # Creating", "right device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config else None): # We have an iterator", "============================================================================== \"\"\" Supervised Dataset - Class responsible for using a training and validation", "from %d to %d', status['num_shards'], self.num_shards) # If we are chief, we do", "as chief_status: chief_status = pickle.load(chief_status) else: chief_status = status # We couldn't find", "(RPC) dataset \"\"\" return True @property def batch_size(self): \"\"\" Getter for batch_size \"\"\"", "mid-epoch (from load_status()) - So we keep the current value if self.training_mode ==", ":type session: 
tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset is currently in \"infinite training\" mode.", "to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset is", "else: LOGGER.info('Number of shards has changed from %d to %d', status['num_shards'], self.num_shards) #", "LOGGER.error('Dataset is currently in \"infinite training\" mode. Only the training set can be", "return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number of steps", "substantial portions of the Software. # ============================================================================== \"\"\" Supervised Dataset - Class responsible", "current value if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # Resuming by skipping", "self.steps_in_current_mode = 0 # Step count in current mode self.training_progress = 0. 
#", "to load from primary path if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path, 'rb') as", "shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100", "self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards} with open(self.status_path, 'wb') as file: pickle.dump(status,", "# Running init_op # If session is wrapped, executing it without hooks init_op", "self.cluster_config.num_shards if self.cluster_config else 1 @property def nb_training_steps_per_epoch(self): \"\"\" Returns the number of", "tf.data.dataset API) \"\"\" # pylint: disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False,", "import WORKING_DIR # Constants LOGGER = logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration of training", "self.dataset_builder.get_proto_fields(), 'You need to have a \"request_id\" field.' 
# Training dataset self.training_dataset =", "on the right device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config else None): # We have", "per full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))) @property def nb_validation_steps_per_epoch(self):", "compression_type='GZIP') # Debug (batch) mode # Only taking one batch and looping over", "padded_shapes=self.dataset_builder.padded_shapes) # Building a list of generic default values from the output types", "set if the dataset was created with the \"no_iterator\" flag.') raise RuntimeError(\"Cannot create", "API) \"\"\" # pylint: disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False,", "Otherwise, sharding and shuffling the dataset # Repeating to make sure all workers", "* self.batch_size) # Batching with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100", "perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\" Save current", "@property def chief_status_path(self): \"\"\" Path to the chief status path (to validate our", "the dataset at all times else: if self.cluster_config and self.num_shards > 1: LOGGER.info('Sharding", "ops self.iterator = None self._iterator_initialized = False self.training_init_op = None self.validation_init_op = None", "steps per full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))) @property def", "is None if not self.status_path: return # Recomputing nb of completed epochs when", "self.perc_epoch_for_training * self.total_nb_items_training_proto return 
int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards))) @property def nb_training_steps_per_full_epoch(self): #", "output_types = self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes # Making sure itertor", "we keep the current value if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 #", "with open(self.status_path, 'rb') as status: status = pickle.load(status) # Detecting num of shards", "self.training_dataset.repeat(count=-1) # Regular mode # Otherwise, sharding and shuffling the dataset # Repeating", "set can be accessed.') raise RuntimeError('Invalid training mode specified.') self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode", "# Otherwise, sharding and shuffling the dataset # Repeating to make sure all", "= self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) #", "of this software and associated documentation files (the \"Software\"), # to deal in", "dataset init_ops \"\"\" if iterator_resource is not None and not self.no_iterator: LOGGER.error('An iterator", "get to the same training point if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress", "multiprocessing import pickle import numpy as np from diplomacy_research.settings import WORKING_DIR # Constants", "%d shards. Current shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset", "cluster configuration will be used for distributed training. 
:param debug_batch: Boolean flag to", "(self.batch_size * self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\" Returns the total number of training", "the dataset in validation mode :param session: The session used to initialize the", "% of the epoch completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch)", "from the beginning # For training, we might resume mid-epoch (from load_status()) -", "a certain number of already processed items if self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping", "(e.g. 2.5% train, valid, 2.5% train, ...) :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig", "self.training_progress = status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else: LOGGER.warning('Status between", "= batch_size self.dataset_builder = dataset_builder self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else", "from diplomacy_research.settings import WORKING_DIR # Constants LOGGER = logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration", "def status_path(self): \"\"\" Path to the status file on disk (where progress is", "except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0 def start_training_mode(self, session): \"\"\" Starts the dataset", "diplomacy_research.utils.tensorflow import tf # Running init_op # If session is wrapped, executing it", "from enum import Enum import logging import os import math import multiprocessing import", "number of training and validation steps per epoch \"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch", "= tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if 
features: self.output_features = features # Otherwise,", "(self.training_progress + 1. / self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self): \"\"\" Marks the dataset", "can be accessed.') raise RuntimeError('Invalid training mode specified.') self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode =", "self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\" Determine if the iterator has been initialized \"\"\"", ":return: Nothing, but sets the self.iterator, self.features, and dataset init_ops \"\"\" if iterator_resource", "we just delete the worker status file else: os.unlink(self.status_path) # We load the", "self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building a list of generic default values from", "pylint: disable=invalid-name \"\"\" Returns the number of training steps per full epoch \"\"\"", "above copyright notice and this permission notice shall be included in all #", "None and features is None: LOGGER.error('The iterator features are required when reloading a", "mode (Training / Validation) \"\"\" if self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch", "@property def batch_size(self): \"\"\" Getter for batch_size \"\"\" return self._batch_size @batch_size.setter def batch_size(self,", "\"\"\" if self.do_infinite_training: LOGGER.error('Dataset is currently in \"infinite training\" mode. Only the training", "# copies or substantial portions of the Software. 
# ============================================================================== \"\"\" Supervised Dataset", "TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # Resuming by skipping a certain number of already", "and dataset init_ops \"\"\" if iterator_resource is not None and not self.no_iterator: LOGGER.error('An", "= bytes('', 'utf-8') else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) # -----------------------------", "and associated documentation files (the \"Software\"), # to deal in the Software without", "of shards change and deleting file if that's the case if self.num_shards ==", "!= '' else WORKING_DIR # None = disabled self.cluster_config = cluster_config self.debug_batch =", "in the Software without restriction, including without limitation the # rights to use,", "of file has been reached \"\"\" if self.do_infinite_training: return False return self._dataset_is_done or", "# Only taking one batch and looping over that batch forever if self.debug_batch:", "None @property def is_done(self): \"\"\" Returns True if the end of file has", "and self.num_shards > 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching", "num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator", "int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session) def start_validation_mode(self, session):", "{} for feature_name, feature_shape in self.dataset_builder.output_shapes.items(): if 
self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] = bytes('',", "over that batch forever if self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) #", "only be set if the dataset was created with the \"no_iterator\" flag.') raise", "and os.path.getsize(self.status_path): with open(self.status_path, 'rb') as status: status = pickle.load(status) # Detecting num", "if iterator_resource is not None and features is None: LOGGER.error('The iterator features are", "shards (if a cluster config is set), otherwise None \"\"\" return self.cluster_config.num_shards if", "if iterator_resource is not None and not self.no_iterator: LOGGER.error('An iterator resource can only", "tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count())", "= 0 def start_training_mode(self, session): \"\"\" Starts the dataset in training mode :param", "to shuffle if self.cluster_config and self.num_shards > 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset", "def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\" Returns the number of training steps per", "self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if features: self.output_features = features #", "> 1: LOGGER.info('Sharding dataset. There are %d shards. 
Current shard index: #%d.', self.cluster_config.num_shards,", "self.training_dataset.output_classes # Making sure itertor is on the right device/worker with tf.device(self.cluster_config.iterator_device if", "status_ix in range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl'", "LOGGER.warning('Status between worker and chief does not match. Resuming using chief status.') self.training_mode", "self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping %d batches in the training dataset.', self.nb_batches_to_skip) try:", "Path to an alternate status file if the primary is not available \"\"\"", "dataset can support an iterator or if it is a remote (RPC) dataset", "shared under the given name across multiple sessions that share the same devices", "TrainingMode(Enum): \"\"\" Enumeration of training modes \"\"\" TRAINING = 'train' VALIDATION = 'valid'", "keep the current value if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # Resuming", "# Will be used as default if features are missing from queue #", "training set forever and will not switch to the validation set. :param perc_epoch_for_training:", "Building a list of generic default values from the output types and output", "be saved. None to disable, '' for default dir. :param cluster_config: Optional. 
If", "None: raise RuntimeError('You cannot change the batch_size when using shards') self._batch_size = value", "using a training and validation dataset to feed data to the model through", "return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\" Returns the total", "We load the fallback status if not status_loaded and self.fallback_status_path: try: with open(self.fallback_status_path,", "iterator_resource is not None and features is None: LOGGER.error('The iterator features are required", "The session used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if", "self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\" Save current status to file to be able", "<NAME> # # NOTICE: Permission is hereby granted, free of charge, to any", "- So we keep the current value if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode =", "otherwise we use the chief use_own_status = ((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs'] ==", ":type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments self._batch_size = batch_size", "self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) # ----------------------------- # Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path,", "'status-%03d.pkl' % 0) @property def fallback_status_path(self): \"\"\" Path to an alternate status file", "a remote server). :param features: If an iterator_resource is specified, this corresponds to", "if self.cluster_config and self.num_shards > 1: LOGGER.info('Sharding dataset. There are %d shards. 
Current", "LOGGER.info('Resuming training by skipping %d batches in the training dataset.', self.nb_batches_to_skip) try: for", "a brand new iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features =", "if not self.no_iterator: self.create_iterator() def create_iterator(self, iterator_resource=None, shared_name=None, features=None): \"\"\" Creates an iterator", "our model :param no_iterator: Boolean flag that indicates to not create an iterator", "of the Software, and to permit persons to whom the Software is #", "an iterator or if it is a remote (RPC) dataset \"\"\" return True", "= TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\" Starts the dataset", "free of charge, to any person obtaining # a copy of this software", "the total number of training and validation steps per epoch \"\"\" return self.nb_training_steps_per_epoch", "output_classes = self.training_dataset.output_classes # Making sure itertor is on the right device/worker with", "# Building a list of generic default values from the output types and", "folder if self.cluster_config and self.cluster_config.is_chief: for status_ix in range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status',", "to feed data to the model through tf.data.dataset \"\"\" from enum import Enum", "----------------------------- # Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but no need", "initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset is currently", "status folder if self.cluster_config and self.cluster_config.is_chief: for status_ix in range(self.num_shards, 
status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir,", "If set, supervised training will loop over the training set forever and will", "None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if features: self.output_features = features", "return None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0) @property def fallback_status_path(self): \"\"\" Path", "if not status_loaded and self.fallback_status_path: try: with open(self.fallback_status_path, 'rb') as status: status =", "RuntimeError(\"Cannot create new iterator\") if iterator_resource is not None and features is None:", "pass # We load the chief status to validate that we have the", "primary path if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path, 'rb') as status: status =", "of training steps per full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards)))", "instance of `BaseBuilder` containing the proto-fields and generation methods :param checkpoint_dir: The directory", "of a batch per tower :param dataset_builder: An instance of `BaseBuilder` containing the", "except EOFError: pass # We load the chief status to validate that we", "for default dir. :param cluster_config: Optional. 
If set, the cluster configuration will be", "remote (RPC) dataset \"\"\" return True @property def batch_size(self): \"\"\" Getter for batch_size", "If set, the training epoch will be for this percentage of available steps", "features: If an iterator_resource is specified, this corresponds to the output of iterator.get_next()", "use it if iterator_resource is not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes,", "chief_status_path(self): \"\"\" Path to the chief status path (to validate our status) \"\"\"", "self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) # Regular mode # Otherwise, sharding and shuffling the", "if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else: LOGGER.warning('Status between worker and chief", "batch_size(self, value): \"\"\" Setter for batch_size \"\"\" if self.num_shards is not None: raise", "return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops iterating the dataset \"\"\" self.is_closing = True", "self.iterator = None self._iterator_initialized = False self.training_init_op = None self.validation_init_op = None self.output_features", "always starting from the beginning # For training, we might resume mid-epoch (from", "for the MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self):", "to the validation set. 
:param perc_epoch_for_training: If set, the training epoch will be", "same batch over-and-over to debug our model :param no_iterator: Boolean flag that indicates", "os.unlink(self.status_path) # We load the fallback status if not status_loaded and self.fallback_status_path: try:", "self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds a", "# Recomputing nb of completed epochs when doing infinite training if self.do_infinite_training: self.nb_completed_epochs", "epoch \"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number", "dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments self._batch_size = batch_size self.dataset_builder", "nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as chief_status: chief_status", "int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def save_status(self):", "self.training_progress, 'num_shards': self.num_shards} with open(self.status_path, 'wb') as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self):", "used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs", "if self.num_shards is not None: raise RuntimeError('You cannot change the batch_size when using", "0 # Resuming by skipping a certain number of already processed items if", ">= self.nb_steps_per_epoch_current_mode def 
take_local_step(self): \"\"\" Increments the local step counter \"\"\" if not", "def nb_validation_steps_per_epoch(self): \"\"\" Returns the number of validation steps per epoch \"\"\" return", "Not saving status if checkpoint_dir is None if not self.status_path: return # Recomputing", "Step count in current mode self.training_progress = 0. # Number of items remaining", "an iterator resource, so we use it if iterator_resource is not None: self.iterator", "dataset.', self.nb_batches_to_skip) try: for _ in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context:", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "# We couldn't find a status file to load, aborting if not status_loaded:", "import multiprocessing import pickle import numpy as np from diplomacy_research.settings import WORKING_DIR #", "times else: if self.cluster_config and self.num_shards > 1: LOGGER.info('Sharding dataset. 
There are %d", "if use_own_status: self.training_mode = status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress =", "os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto", "name and a specific iterator resource) :param iterator_resource: A tf.resource scalar tf.Tensor representing", "= status # We couldn't find a status file to load, aborting if", "(from load_status()) - So we keep the current value if self.training_mode == TrainingMode.VALIDATION:", "per epoch \"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the", "before running another evaluation epoch (e.g. 2.5% train, valid, 2.5% train, ...) 
:type", "is None: return # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf # Running init_op", "= self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\" Save current status", "self._batch_size = value @property def num_shards(self): \"\"\" Returns the number of shards (if", "0 self.initialize_iterator(session) def get_progress(self): \"\"\" Returns the number of completed epochs, and the", "if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return", "tf # Running init_op # If session is wrapped, executing it without hooks", "self.is_closing = False self.session = None # Creating empty datasets self.training_dataset = None", "of available steps before running another evaluation epoch (e.g. 2.5% train, valid, 2.5%", "Increments the local step counter \"\"\" if not self.is_done or self.do_infinite_training: self.steps_in_current_mode +=", "Creates an iterator object (optionally using a shared name and a specific iterator", "set, the cluster configuration will be used for distributed training. :param debug_batch: Boolean", "the training set forever and will not switch to the validation set. :param", "If session is wrapped, executing it without hooks init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION:", "Not loading status if checkpoint_dir is None. 
if not self.status_path: return # Trying", "session): \"\"\" Starts the dataset in validation mode :param session: The session used", "new iterator\") if iterator_resource is not None and features is None: LOGGER.error('The iterator", "# # NOTICE: Permission is hereby granted, free of charge, to any person", "that's the case if self.num_shards == status['num_shards']: status_loaded = True else: LOGGER.info('Number of", "TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session) def get_progress(self): \"\"\" Returns the number of completed", "pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads dataset status from disk and resume where we", "completed epochs when doing infinite training if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch)", "self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as chief_status: chief_status = pickle.load(chief_status)", "and validation steps per epoch \"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self):", "in the current mode (Training / Validation) \"\"\" if self.training_mode == TrainingMode.VALIDATION: return", "/ self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\"", "False # For validation set, we can reset the steps since we are", "not match. 
Resuming using chief status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode", "for batch_size \"\"\" return self._batch_size @batch_size.setter def batch_size(self, value): \"\"\" Setter for batch_size", "features are required when reloading a saved iterator.') raise ValueError() # Loading TensorFlow", "(e.g. when using a remote server). :param features: If an iterator_resource is specified,", "initializers with the same name self._iterator_initialized = False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op =", "validate our status) \"\"\" if not self.cluster_config: return None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl'", "If non-empty, this iterator will be shared under the given name across multiple", "checkpoint_dir if checkpoint_dir != '' else WORKING_DIR # None = disabled self.cluster_config =", "each dataset # Using different names because we can't define initializers with the", "iterator_resource is not None and not self.no_iterator: LOGGER.error('An iterator resource can only be", "but sets the self.iterator, self.features, and dataset init_ops \"\"\" if iterator_resource is not", "(using the tf.data.dataset API) \"\"\" # pylint: disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder, checkpoint_dir='',", "else WORKING_DIR # None = disabled self.cluster_config = cluster_config self.debug_batch = debug_batch self.no_iterator", "= {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards} with open(self.status_path,", "dataset status from disk and resume where we were \"\"\" status = {}", "open(self.status_path, 'rb') as status: status = pickle.load(status) # Detecting num of shards change", 
"exist_ok=True) status = {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards}", "of completed epochs when doing infinite training if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode /", "self.training_dataset = self.training_dataset.repeat(count=-1) # Regular mode # Otherwise, sharding and shuffling the dataset", "checkpoint_dir: The directory where the status is to be saved. None to disable,", "Permission is hereby granted, free of charge, to any person obtaining # a", "so, subject to the following conditions: # # The above copyright notice and", "point if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\"", "the epoch completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed =", "\"\"\" Save current status to file to be able to resume later \"\"\"", "+= 1 if self.training_mode == TrainingMode.TRAINING: self.training_progress = (self.training_progress + 1. 
/ self.nb_training_steps_per_full_epoch)", "Using different names because we can't define initializers with the same name self._iterator_initialized", "\"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number of", "been initialized \"\"\" return self._iterator_initialized @property def status_path(self): \"\"\" Path to the status", "* self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\"", "status file if the primary is not available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status',", "not create an iterator (it will be loaded from a ckpt) :param do_infinite_training:", "self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode", "items if self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping %d batches in the training dataset.',", "dataset in validation mode :param session: The session used to initialize the init", "otherwise None \"\"\" return self.cluster_config.num_shards if self.cluster_config else 1 @property def nb_training_steps_per_epoch(self): \"\"\"", "if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # If we were training the", "dataset_index['size_valid_dataset'] # Building the datasets self.build() @property def can_support_iterator(self): \"\"\" Determines if the", "the tf.data.dataset API) \"\"\" # pylint: disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None,", "a remote (RPC) dataset \"\"\" return True @property def batch_size(self): \"\"\" Getter for", "we are chief, we do a cleanup on the status folder if self.cluster_config", "chief status to validate that we have the same 
training_mode and nb_epochs if", "and self.num_shards > 1: LOGGER.info('Sharding dataset. There are %d shards. Current shard index:", "to the model through tf.data.dataset \"\"\" from enum import Enum import logging import", "Enumeration of training modes \"\"\" TRAINING = 'train' VALIDATION = 'valid' class SupervisedDataset():", "None self.output_features = None # This represents iterator.get_next() self.default_features = {} # Will", "the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip", "mode # Otherwise, sharding and shuffling the dataset # Repeating to make sure", "None if not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id)", "loaded from a ckpt) :param do_infinite_training: If set, supervised training will loop over", "def build(self): \"\"\" Builds the TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow import tf assert", "self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset", "# Step count in current mode self.training_progress = 0. 
# Number of items", "self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) # Regular mode # Otherwise, sharding", "it is a remote (RPC) dataset \"\"\" return True @property def batch_size(self): \"\"\"", "sets the self.iterator, self.features, and dataset init_ops \"\"\" if iterator_resource is not None", "return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0) @property def fallback_status_path(self): \"\"\" Path to an", "The session used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" #", "train dataset, we need to skip a certain number of batches # to", "tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if features: self.output_features = features # Otherwise, we", "0 else: LOGGER.warning('Status between worker and chief does not match. 
Resuming using chief", "\"\"\" Returns True if the end of file has been reached \"\"\" if", "self.output_features = None # This represents iterator.get_next() self.default_features = {} # Will be", "nb_training_steps_per_epoch(self): \"\"\" Returns the number of training steps per epoch \"\"\" nb_items_per_epoch =", "np.object: self.default_features[feature_name] = bytes('', 'utf-8') else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype)", "reached \"\"\" if self.do_infinite_training: return False return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode def", "= self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size) # Batching with", "# ----------------------------- # Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but no", "pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads dataset status from disk and resume", "on the dataset at all times else: if self.cluster_config and self.num_shards > 1:", "the fallback status if not status_loaded and self.fallback_status_path: try: with open(self.fallback_status_path, 'rb') as", "\"\"\" Returns the number of shards (if a cluster config is set), otherwise", "software and associated documentation files (the \"Software\"), # to deal in the Software", "# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell", "be used for distributed training. 
:param debug_batch: Boolean flag to indicate to return", "from diplomacy_research.utils.tensorflow import tf output_types = self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes", "with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset =", "TensorFlow from diplomacy_research.utils.tensorflow import tf output_types = self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes =", "Resuming using chief status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode']", "the worker status file else: os.unlink(self.status_path) # We load the fallback status if", "disable, '' for default dir. :param cluster_config: Optional. 
If set, the cluster configuration", "tensorflow.python.client.session.Session \"\"\" # We haven't created an iterator yet if self.iterator is None:", "value as the chief, we load our status, otherwise we use the chief", "hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized = True self._dataset_is_done = False", "self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building a list of generic default values", "model through tf.data.dataset \"\"\" from enum import Enum import logging import os import", "skipping a certain number of already processed items if self.nb_batches_to_skip: LOGGER.info('Resuming training by", "else None): # We have an iterator resource, so we use it if", "0 self._dataset_is_done = False # Loading number of items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\", "self._dataset_is_done = True def build(self): \"\"\" Builds the TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow", "without hooks init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context:", "if self.cluster_config and self.num_shards > 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn)", "and chief does not match. 
Resuming using chief status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs", "+ self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number of steps per epoch", "self.checkpoint_dir: return None if not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl'", "we are always starting from the beginning # For training, we might resume", "at all times else: if self.cluster_config and self.num_shards > 1: LOGGER.info('Sharding dataset. There", "represents iterator.get_next() self.default_features = {} # Will be used as default if features", "train, valid, 2.5% train, ...) :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" #", "math import multiprocessing import pickle import numpy as np from diplomacy_research.settings import WORKING_DIR", "as status: status = pickle.load(status) # Detecting num of shards change and deleting", "pickle.load(status) # Detecting num of shards change and deleting file if that's the", "nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number of steps per epoch in the current mode", "tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next() # Generating init op for each", "'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property def chief_status_path(self): \"\"\" Path to the chief status", "the model through tf.data.dataset \"\"\" from enum import Enum import logging import os", "saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 
'steps_current_mode':", "no_iterator: Boolean flag that indicates to not create an iterator (it will be", "to skip a certain number of batches # to get to the same", "and saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs,", "status_path(self): \"\"\" Path to the status file on disk (where progress is saved)", "the end of file has been reached \"\"\" if self.do_infinite_training: return False return", "hereby granted, free of charge, to any person obtaining # a copy of", "is specified, this corresponds to the output of iterator.get_next() :return: Nothing, but sets", "# For validation set, we can reset the steps since we are always", "(batch) mode # Only taking one batch and looping over that batch forever", "batch_size when using shards') self._batch_size = value @property def num_shards(self): \"\"\" Returns the", "as having reached the end of the file\"\"\" self._dataset_is_done = True def build(self):", "self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property def chief_status_path(self):", "whom the Software is # furnished to do so, subject to the following", "\\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto =", "= self.perc_epoch_for_training * self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards))) @property def nb_training_steps_per_full_epoch(self):", "tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0 def start_training_mode(self, session): \"\"\" Starts the dataset in", "iterating the dataset \"\"\" 
self.is_closing = True self.training_dataset = None self.validation_dataset = None", "else: LOGGER.warning('Status between worker and chief does not match. Resuming using chief status.')", "the given name across multiple sessions that share the same devices (e.g. when", "checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size: The size of", "not self.cluster_config: return None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0) @property def fallback_status_path(self):", "'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in fallbacks: if os.path.exists(fallback): return", "logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration of training modes \"\"\" TRAINING = 'train' VALIDATION", "by skipping %d batches in the training dataset.', self.nb_batches_to_skip) try: for _ in", "local step counter \"\"\" if not self.is_done or self.do_infinite_training: self.steps_in_current_mode += 1 if", "is # furnished to do so, subject to the following conditions: # #", "self._batch_size @batch_size.setter def batch_size(self, value): \"\"\" Setter for batch_size \"\"\" if self.num_shards is", "LOGGER = logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration of training modes \"\"\" TRAINING =", "training mode specified.') self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session) def get_progress(self): \"\"\"", "if self.cluster_config and self.cluster_config.is_chief: for status_ix in range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl'", "no_iterator self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training", "'rb') as dataset_index: 
dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] #", "by skipping a certain number of already processed items if self.nb_batches_to_skip: LOGGER.info('Resuming training", "TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds a SessionRunHook for", "from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops iterating the dataset", "TrainingMode.TRAINING: self.training_progress = (self.training_progress + 1. / self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self): \"\"\"", "self.num_shards is not None: raise RuntimeError('You cannot change the batch_size when using shards')", "checkpoint_dir is None. if not self.status_path: return # Trying to load from primary", "class TrainingMode(Enum): \"\"\" Enumeration of training modes \"\"\" TRAINING = 'train' VALIDATION =", "if self.do_infinite_training: return False return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\"", "def get_progress(self): \"\"\" Returns the number of completed epochs, and the current %", "1 @property def nb_training_steps_per_epoch(self): \"\"\" Returns the number of training steps per epoch", "using a shared name and a specific iterator resource) :param iterator_resource: A tf.resource", "same name self._iterator_initialized = False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self,", "self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building 
the datasets self.build() @property def", "shards') self._batch_size = value @property def num_shards(self): \"\"\" Returns the number of shards", "{} status_loaded = False # Not loading status if checkpoint_dir is None. if", "in self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] = bytes('', 'utf-8') else: dtype =", "is not None and features is None: LOGGER.error('The iterator features are required when", "items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index", "self.nb_batches_to_skip = 0 def start_training_mode(self, session): \"\"\" Starts the dataset in training mode", "self.default_features = {} for feature_name, feature_shape in self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name]", "init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else:", "None. 
if not self.status_path: return # Trying to load from primary path if", "status_loaded = True except EOFError: pass # We load the chief status to", "def load_status(self): \"\"\" Loads dataset status from disk and resume where we were", "as default if features are missing from queue # Steps self.nb_batches_to_skip = 0", "to the chief status path (to validate our status) \"\"\" if not self.cluster_config:", "Returns True if the end of file has been reached \"\"\" if self.do_infinite_training:", "copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software,", "an iterator object (optionally using a shared name and a specific iterator resource)", "step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized = True self._dataset_is_done = False # For validation", "self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session) def", "use_own_status = ((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized", "default values from the output types and output shapes self.default_features = {} for", "number of completed epochs, and the current % of the epoch completed \"\"\"", "specific iterator resource) :param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator. 
:param", "a saved iterator.') raise ValueError() # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf output_types", "= True except EOFError: pass # We load the chief status to validate", "diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops iterating the dataset \"\"\"", "without restriction, including without limitation the # rights to use, copy, modify, merge,", "are %d shards. Current shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index)", "import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops iterating the dataset \"\"\" self.is_closing", "is saved) \"\"\" if not self.checkpoint_dir: return None if not self.cluster_config: return os.path.join(self.checkpoint_dir,", "open(self.fallback_status_path, 'rb') as status: status = pickle.load(status) status_loaded = True except EOFError: pass", "= 0 self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\" Starts the dataset in validation mode", "not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress':", "batch_size: The size of a batch per tower :param dataset_builder: An instance of", "if self.training_mode == TrainingMode.TRAINING: self.training_progress = (self.training_progress + 1. 
/ self.nb_training_steps_per_full_epoch) % 1", "session is wrapped, executing it without hooks init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode]", "self.training_mode == TrainingMode.TRAINING: self.training_progress = (self.training_progress + 1. / self.nb_training_steps_per_full_epoch) % 1 def", "shuffling the dataset # Repeating to make sure all workers can loop on", "= chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # If we were", "our status, otherwise we use the chief use_own_status = ((status['training_mode'] == chief_status['training_mode']) and", "disabled self.cluster_config = cluster_config self.debug_batch = debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training = 1.00", "assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a \"request_id\" field.' # Training", "flag.') raise RuntimeError(\"Cannot create new iterator\") if iterator_resource is not None and features", ":param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator. :param shared_name: Optional. 
If", "\"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\" Returns the", "self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with a new", "self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self): \"\"\" Marks the dataset as having reached the", "self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number of steps per", "furnished to do so, subject to the following conditions: # # The above", "output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if features: self.output_features = features # Otherwise, we create a", "perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size: The size of a batch per tower :param", "Returns the number of training steps per full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto /", "\"\"\" TRAINING = 'train' VALIDATION = 'valid' class SupervisedDataset(): \"\"\" This object is", "Creating iterator (with a new iterator_resource), unless specified otherwise if not self.no_iterator: self.create_iterator()", "# We have an iterator resource, so we use it if iterator_resource is", "tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset is currently in \"infinite training\" mode. Only the", "devices (e.g. when using a remote server). 
:param features: If an iterator_resource is", "= False # Loading number of items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path):", "= False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\" Initializes", "of training modes \"\"\" TRAINING = 'train' VALIDATION = 'valid' class SupervisedDataset(): \"\"\"", "a cluster config is set), otherwise None \"\"\" return self.cluster_config.num_shards if self.cluster_config else", "with init ops self.iterator = None self._iterator_initialized = False self.training_init_op = None self.validation_init_op", "self.num_shards > 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching with", "'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a \"request_id\" field.' 
# Training dataset", "@property def fallback_status_path(self): \"\"\" Path to an alternate status file if the primary", "executing it without hooks init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'):", "and output shapes self.default_features = {} for feature_name, feature_shape in self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name]", "self.nb_batches_to_skip) try: for _ in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id']))", "def can_support_iterator(self): \"\"\" Determines if the dataset can support an iterator or if", "beginning # For training, we might resume mid-epoch (from load_status()) - So we", "new iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next() #", "self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\" Save", "mode self.training_progress = 0. 
# Number of items remaining in epoch self.total_nb_items_training_proto =", "hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip =", "file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads dataset status from disk and resume where", "self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION:", "= np.zeros(shape=feature_shape[1:], dtype=dtype) # ----------------------------- # Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') #", "self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0", "Will be used as default if features are missing from queue # Steps", "def mark_as_done(self): \"\"\" Marks the dataset as having reached the end of the", "modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and", "and nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as chief_status:", "dataset # Repeating to make sure all workers can loop on the dataset", "None self._iterator_initialized = False self.training_init_op = None self.validation_init_op = None self.output_features = None", "self.iterator.get_next() # Generating init op for each dataset # Using different names because", "mode. 
Only the training set can be accessed.') raise RuntimeError('Invalid training mode specified.')", "self.batch_size) # Batching with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 *", "save_status(self): \"\"\" Save current status to file to be able to resume later", "with open(self.fallback_status_path, 'rb') as status: status = pickle.load(status) status_loaded = True except EOFError:", "distributed training. :param debug_batch: Boolean flag to indicate to return the same batch", "self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating", "self.cluster_config.task_id) @property def chief_status_path(self): \"\"\" Path to the chief status path (to validate", "# If we are chief, we do a cleanup on the status folder", "Making sure itertor is on the right device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config else", "the number of shards (if a cluster config is set), otherwise None \"\"\"", "= self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building a list of", "= tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but no need to shuffle if self.cluster_config and", "LOGGER.error('The iterator features are required when reloading a saved iterator.') raise ValueError() #", "in \"infinite training\" mode. 
Only the training set can be accessed.') raise RuntimeError('Invalid", "to load, aborting if not status_loaded: return # If we have the same", "need to skip a certain number of batches # to get to the", "Otherwise, we create a brand new iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes,", "training mode :param session: The session used to initialize the init op :type", "step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0 def start_training_mode(self, session):", "and will not switch to the validation set. :param perc_epoch_for_training: If set, the", "# If we were training the train dataset, we need to skip a", "= False # For validation set, we can reset the steps since we", "self.validation_init_op = None self.output_features = None # This represents iterator.get_next() self.default_features = {}", "EOFError: pass # We load the chief status to validate that we have", "notice shall be included in all # copies or substantial portions of the", "Enum import logging import os import math import multiprocessing import pickle import numpy", "0 # Step count in current mode self.training_progress = 0. 
# Number of", "not self.status_path: return # Trying to load from primary path if os.path.exists(self.status_path) and", "return # Trying to load from primary path if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with", "the output types and output shapes self.default_features = {} for feature_name, feature_shape in", "True @property def batch_size(self): \"\"\" Getter for batch_size \"\"\" return self._batch_size @batch_size.setter def", "for feature_name, feature_shape in self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] = bytes('', 'utf-8')", "already processed items if self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping %d batches in the", "chief status path (to validate our status) \"\"\" if not self.cluster_config: return None", "in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError:", "self._dataset_is_done = False # Loading number of items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and", "self.no_iterator: LOGGER.error('An iterator resource can only be set if the dataset was created", "self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] = bytes('', 'utf-8') else: dtype = self.dataset_builder.output_types[feature_name]", "self.iterator is None: return # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf # Running", "under the given name across multiple sessions that share the same devices (e.g.", "to be saved. None to disable, '' for default dir. 
:param cluster_config: Optional.", "None # This represents iterator.get_next() self.default_features = {} # Will be used as", "self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\" Starts the dataset in validation mode :param session:", "alternate status file if the primary is not available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir,", "iterator_resource), unless specified otherwise if not self.no_iterator: self.create_iterator() def create_iterator(self, iterator_resource=None, shared_name=None, features=None):", "shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size) #", "self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress = status['training_progress'] if self.training_mode == TrainingMode.VALIDATION:", "# Otherwise, we just delete the worker status file else: os.unlink(self.status_path) # We", "not status_loaded: return # If we have the same value as the chief,", "== TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds a SessionRunHook", "shared name and a specific iterator resource) :param iterator_resource: A tf.resource scalar tf.Tensor", "use_own_status: self.training_mode = status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress = status['training_progress']", "None \"\"\" return self.cluster_config.num_shards if self.cluster_config else 1 @property def nb_training_steps_per_epoch(self): \"\"\" Returns", "\"\"\" if self.do_infinite_training: return False return self._dataset_is_done or self.steps_in_current_mode >= 
self.nb_steps_per_epoch_current_mode def take_local_step(self):", "workers can loop on the dataset at all times else: if self.cluster_config and", "= self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with a new iterator_resource), unless specified otherwise", "op :type session: tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset is currently in \"infinite training\"", "False self.session = None # Creating empty datasets self.training_dataset = None self.validation_dataset =", "self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug (batch) mode # Only taking one batch", "Constants LOGGER = logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration of training modes \"\"\" TRAINING", "we were training the train dataset, we need to skip a certain number", "is not available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')]", "epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))) @property def nb_validation_steps_per_epoch(self): \"\"\" Returns", "self.mark_as_done() self.nb_batches_to_skip = 0 def start_training_mode(self, session): \"\"\" Starts the dataset in training", "self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\" Starts the", "for batch_size \"\"\" if self.num_shards is not None: raise RuntimeError('You cannot change the", "# Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug (batch) mode # Only", "self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards))) @property 
def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name", "Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted, free of", "\"\"\" Builds the TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow import tf assert 'request_id' in", "taking one batch and looping over that batch forever if self.debug_batch: self.training_dataset =", "None): # We have an iterator resource, so we use it if iterator_resource", "if self.iterator is None: return # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf #", "to get to the same training point if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip =", "use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the", "# Loading TensorFlow from diplomacy_research.utils.tensorflow import tf # Running init_op # If session", "validation steps per epoch \"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\"", "not None and features is None: LOGGER.error('The iterator features are required when reloading", "= tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 *", "to an alternate status file if the primary is not available \"\"\" fallbacks", "\"\"\" Stops iterating the dataset \"\"\" self.is_closing = True self.training_dataset = None self.validation_dataset", "Detecting num of shards change and deleting file if that's the case if", "of charge, to any person obtaining # a copy of this software and", "SupervisedDataset(): \"\"\" This object is responsible for generating entries to feed the model", "self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) # 
----------------------------- # Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP')", "self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" from", "generic default values from the output types and output shapes self.default_features = {}", "supervised training will loop over the training set forever and will not switch", "self.status_path: return # Recomputing nb of completed epochs when doing infinite training if", "* self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards))) @property def nb_training_steps_per_full_epoch(self): # pylint:", "def chief_status_path(self): \"\"\" Path to the chief status path (to validate our status)", "tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but no need to shuffle if self.cluster_config and self.num_shards", "= tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug (batch) mode # Only taking one batch and", "# pylint: disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.):", "dataset \"\"\" return True @property def batch_size(self): \"\"\" Getter for batch_size \"\"\" return", "Supervised Dataset - Class responsible for using a training and validation dataset to", "is not None: raise RuntimeError('You cannot change the batch_size when using shards') self._batch_size", "path (to validate our status) \"\"\" if not self.cluster_config: return None return os.path.join(self.checkpoint_dir,", "= int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode / 
self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def", "tf.data.dataset \"\"\" from enum import Enum import logging import os import math import", "This represents iterator.get_next() self.default_features = {} # Will be used as default if", "We haven't created an iterator yet if self.iterator is None: return # Loading", "certain number of already processed items if self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping %d", "for fallback in fallbacks: if os.path.exists(fallback): return fallback return None @property def is_done(self):", "status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)) # Otherwise, we just delete the worker", "of items remaining in epoch self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto = 0 self.training_mode =", "return self._iterator_initialized @property def status_path(self): \"\"\" Path to the status file on disk", "the iterator. :param shared_name: Optional. If non-empty, this iterator will be shared under", "forever and will not switch to the validation set. :param perc_epoch_for_training: If set,", "int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))) @property def nb_validation_steps_per_epoch(self): \"\"\" Returns the number of", "'You need to have a \"request_id\" field.' 
# Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path,", "return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards))) @property def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\"", "self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) # Regular mode # Otherwise, sharding and", "status file else: os.unlink(self.status_path) # We load the fallback status if not status_loaded", "used as default if features are missing from queue # Steps self.nb_batches_to_skip =", "self.training_progress = (self.training_progress + 1. / self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self): \"\"\" Marks", "self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] = bytes('', 'utf-8') else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] =", "to whom the Software is # furnished to do so, subject to the", "(with a new iterator_resource), unless specified otherwise if not self.no_iterator: self.create_iterator() def create_iterator(self,", "cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments self._batch_size = batch_size self.dataset_builder = dataset_builder self.checkpoint_dir", "os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property def chief_status_path(self): \"\"\" Path to the chief", "\"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))) @property def nb_validation_steps_per_epoch(self): \"\"\" Returns the", "be included in all # copies or substantial portions of the Software. 
#", "start_training_mode(self, session): \"\"\" Starts the dataset in training mode :param session: The session", "it if iterator_resource is not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes)", "# None = disabled self.cluster_config = cluster_config self.debug_batch = debug_batch self.no_iterator = no_iterator", "return False return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\" Increments the", "self.do_infinite_training: return False return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\" Increments", "feature_name, feature_shape in self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] = bytes('', 'utf-8') else:", "self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing", "to the status file on disk (where progress is saved) \"\"\" if not", "return None @property def is_done(self): \"\"\" Returns True if the end of file", "a certain number of batches # to get to the same training point", "= cluster_config self.debug_batch = debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training = 1.00 if do_infinite_training", "1 def mark_as_done(self): \"\"\" Marks the dataset as having reached the end of", "= 0 # Resuming by skipping a certain number of already processed items", "in range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' %", "RuntimeError('Invalid training mode specified.') self.training_mode = 
TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session) def get_progress(self):", "copy of this software and associated documentation files (the \"Software\"), # to deal", "haven't created an iterator yet if self.iterator is None: return # Loading TensorFlow", "chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized = False if use_own_status: self.training_mode = status['training_mode'] self.nb_completed_epochs", "TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\" Starts the dataset in", "dataset_index: dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building the", "no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size: The size of a batch per", "= self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\" Initializes the current iterator :param session: The", "the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset is currently in", "current mode (Training / Validation) \"\"\" if self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return", "reloading a saved iterator.') raise ValueError() # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf", "session): \"\"\" Starts the dataset in training mode :param session: The session used", "Initializes the current iterator :param session: The session used to initialize the init", "self.create_iterator() def create_iterator(self, iterator_resource=None, shared_name=None, features=None): \"\"\" Creates an iterator object (optionally using", "return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * 
self.num_shards))) @property def nb_validation_steps_per_epoch(self): \"\"\" Returns the number", "training, we might resume mid-epoch (from load_status()) - So we keep the current", "= 0 else: LOGGER.warning('Status between worker and chief does not match. Resuming using", "prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size,", "included in all # copies or substantial portions of the Software. # ==============================================================================", "of the Software. # ============================================================================== \"\"\" Supervised Dataset - Class responsible for using", "load the fallback status if not status_loaded and self.fallback_status_path: try: with open(self.fallback_status_path, 'rb')", "# Batching with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size)", "self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized =", "self.training_dataset.shuffle(100 * self.batch_size) # Batching with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset =", "Creating empty datasets self.training_dataset = None self.validation_dataset = None self.feedable_dataset = None #", "{TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: 
self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized", "of completed epochs, and the current % of the epoch completed \"\"\" if", "status: status = pickle.load(status) # Detecting num of shards change and deleting file", "os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto", "self.steps_in_current_mode = 0 self.initialize_iterator(session) def get_progress(self): \"\"\" Returns the number of completed epochs,", "op :type session: tensorflow.python.client.session.Session \"\"\" # We haven't created an iterator yet if", "\"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops iterating the", "debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size: The size of a batch", "associated documentation files (the \"Software\"), # to deal in the Software without restriction,", "iterator :param session: The session used to initialize the init op :type session:", "self.nb_training_steps_per_full_epoch) # Creating directory and saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status =", "per tower :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation", "self.nb_completed_epochs += 1 self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode =", "= status['training_progress'] if self.training_mode == 
TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else: LOGGER.warning('Status between worker", "able to resume later \"\"\" # Not saving status if checkpoint_dir is None", "iterator.get_next() :return: Nothing, but sets the self.iterator, self.features, and dataset init_ops \"\"\" if", "the cluster configuration will be used for distributed training. :param debug_batch: Boolean flag", "steps since we are always starting from the beginning # For training, we", "this permission notice shall be included in all # copies or substantial portions", "directory and saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode': self.training_mode, 'nb_completed_epochs':", "with open(self.status_path, 'wb') as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads dataset", "= [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in fallbacks: if", "the chief use_own_status = ((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading", "if self.num_shards == status['num_shards']: status_loaded = True else: LOGGER.info('Number of shards has changed", "and resume where we were \"\"\" status = {} status_loaded = False #", "# Loading status self._iterator_initialized = False if use_own_status: self.training_mode = status['training_mode'] self.nb_completed_epochs =", "is hereby granted, free of charge, to any person obtaining # a copy", "not self.checkpoint_dir: return None if not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status',", "self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number of steps per epoch in", "None: LOGGER.error('The iterator features 
are required when reloading a saved iterator.') raise ValueError()", "iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next() # Generating", "to deal in the Software without restriction, including without limitation the # rights", "portions of the Software. # ============================================================================== \"\"\" Supervised Dataset - Class responsible for", "to feed the model (using the tf.data.dataset API) \"\"\" # pylint: disable=too-many-instance-attributes def", "The size of a batch per tower :param dataset_builder: An instance of `BaseBuilder`", "following conditions: # # The above copyright notice and this permission notice shall", "do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size: The size of a batch per tower", "session used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.is_done:", "validation set, we can reset the steps since we are always starting from", "resource can only be set if the dataset was created with the \"no_iterator\"", "steps per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))) @property def nb_total_steps_per_epoch(self):", "mode specified.') self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session) def get_progress(self): \"\"\" Returns", "and self.fallback_status_path: try: with open(self.fallback_status_path, 'rb') as status: status = pickle.load(status) status_loaded =", "# Making sure itertor is on the right device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config", "per epoch in the current mode (Training / Validation) \"\"\" if self.training_mode ==", "If set, the cluster configuration will be used for 
distributed training. :param debug_batch:", "steps per epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch / (self.batch_size", "If an iterator_resource is specified, this corresponds to the output of iterator.get_next() :return:", "has been reached \"\"\" if self.do_infinite_training: return False return self._dataset_is_done or self.steps_in_current_mode >=", "For validation set, we can reset the steps since we are always starting", "= None # This represents iterator.get_next() self.default_features = {} # Will be used", "= {} # Will be used as default if features are missing from", "and/or # sell copies of the Software, and to permit persons to whom", "not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property def", "were \"\"\" status = {} status_loaded = False # Not loading status if", "= False self.training_init_op = None self.validation_init_op = None self.output_features = None # This", "scalar tf.Tensor representing the iterator. :param shared_name: Optional. 
If non-empty, this iterator will", "file has been reached \"\"\" if self.do_infinite_training: return False return self._dataset_is_done or self.steps_in_current_mode", "== status['num_shards']: status_loaded = True else: LOGGER.info('Number of shards has changed from %d", "= 0 self.initialize_iterator(session) def get_progress(self): \"\"\" Returns the number of completed epochs, and", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies", "For training, we might resume mid-epoch (from load_status()) - So we keep the", "0 # If we were training the train dataset, we need to skip", "copyright notice and this permission notice shall be included in all # copies", "one batch and looping over that batch forever if self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size)", "debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training))", "logging import os import math import multiprocessing import pickle import numpy as np", "batch and looping over that batch forever if self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset", "= ((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized =", "have the same value as the chief, we load our status, otherwise we", "to resume later \"\"\" # Not saving status if checkpoint_dir is None if", "the steps since we are always starting from the beginning # For training,", "the MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\"", "completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) 
perc_epoch_completed = self.steps_in_current_mode /", "os.path.getsize(self.status_path): with open(self.status_path, 'rb') as status: status = pickle.load(status) # Detecting num of", "the status is to be saved. None to disable, '' for default dir.", "with the \"no_iterator\" flag.') raise RuntimeError(\"Cannot create new iterator\") if iterator_resource is not", ":type session: tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip = int(self.training_progress *", "features are missing from queue # Steps self.nb_batches_to_skip = 0 # Nb of", "limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "os import math import multiprocessing import pickle import numpy as np from diplomacy_research.settings", "the status file on disk (where progress is saved) \"\"\" if not self.checkpoint_dir:", "= status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress = status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode", "with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset =", "merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to", "total number of training and validation steps per epoch \"\"\" return self.nb_training_steps_per_epoch +", "perc_epoch_for_training: If set, the training epoch will be for this percentage of available", "Loading TensorFlow from diplomacy_research.utils.tensorflow import tf output_types = self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes", "{} # Will be used as default if features are missing from queue", "tf.device(self.cluster_config.iterator_device if 
self.cluster_config else None): # We have an iterator resource, so we", "Validation) \"\"\" if self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def iterator_initialized(self):", "def start_training_mode(self, session): \"\"\" Starts the dataset in training mode :param session: The", "# Using different names because we can't define initializers with the same name", "2019 - <NAME> # # NOTICE: Permission is hereby granted, free of charge,", "iterator_resource=None, shared_name=None, features=None): \"\"\" Creates an iterator object (optionally using a shared name", "If we have the same value as the chief, we load our status,", "number of validation steps per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards)))", "name self._iterator_initialized = False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session):", "the training dataset.', self.nb_batches_to_skip) try: for _ in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn(", "'wb') as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads dataset status from", "tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode", "will be loaded from a ckpt) :param do_infinite_training: If set, supervised training will", "a training and validation dataset to feed data to the model through tf.data.dataset", "value): \"\"\" Setter for batch_size \"\"\" if self.num_shards is not None: raise RuntimeError('You", ":param shared_name: Optional. 
If non-empty, this iterator will be shared under the given", "were training the train dataset, we need to skip a certain number of", "self.debug_batch = debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3,", "on disk (where progress is saved) \"\"\" if not self.checkpoint_dir: return None if", "all # copies or substantial portions of the Software. # ============================================================================== \"\"\" Supervised", "resume mid-epoch (from load_status()) - So we keep the current value if self.training_mode", "debug our model :param no_iterator: Boolean flag that indicates to not create an", "fallback status if not status_loaded and self.fallback_status_path: try: with open(self.fallback_status_path, 'rb') as status:", "self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs,", "from diplomacy_research.utils.tensorflow import tf assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a", "'' else WORKING_DIR # None = disabled self.cluster_config = cluster_config self.debug_batch = debug_batch", "the case if self.num_shards == status['num_shards']: status_loaded = True else: LOGGER.info('Number of shards", "the train dataset, we need to skip a certain number of batches #", "self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized = True self._dataset_is_done", "from disk and resume where we were \"\"\" status = {} status_loaded =", "copies or substantial portions of the Software. 
# ============================================================================== \"\"\" Supervised Dataset -", "== TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # Resuming by skipping a certain number of", "self.status_path: return # Trying to load from primary path if os.path.exists(self.status_path) and os.path.getsize(self.status_path):", "responsible for generating entries to feed the model (using the tf.data.dataset API) \"\"\"", "do_infinite_training self.is_closing = False self.session = None # Creating empty datasets self.training_dataset =", "can reset the steps since we are always starting from the beginning #", "is wrapped, executing it without hooks init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if", "the number of steps per epoch in the current mode (Training / Validation)", "specified.') self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session) def get_progress(self): \"\"\" Returns the", "Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return", "of shards (if a cluster config is set), otherwise None \"\"\" return self.cluster_config.num_shards", "chief, we do a cleanup on the status folder if self.cluster_config and self.cluster_config.is_chief:", "if self.cluster_config else None): # We have an iterator resource, so we use", "= None self._iterator_initialized = False self.training_init_op = None self.validation_init_op = None self.output_features =", "= 0 # Nb of batches to skip self.steps_in_current_mode = 0 # Step", "the Software without restriction, including without limitation the # rights to use, copy,", "fallbacks: if os.path.exists(fallback): return fallback return None @property def is_done(self): \"\"\" Returns True", "of the file\"\"\" self._dataset_is_done = True def build(self): \"\"\" Builds the 
TensorFlow datasets", "# to deal in the Software without restriction, including without limitation the #", "# ============================================================================== # Copyright 2019 - <NAME> # # NOTICE: Permission is hereby", "\"\"\" Loads dataset status from disk and resume where we were \"\"\" status", "per epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch / (self.batch_size *", "% status_ix)) # Otherwise, we just delete the worker status file else: os.unlink(self.status_path)", "if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as chief_status: chief_status =", "2.5% train, ...) :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments", "= self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building a list of generic default values from the", "set, the training epoch will be for this percentage of available steps before", "delete the worker status file else: os.unlink(self.status_path) # We load the fallback status", "self._iterator_initialized = False self.training_init_op = None self.validation_init_op = None self.output_features = None #", "for generating entries to feed the model (using the tf.data.dataset API) \"\"\" #", "to disable, '' for default dir. :param cluster_config: Optional. 
If set, the cluster", "self.total_nb_items_valid_proto = 0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done = False #", "file if the primary is not available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl'", "if not self.is_done or self.do_infinite_training: self.steps_in_current_mode += 1 if self.training_mode == TrainingMode.TRAINING: self.training_progress", "for status_ix in range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status',", "# Not saving status if checkpoint_dir is None if not self.status_path: return #", "So we keep the current value if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0", "looping over that batch forever if self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1)", "status = pickle.load(status) status_loaded = True except EOFError: pass # We load the", "is to be saved. None to disable, '' for default dir. :param cluster_config:", "iterator.get_next() self.default_features = {} # Will be used as default if features are", "no need to shuffle if self.cluster_config and self.num_shards > 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards,", "train, ...) 
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments self._batch_size", "nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards))) @property def", "dtype=dtype) # ----------------------------- # Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but", "self.cluster_config: return None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0) @property def fallback_status_path(self): \"\"\"", "= pickle.load(chief_status) else: chief_status = status # We couldn't find a status file", "iterator. :param shared_name: Optional. If non-empty, this iterator will be shared under the", "we do a cleanup on the status folder if self.cluster_config and self.cluster_config.is_chief: for", "to the following conditions: # # The above copyright notice and this permission", "lambda step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0 def start_training_mode(self,", "the \"no_iterator\" flag.') raise RuntimeError(\"Cannot create new iterator\") if iterator_resource is not None", "current % of the epoch completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode /", "nb_validation_steps_per_epoch(self): \"\"\" Returns the number of validation steps per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto", "that we have the same training_mode and nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path) and", "unless specified otherwise if not self.no_iterator: self.create_iterator() def 
create_iterator(self, iterator_resource=None, shared_name=None, features=None): \"\"\"", "Generating init op for each dataset # Using different names because we can't", "# Number of items remaining in epoch self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto = 0", "dataset as having reached the end of the file\"\"\" self._dataset_is_done = True def", "Software is # furnished to do so, subject to the following conditions: #", "Loading number of items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb')", "not self.status_path: return # Recomputing nb of completed epochs when doing infinite training", "and a specific iterator resource) :param iterator_resource: A tf.resource scalar tf.Tensor representing the", "and validation dataset to feed data to the model through tf.data.dataset \"\"\" from", "status to validate that we have the same training_mode and nb_epochs if self.cluster_config", "session: tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch)", "if self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\" Determine", "None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0) @property def fallback_status_path(self): \"\"\" Path to", "training the train dataset, we need to skip a certain number of batches", "init ops self.iterator = None self._iterator_initialized = False self.training_init_op = None self.validation_init_op =", "and deleting file if that's the case if self.num_shards == status['num_shards']: status_loaded =", "% self.cluster_config.task_id) @property def chief_status_path(self): \"\"\" Path to the chief status path 
(to", "for using a training and validation dataset to feed data to the model", "wrapped, executing it without hooks init_op = {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session,", "self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\" Increments the local step counter \"\"\" if", "the number of validation steps per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size *", "are always starting from the beginning # For training, we might resume mid-epoch", "self.is_done or self.do_infinite_training: self.steps_in_current_mode += 1 if self.training_mode == TrainingMode.TRAINING: self.training_progress = (self.training_progress", "\"\"\" Getter for batch_size \"\"\" return self._batch_size @batch_size.setter def batch_size(self, value): \"\"\" Setter", ":param debug_batch: Boolean flag to indicate to return the same batch over-and-over to", "permission notice shall be included in all # copies or substantial portions of", "== np.object: self.default_features[feature_name] = bytes('', 'utf-8') else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:],", "Stops iterating the dataset \"\"\" self.is_closing = True self.training_dataset = None self.validation_dataset =", "tf output_types = self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes # Making sure", "#%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat()", "if self.debug_batch: self.training_dataset = 
self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) # Regular mode # Otherwise,", "number of training steps per full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size *", "chief_status: chief_status = pickle.load(chief_status) else: chief_status = status # We couldn't find a", "iterator_initialized(self): \"\"\" Determine if the iterator has been initialized \"\"\" return self._iterator_initialized @property", "= TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session) def get_progress(self): \"\"\" Returns the number of", "self._dataset_is_done = False # For validation set, we can reset the steps since", "are missing from queue # Steps self.nb_batches_to_skip = 0 # Nb of batches", "else 1 @property def nb_training_steps_per_epoch(self): \"\"\" Returns the number of training steps per", "currently in \"infinite training\" mode. Only the training set can be accessed.') raise", "Nothing, but sets the self.iterator, self.features, and dataset init_ops \"\"\" if iterator_resource is", "given name across multiple sessions that share the same devices (e.g. when using", "the following conditions: # # The above copyright notice and this permission notice", "training will loop over the training set forever and will not switch to", "multiple sessions that share the same devices (e.g. when using a remote server).", "is currently in \"infinite training\" mode. 
Only the training set can be accessed.')", "@property def iterator_initialized(self): \"\"\" Determine if the iterator has been initialized \"\"\" return", "self.validation_dataset.apply(shard_fn) # Batching with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 *", "of validation steps per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))) @property", "return # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf # Running init_op # If", "training and validation steps per epoch \"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def", "is not None and not self.no_iterator: LOGGER.error('An iterator resource can only be set", "# If session is wrapped, executing it without hooks init_op = {TrainingMode.TRAINING: self.training_init_op,", "this corresponds to the output of iterator.get_next() :return: Nothing, but sets the self.iterator,", "iterator_resource is not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if features:", "build(self): \"\"\" Builds the TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow import tf assert 'request_id'", "file to be able to resume later \"\"\" # Not saving status if", "completed epochs, and the current % of the epoch completed \"\"\" if self.do_infinite_training:", "to do so, subject to the following conditions: # # The above copyright", "0 self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\" Starts the dataset in validation mode :param", "feed the model (using the tf.data.dataset API) \"\"\" # pylint: disable=too-many-instance-attributes def __init__(self,", "% status_ix)): 
os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)) # Otherwise, we just delete the", "Creating directory and saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode': self.training_mode,", "this percentage of available steps before running another evaluation epoch (e.g. 2.5% train,", "status = {} status_loaded = False # Not loading status if checkpoint_dir is", "# pylint: disable=too-many-arguments self._batch_size = batch_size self.dataset_builder = dataset_builder self.checkpoint_dir = checkpoint_dir if", "for this percentage of available steps before running another evaluation epoch (e.g. 2.5%", "set, we can reset the steps since we are always starting from the", "else: chief_status = status # We couldn't find a status file to load,", "the dataset as having reached the end of the file\"\"\" self._dataset_is_done = True", "the training epoch will be for this percentage of available steps before running", "Software. 
# ============================================================================== \"\"\" Supervised Dataset - Class responsible for using a training", "remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index =", "= self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes # Making sure itertor is on the right", "self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size) # Batching", "validation dataset to feed data to the model through tf.data.dataset \"\"\" from enum", "number of shards (if a cluster config is set), otherwise None \"\"\" return", "return # If we have the same value as the chief, we load", "self.training_dataset = None self.validation_dataset = None self.feedable_dataset = None # Creating iterator with", "status is to be saved. None to disable, '' for default dir. 
:param", "iterator (with a new iterator_resource), unless specified otherwise if not self.no_iterator: self.create_iterator() def", "self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with", "return the same batch over-and-over to debug our model :param no_iterator: Boolean flag", "None self.feedable_dataset = None # Creating iterator with init ops self.iterator = None", "datasets \"\"\" from diplomacy_research.utils.tensorflow import tf assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to", "= self.iterator.get_next() # Generating init op for each dataset # Using different names", "self.steps_in_current_mode = 0 # If we were training the train dataset, we need", "do_infinite_training else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing = False self.session =", "or self.do_infinite_training: self.steps_in_current_mode += 1 if self.training_mode == TrainingMode.TRAINING: self.training_progress = (self.training_progress +", "between worker and chief does not match. Resuming using chief status.') self.training_mode =", "of shards has changed from %d to %d', status['num_shards'], self.num_shards) # If we", "tf assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a \"request_id\" field.' 
#", "saved iterator.') raise ValueError() # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf output_types =", "end of the file\"\"\" self._dataset_is_done = True def build(self): \"\"\" Builds the TensorFlow", "using chief status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress", "the dataset in training mode :param session: The session used to initialize the", "status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress']", "\"\"\" # Not saving status if checkpoint_dir is None if not self.status_path: return", "shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching with prefetching self.validation_dataset =", "= None self.feedable_dataset = None # Creating iterator with init ops self.iterator =", "just delete the worker status file else: os.unlink(self.status_path) # We load the fallback", "\"\"\" if self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode =", "= chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode", "= 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing =", "the Software. 
# ============================================================================== \"\"\" Supervised Dataset - Class responsible for using a", "created an iterator yet if self.iterator is None: return # Loading TensorFlow from", "epochs, and the current % of the epoch completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs", "load our status, otherwise we use the chief use_own_status = ((status['training_mode'] == chief_status['training_mode'])", "# Building the datasets self.build() @property def can_support_iterator(self): \"\"\" Determines if the dataset", "self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building a list of generic default values from the output", "Running init_op # If session is wrapped, executing it without hooks init_op =", "case if self.num_shards == status['num_shards']: status_loaded = True else: LOGGER.info('Number of shards has", "= None self.output_features = None # This represents iterator.get_next() self.default_features = {} #", "\"\"\" # pylint: disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False,", "Number of items remaining in epoch self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto = 0 self.training_mode", "have an iterator resource, so we use it if iterator_resource is not None:", "= 0 self._dataset_is_done = False # Loading number of items remaining if os.path.exists(self.dataset_builder.dataset_index_path)", "os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress,", "self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, 
num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes)", "take_local_step(self): \"\"\" Increments the local step counter \"\"\" if not self.is_done or self.do_infinite_training:", "'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0", "to the same training point if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress *", "device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config else None): # We have an iterator resource,", "'utf-8') else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) # ----------------------------- # Validation", "Determine if the iterator has been initialized \"\"\" return self._iterator_initialized @property def status_path(self):", "batch per tower :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and", "from the output types and output shapes self.default_features = {} for feature_name, feature_shape", "Determines if the dataset can support an iterator or if it is a", "diplomacy_research.settings import WORKING_DIR # Constants LOGGER = logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration of", "= pickle.load(status) status_loaded = True except EOFError: pass # We load the chief", "os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)) # Otherwise, we", "if checkpoint_dir != '' else WORKING_DIR # None = 
disabled self.cluster_config = cluster_config", "self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes # Making sure itertor is on the right device/worker", "another evaluation epoch (e.g. 2.5% train, valid, 2.5% train, ...) :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder", "session used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" # We", "the current value if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # Resuming by", "np.zeros(shape=feature_shape[1:], dtype=dtype) # ----------------------------- # Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding,", "field.' # Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug (batch) mode #", "running another evaluation epoch (e.g. 2.5% train, valid, 2.5% train, ...) :type dataset_builder:", "a new iterator_resource), unless specified otherwise if not self.no_iterator: self.create_iterator() def create_iterator(self, iterator_resource=None,", "if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)) # Otherwise,", "the number of completed epochs, and the current % of the epoch completed", "the dataset can support an iterator or if it is a remote (RPC)", "across multiple sessions that share the same devices (e.g. 
when using a remote", "= self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) #", "def start_validation_mode(self, session): \"\"\" Starts the dataset in validation mode :param session: The", "iterator resource) :param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator. :param shared_name:", "\"\"\" Starts the dataset in validation mode :param session: The session used to", "epoch will be for this percentage of available steps before running another evaluation", "self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\" Increments the local step counter", "0 def start_training_mode(self, session): \"\"\" Starts the dataset in training mode :param session:", "self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\" Increments the local step counter \"\"\" if not self.is_done", "is on the right device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config else None): # We", "the same training point if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch)", "starting from the beginning # For training, we might resume mid-epoch (from load_status())", "a ckpt) :param do_infinite_training: If set, supervised training will loop over the training", "TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized = True", "self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # Resuming by skipping a certain 
number", "primary is not available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir,", "corresponds to the output of iterator.get_next() :return: Nothing, but sets the self.iterator, self.features,", "We have an iterator resource, so we use it if iterator_resource is not", "batch_size(self): \"\"\" Getter for batch_size \"\"\" return self._batch_size @batch_size.setter def batch_size(self, value): \"\"\"", "not None: raise RuntimeError('You cannot change the batch_size when using shards') self._batch_size =", "batches in the training dataset.', self.nb_batches_to_skip) try: for _ in range(self.nb_batches_to_skip): if hasattr(session,", "def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number of steps per epoch in the current", "Optional. If non-empty, this iterator will be shared under the given name across", "None self.validation_dataset = None self.feedable_dataset = None # Creating iterator with init ops", "return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\" Increments the local step", "def fallback_status_path(self): \"\"\" Path to an alternate status file if the primary is", "self.cluster_config and self.num_shards > 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) #", "status: status = pickle.load(status) status_loaded = True except EOFError: pass # We load", "Path to the chief status path (to validate our status) \"\"\" if not", "= None self.validation_init_op = None self.output_features = None # This represents iterator.get_next() self.default_features", "disable=too-many-arguments self._batch_size = batch_size self.dataset_builder = dataset_builder self.checkpoint_dir = checkpoint_dir if checkpoint_dir !=", "\"\"\" Returns the number of 
training steps per full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto", "'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)) # Otherwise, we just", "same training point if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) def", "for each dataset # Using different names because we can't define initializers with", "self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed", "self.cluster_config and self.num_shards > 1: LOGGER.info('Sharding dataset. There are %d shards. Current shard", "compression_type='GZIP') # Sharding, but no need to shuffle if self.cluster_config and self.num_shards >", "The directory where the status is to be saved. 
None to disable, ''", "Only the training set can be accessed.') raise RuntimeError('Invalid training mode specified.') self.training_mode", "from primary path if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path, 'rb') as status: status", "= checkpoint_dir if checkpoint_dir != '' else WORKING_DIR # None = disabled self.cluster_config", "and generation methods :param checkpoint_dir: The directory where the status is to be", "# Batching with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size)", "# Creating iterator (with a new iterator_resource), unless specified otherwise if not self.no_iterator:", "if the iterator has been initialized \"\"\" return self._iterator_initialized @property def status_path(self): \"\"\"", "is a remote (RPC) dataset \"\"\" return True @property def batch_size(self): \"\"\" Getter", "self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building the datasets self.build() @property def can_support_iterator(self): \"\"\" Determines", "return self._batch_size @batch_size.setter def batch_size(self, value): \"\"\" Setter for batch_size \"\"\" if self.num_shards", "= dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building the datasets self.build() @property def can_support_iterator(self):", "validation set. 
:param perc_epoch_for_training: If set, the training epoch will be for this", "shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset", "have the same training_mode and nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with", "checkpoint_dir is None if not self.status_path: return # Recomputing nb of completed epochs", "If we were training the train dataset, we need to skip a certain", "a \"request_id\" field.' # Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug (batch)", "Trying to load from primary path if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path, 'rb')", "batch_size \"\"\" return self._batch_size @batch_size.setter def batch_size(self, value): \"\"\" Setter for batch_size \"\"\"", "self.num_shards > 1: LOGGER.info('Sharding dataset. There are %d shards. Current shard index: #%d.',", "infinite training if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating directory and", "iterator with init ops self.iterator = None self._iterator_initialized = False self.training_init_op = None", "else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) # ----------------------------- # Validation dataset", "datasets self.build() @property def can_support_iterator(self): \"\"\" Determines if the dataset can support an", "None to disable, '' for default dir. :param cluster_config: Optional. 
If set, the", "'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards} with open(self.status_path, 'wb') as file:", "\"\"\" Constructor :param batch_size: The size of a batch per tower :param dataset_builder:", "if not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property", "iterator resource can only be set if the dataset was created with the", "charge, to any person obtaining # a copy of this software and associated", "import tf assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a \"request_id\" field.'", "= False self.session = None # Creating empty datasets self.training_dataset = None self.validation_dataset", "need to shuffle if self.cluster_config and self.num_shards > 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index)", "1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching with prefetching self.validation_dataset", "= 0. 
# Number of items remaining in epoch self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto", "@property def nb_training_steps_per_epoch(self): \"\"\" Returns the number of training steps per epoch \"\"\"", "training steps per epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch /", "yet if self.iterator is None: return # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf", "the chief, we load our status, otherwise we use the chief use_own_status =", ":param perc_epoch_for_training: If set, the training epoch will be for this percentage of", "types and output shapes self.default_features = {} for feature_name, feature_shape in self.dataset_builder.output_shapes.items(): if", "the TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow import tf assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You", "if self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping %d batches in the training dataset.', self.nb_batches_to_skip)", "Returns the number of training steps per epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training *", "non-empty, this iterator will be shared under the given name across multiple sessions", "dir. :param cluster_config: Optional. If set, the cluster configuration will be used for", "self.do_infinite_training: LOGGER.error('Dataset is currently in \"infinite training\" mode. 
Only the training set can", "self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size) # Batching with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count())", "= 0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done = False # Loading", "+= 1 self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode = 0", "sharding and shuffling the dataset # Repeating to make sure all workers can", "shards has changed from %d to %d', status['num_shards'], self.num_shards) # If we are", "of `BaseBuilder` containing the proto-fields and generation methods :param checkpoint_dir: The directory where", "# Detecting num of shards change and deleting file if that's the case", "init op :type session: tensorflow.python.client.session.Session \"\"\" # We haven't created an iterator yet", "True else: LOGGER.info('Number of shards has changed from %d to %d', status['num_shards'], self.num_shards)", "SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops iterating the dataset \"\"\" self.is_closing =", "self.steps_in_current_mode = status['steps_current_mode'] self.training_progress = status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0", "if not status_loaded: return # If we have the same value as the", "that share the same devices (e.g. when using a remote server). :param features:", "create an iterator (it will be loaded from a ckpt) :param do_infinite_training: If", "self.num_shards) # If we are chief, we do a cleanup on the status", "self.steps_in_current_mode = 0 else: LOGGER.warning('Status between worker and chief does not match. 
Resuming", "a status file to load, aborting if not status_loaded: return # If we", "(it will be loaded from a ckpt) :param do_infinite_training: If set, supervised training", ":param features: If an iterator_resource is specified, this corresponds to the output of", "same value as the chief, we load our status, otherwise we use the", "including without limitation the # rights to use, copy, modify, merge, publish, distribute,", "self._iterator_initialized = True self._dataset_is_done = False # For validation set, we can reset", "Loading TensorFlow from diplomacy_research.utils.tensorflow import tf # Running init_op # If session is", "set forever and will not switch to the validation set. :param perc_epoch_for_training: If", "def close(self): \"\"\" Stops iterating the dataset \"\"\" self.is_closing = True self.training_dataset =", "iterator.') raise ValueError() # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf output_types = self.training_dataset.output_types", "def num_shards(self): \"\"\" Returns the number of shards (if a cluster config is", "session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0 def", "self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\" Starts", "if the end of file has been reached \"\"\" if self.do_infinite_training: return False", "chief_status = status # We couldn't find a status file to load, aborting", "required when reloading a saved iterator.') raise ValueError() # Loading TensorFlow from diplomacy_research.utils.tensorflow", "if iterator_resource is not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, 
output_shapes=output_shapes, output_classes=output_classes) if", "features is None: LOGGER.error('The iterator features are required when reloading a saved iterator.')", "\"\"\" return self._batch_size @batch_size.setter def batch_size(self, value): \"\"\" Setter for batch_size \"\"\" if", "Boolean flag that indicates to not create an iterator (it will be loaded", "init_ops \"\"\" if iterator_resource is not None and not self.no_iterator: LOGGER.error('An iterator resource", "Regular mode # Otherwise, sharding and shuffling the dataset # Repeating to make", "not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if features: self.output_features =", "iterator_resource: A tf.resource scalar tf.Tensor representing the iterator. :param shared_name: Optional. If non-empty,", "an iterator_resource is specified, this corresponds to the output of iterator.get_next() :return: Nothing,", "__init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size:", "self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with a new iterator_resource), unless", "iterator yet if self.iterator is None: return # Loading TensorFlow from diplomacy_research.utils.tensorflow import", "will be used for distributed training. 
:param debug_batch: Boolean flag to indicate to", "can only be set if the dataset was created with the \"no_iterator\" flag.')", "self.num_shards))) @property def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\" Returns the number of training", "iterator (it will be loaded from a ckpt) :param do_infinite_training: If set, supervised", "Class responsible for using a training and validation dataset to feed data to", "session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized = True self._dataset_is_done = False # For", "path if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path, 'rb') as status: status = pickle.load(status)", "An instance of `BaseBuilder` containing the proto-fields and generation methods :param checkpoint_dir: The", "size of a batch per tower :param dataset_builder: An instance of `BaseBuilder` containing", "feed data to the model through tf.data.dataset \"\"\" from enum import Enum import", "\"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook", "Returns the number of shards (if a cluster config is set), otherwise None", "MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def close(self): \"\"\" Stops", "open(self.chief_status_path, 'rb') as chief_status: chief_status = pickle.load(chief_status) else: chief_status = status # We", "initializer=None, output_types=output_types, output_shapes=output_shapes, output_classes=output_classes) if features: self.output_features = features # Otherwise, we create", "_ in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else: 
session.run(self.output_features['request_id']) except", "self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards} with open(self.status_path, 'wb') as", "status) \"\"\" if not self.cluster_config: return None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0)", "[os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in fallbacks: if os.path.exists(fallback):", "= chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if self.training_mode", "self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards} with open(self.status_path, 'wb') as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL)", "# The above copyright notice and this permission notice shall be included in", "self.cluster_config else None): # We have an iterator resource, so we use it", "* self.num_shards))) @property def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\" Returns the number of", "/ (self.batch_size * self.num_shards))) @property def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\" Returns the", "\"\"\" return self._iterator_initialized @property def status_path(self): \"\"\" Path to the status file on", "model (using the tf.data.dataset API) \"\"\" # pylint: disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder,", "steps per epoch \"\"\" return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch @property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns", "Returns the number of steps per epoch in the current mode (Training /", "steps per 
epoch in the current mode (Training / Validation) \"\"\" if self.training_mode", "batches to skip self.steps_in_current_mode = 0 # Step count in current mode self.training_progress", "training_mode and nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as", "generation methods :param checkpoint_dir: The directory where the status is to be saved.", "make sure all workers can loop on the dataset at all times else:", "self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\" Determine if", "the number of training steps per epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto", "# We haven't created an iterator yet if self.iterator is None: return #", "current mode self.training_progress = 0. # Number of items remaining in epoch self.total_nb_items_training_proto", "be loaded from a ckpt) :param do_infinite_training: If set, supervised training will loop", "forever if self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) # Regular mode #", "dataset was created with the \"no_iterator\" flag.') raise RuntimeError(\"Cannot create new iterator\") if", "try: with open(self.fallback_status_path, 'rb') as status: status = pickle.load(status) status_loaded = True except", "def make_session_run_hook(self): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow", "- <NAME> # # NOTICE: Permission is hereby granted, free of charge, to", "bytes('', 'utf-8') else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) # ----------------------------- #", "= 
int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session) def start_validation_mode(self,", "a list of generic default values from the output types and output shapes", "status['num_shards'], self.num_shards) # If we are chief, we do a cleanup on the", "disk (where progress is saved) \"\"\" if not self.checkpoint_dir: return None if not", "name across multiple sessions that share the same devices (e.g. when using a", "object (optionally using a shared name and a specific iterator resource) :param iterator_resource:", "\"\"\" Returns the number of validation steps per epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto /", "where the status is to be saved. None to disable, '' for default", "the end of the file\"\"\" self._dataset_is_done = True def build(self): \"\"\" Builds the", "Starts the dataset in validation mode :param session: The session used to initialize", "different names because we can't define initializers with the same name self._iterator_initialized =", "available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback", "self.dataset_builder = dataset_builder self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else WORKING_DIR #", "if not self.status_path: return # Recomputing nb of completed epochs when doing infinite", "# a copy of this software and associated documentation files (the \"Software\"), #", "to any person obtaining # a copy of this software and associated documentation", "self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating directory and saving if not os.path.exists(os.path.dirname(self.status_path)):", "all workers can loop on the dataset at all times else: if self.cluster_config", "epoch self.total_nb_items_training_proto = 0 
self.total_nb_items_valid_proto = 0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs = 0", "session: The session used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\"", "\"\"\" Enumeration of training modes \"\"\" TRAINING = 'train' VALIDATION = 'valid' class", "steps before running another evaluation epoch (e.g. 2.5% train, valid, 2.5% train, ...)", "progress is saved) \"\"\" if not self.checkpoint_dir: return None if not self.cluster_config: return", "value if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # Resuming by skipping a", "because we can't define initializers with the same name self._iterator_initialized = False self.training_init_op", "output_classes=output_classes) if features: self.output_features = features # Otherwise, we create a brand new", "status if checkpoint_dir is None. if not self.status_path: return # Trying to load", "\"\"\" # pylint: disable=too-many-arguments self._batch_size = batch_size self.dataset_builder = dataset_builder self.checkpoint_dir = checkpoint_dir", "import Enum import logging import os import math import multiprocessing import pickle import", "in the training dataset.', self.nb_batches_to_skip) try: for _ in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'):", "# Creating iterator with init ops self.iterator = None self._iterator_initialized = False self.training_init_op", "dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size: The size", "chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode =", "batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, 
do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size: The", "\"\"\" Returns the number of steps per epoch in the current mode (Training", "= self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) # Regular mode # Otherwise, sharding and shuffling", "session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0 def start_training_mode(self, session): \"\"\" Starts the", "of batches # to get to the same training point if self.training_mode ==", "need to have a \"request_id\" field.' # Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP')", "shared_name=shared_name) self.output_features = self.iterator.get_next() # Generating init op for each dataset # Using", "saved. None to disable, '' for default dir. :param cluster_config: Optional. If set,", "training. :param debug_batch: Boolean flag to indicate to return the same batch over-and-over", "= 'train' VALIDATION = 'valid' class SupervisedDataset(): \"\"\" This object is responsible for", "not switch to the validation set. 
:param perc_epoch_for_training: If set, the training epoch", "data to the model through tf.data.dataset \"\"\" from enum import Enum import logging", "WORKING_DIR # None = disabled self.cluster_config = cluster_config self.debug_batch = debug_batch self.no_iterator =", "iterator_resource is specified, this corresponds to the output of iterator.get_next() :return: Nothing, but", "and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as chief_status: chief_status = pickle.load(chief_status) else:", "has been initialized \"\"\" return self._iterator_initialized @property def status_path(self): \"\"\" Path to the", "that batch forever if self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset = self.training_dataset.repeat(count=-1) # Regular", "= tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function,", "= None # Creating empty datasets self.training_dataset = None self.validation_dataset = None self.feedable_dataset", "NOTICE: Permission is hereby granted, free of charge, to any person obtaining #", "configuration will be used for distributed training. :param debug_batch: Boolean flag to indicate", "'' for default dir. :param cluster_config: Optional. 
If set, the cluster configuration will", "self.feedable_dataset = None # Creating iterator with init ops self.iterator = None self._iterator_initialized", "a SessionRunHook for the MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self)", "session used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training:", "entries to feed the model (using the tf.data.dataset API) \"\"\" # pylint: disable=too-many-instance-attributes", "diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments self._batch_size = batch_size self.dataset_builder =", "set), otherwise None \"\"\" return self.cluster_config.num_shards if self.cluster_config else 1 @property def nb_training_steps_per_epoch(self):", "status file to load, aborting if not status_loaded: return # If we have", "self.steps_in_current_mode = 0 # Resuming by skipping a certain number of already processed", "'status', 'status-%03d.pkl' % 0) @property def fallback_status_path(self): \"\"\" Path to an alternate status", "TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow import tf assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need", "in fallbacks: if os.path.exists(fallback): return fallback return None @property def is_done(self): \"\"\" Returns", "values from the output types and output shapes self.default_features = {} for feature_name,", "VALIDATION = 'valid' class SupervisedDataset(): \"\"\" This object is responsible for generating entries", "all times else: if self.cluster_config and self.num_shards > 1: LOGGER.info('Sharding dataset. 
There are", "= 'valid' class SupervisedDataset(): \"\"\" This object is responsible for generating entries to", "pylint: disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\"", "status['steps_current_mode'] self.training_progress = status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else: LOGGER.warning('Status", "files (the \"Software\"), # to deal in the Software without restriction, including without", "int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object", "/ Validation) \"\"\" if self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def", "None and not self.no_iterator: LOGGER.error('An iterator resource can only be set if the", "features=None): \"\"\" Creates an iterator object (optionally using a shared name and a", "num of shards change and deleting file if that's the case if self.num_shards", "switch to the validation set. 
:param perc_epoch_for_training: If set, the training epoch will", "an alternate status file if the primary is not available \"\"\" fallbacks =", "\"\"\" Returns the number of training steps per epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training", "and the current % of the epoch completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs =", "self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building a list", "Software without restriction, including without limitation the # rights to use, copy, modify,", "ValueError() # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf output_types = self.training_dataset.output_types output_shapes =", "'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in fallbacks: if os.path.exists(fallback): return fallback", "as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads dataset status from disk", "= self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\" Initializes the current iterator", "== chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized = False if", "os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as chief_status: chief_status = pickle.load(chief_status) else: chief_status", "methods :param checkpoint_dir: The directory where the status is to be saved. 
None", "= self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) # ----------------------------- # Validation dataset self.validation_dataset =", "\"\"\" Starts the dataset in training mode :param session: The session used to", "# Not loading status if checkpoint_dir is None. if not self.status_path: return #", "perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing = False self.session = None # Creating empty", "can't define initializers with the same name self._iterator_initialized = False self.training_init_op = self.iterator.make_initializer(self.training_dataset)", "Getter for batch_size \"\"\" return self._batch_size @batch_size.setter def batch_size(self, value): \"\"\" Setter for", "False # Not loading status if checkpoint_dir is None. if not self.status_path: return", "== TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\" Determine if the", "False # Loading number of items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with", "\"\"\" nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards))) @property", "the dataset was created with the \"no_iterator\" flag.') raise RuntimeError(\"Cannot create new iterator\")", "persons to whom the Software is # furnished to do so, subject to", "and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized = False if use_own_status: self.training_mode", "= features # Otherwise, we create a brand new iterator else: self.iterator =", "publish, distribute, sublicense, and/or # sell copies of the Software, and to permit", "LOGGER.info('Sharding dataset. 
There are %d shards. Current shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn", "conditions: # # The above copyright notice and this permission notice shall be", "@property def can_support_iterator(self): \"\"\" Determines if the dataset can support an iterator or", "the beginning # For training, we might resume mid-epoch (from load_status()) - So", "'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property def chief_status_path(self): \"\"\" Path to", "max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing = False self.session = None #", "if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # Resuming by skipping a certain", "%d batches in the training dataset.', self.nb_batches_to_skip) try: for _ in range(self.nb_batches_to_skip): if", "set, supervised training will loop over the training set forever and will not", "self.cluster_config.is_chief: for status_ix in range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir,", "any person obtaining # a copy of this software and associated documentation files", "support an iterator or if it is a remote (RPC) dataset \"\"\" return", "init op for each dataset # Using different names because we can't define", "\"\"\" Determines if the dataset can support an iterator or if it is", "we have the same training_mode and nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path):", "== TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else: LOGGER.warning('Status between worker and chief does not", "if the primary is not available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' %", "object is responsible for 
generating entries to feed the model (using the tf.data.dataset", "session: tensorflow.python.client.session.Session \"\"\" # We haven't created an iterator yet if self.iterator is", "specified, this corresponds to the output of iterator.get_next() :return: Nothing, but sets the", "open(self.status_path, 'wb') as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\" Loads dataset status", "if the dataset was created with the \"no_iterator\" flag.') raise RuntimeError(\"Cannot create new", "self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next() # Generating init op", "remote server). :param features: If an iterator_resource is specified, this corresponds to the", "dataset_builder self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else WORKING_DIR # None =", "define initializers with the same name self._iterator_initialized = False self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op", "epoch in the current mode (Training / Validation) \"\"\" if self.training_mode == TrainingMode.VALIDATION:", "self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto = 0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done", "1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing = False", "change the batch_size when using shards') self._batch_size = value @property def num_shards(self): \"\"\"", "Path to the status file on disk (where progress is saved) \"\"\" if", "int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards))) @property def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\" Returns", "raise ValueError() # Loading TensorFlow from 
diplomacy_research.utils.tensorflow import tf output_types = self.training_dataset.output_types output_shapes", "self.validation_dataset = None self.feedable_dataset = None # Creating iterator with init ops self.iterator", "# Steps self.nb_batches_to_skip = 0 # Nb of batches to skip self.steps_in_current_mode =", "# pylint: disable=invalid-name \"\"\" Returns the number of training steps per full epoch", "= int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating directory and saving if not os.path.exists(os.path.dirname(self.status_path)): os.makedirs(os.path.dirname(self.status_path),", "indicate to return the same batch over-and-over to debug our model :param no_iterator:", "\"\"\" # We haven't created an iterator yet if self.iterator is None: return", "Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug (batch) mode # Only taking", "used to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" # We haven't", "iterator or if it is a remote (RPC) dataset \"\"\" return True @property", "to the output of iterator.get_next() :return: Nothing, but sets the self.iterator, self.features, and", "this software and associated documentation files (the \"Software\"), # to deal in the", "self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\" Initializes the current iterator :param session:", "in training mode :param session: The session used to initialize the init op", "mode :param session: The session used to initialize the init op :type session:", "1 if self.training_mode == TrainingMode.TRAINING: self.training_progress = (self.training_progress + 1. 
/ self.nb_training_steps_per_full_epoch) %", "dataset, we need to skip a certain number of batches # to get", "self.fallback_status_path: try: with open(self.fallback_status_path, 'rb') as status: status = pickle.load(status) status_loaded = True", "> 1: shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching with prefetching", "= None self.validation_dataset = None self.feedable_dataset = None # Creating iterator with init", "tf.resource scalar tf.Tensor representing the iterator. :param shared_name: Optional. If non-empty, this iterator", "status self._iterator_initialized = False if use_own_status: self.training_mode = status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode", "\"Software\"), # to deal in the Software without restriction, including without limitation the", "disable=invalid-name \"\"\" Returns the number of training steps per full epoch \"\"\" return", "return None if not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' %", "self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\" Returns the total number of training and validation", "brand new iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next()", "pylint: disable=too-many-arguments self._batch_size = batch_size self.dataset_builder = dataset_builder self.checkpoint_dir = checkpoint_dir if checkpoint_dir", "def is_done(self): \"\"\" Returns True if the end of file has been reached", "load_status(self): \"\"\" Loads dataset status from disk and resume where we were \"\"\"", "shared_name=None, features=None): \"\"\" 
Creates an iterator object (optionally using a shared name and", "import numpy as np from diplomacy_research.settings import WORKING_DIR # Constants LOGGER = logging.getLogger(__name__)", "* self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\" Returns the total number of training and", "does not match. Resuming using chief status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs']", "the init op :type session: tensorflow.python.client.session.Session \"\"\" # We haven't created an iterator", "the chief status to validate that we have the same training_mode and nb_epochs", "list of generic default values from the output types and output shapes self.default_features", "range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done()", "raise RuntimeError('Invalid training mode specified.') self.training_mode = TrainingMode.VALIDATION self.steps_in_current_mode = 0 self.initialize_iterator(session) def", "# For training, we might resume mid-epoch (from load_status()) - So we keep", "def iterator_initialized(self): \"\"\" Determine if the iterator has been initialized \"\"\" return self._iterator_initialized", "self.default_features = {} # Will be used as default if features are missing", "the dataset # Repeating to make sure all workers can loop on the", "= status['steps_current_mode'] self.training_progress = status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else:", "(self.batch_size * self.num_shards))) @property def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\" Returns the number", "False return self._dataset_is_done or self.steps_in_current_mode >= 
self.nb_steps_per_epoch_current_mode def take_local_step(self): \"\"\" Increments the local", "and features is None: LOGGER.error('The iterator features are required when reloading a saved", "'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op) self._iterator_initialized = True self._dataset_is_done = False #", "= TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done = False # Loading number of items", "be for this percentage of available steps before running another evaluation epoch (e.g.", "the same training_mode and nb_epochs if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path): with open(self.chief_status_path,", "self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds a SessionRunHook for the", "\"\"\" This object is responsible for generating entries to feed the model (using", "# Loading number of items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path,", "\"\"\" if not self.cluster_config: return None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0) @property", "@property def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\" Returns the number of training steps", "self.cluster_config = cluster_config self.debug_batch = debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training = 1.00 if", "import pickle import numpy as np from diplomacy_research.settings import WORKING_DIR # Constants LOGGER", "the right device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config else None): # We have an", "step counter \"\"\" if not self.is_done or self.do_infinite_training: self.steps_in_current_mode += 1 if 
self.training_mode", "self.no_iterator = no_iterator self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training", "ckpt) :param do_infinite_training: If set, supervised training will loop over the training set", "status_loaded: return # If we have the same value as the chief, we", "person obtaining # a copy of this software and associated documentation files (the", "notice and this permission notice shall be included in all # copies or", "# Creating empty datasets self.training_dataset = None self.validation_dataset = None self.feedable_dataset = None", "if features are missing from queue # Steps self.nb_batches_to_skip = 0 # Nb", "SessionRunHook for the MonitoredTrainingSession object \"\"\" from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook return SupervisedDatasetSessionRunHook(self) def", "to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of", "cannot change the batch_size when using shards') self._batch_size = value @property def num_shards(self):", "if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip", "* self.num_shards))) @property def nb_validation_steps_per_epoch(self): \"\"\" Returns the number of validation steps per", "= pickle.load(status) # Detecting num of shards change and deleting file if that's", "chief use_own_status = ((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading status", "since we are always starting from the beginning # For training, we might", "an iterator (it will be loaded from a ckpt) :param do_infinite_training: If set,", "def nb_training_steps_per_epoch(self): \"\"\" Returns the number of 
training steps per epoch \"\"\" nb_items_per_epoch", "= dataset_builder self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else WORKING_DIR # None", "if it is a remote (RPC) dataset \"\"\" return True @property def batch_size(self):", "evaluation epoch (e.g. 2.5% train, valid, 2.5% train, ...) :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type", "using a remote server). :param features: If an iterator_resource is specified, this corresponds", "chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # If", "\"\"\" if not self.checkpoint_dir: return None if not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return", "training by skipping %d batches in the training dataset.', self.nb_batches_to_skip) try: for _", "status # We couldn't find a status file to load, aborting if not", "True if the end of file has been reached \"\"\" if self.do_infinite_training: return", "self.no_iterator: self.create_iterator() def create_iterator(self, iterator_resource=None, shared_name=None, features=None): \"\"\" Creates an iterator object (optionally", "padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with a new iterator_resource), unless specified otherwise if not", "session: tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset is currently in \"infinite training\" mode. Only", "we need to skip a certain number of batches # to get to", "Optional. If set, the cluster configuration will be used for distributed training. 
:param", "if self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING", "pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building the datasets self.build() @property", "the primary is not available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0),", "raise RuntimeError('You cannot change the batch_size when using shards') self._batch_size = value @property", "worker and chief does not match. Resuming using chief status.') self.training_mode = chief_status['training_mode']", "\"\"\" return True @property def batch_size(self): \"\"\" Getter for batch_size \"\"\" return self._batch_size", "self.cluster_config and self.cluster_config.is_chief: for status_ix in range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' %", "we use the chief use_own_status = ((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs'])", "(optionally using a shared name and a specific iterator resource) :param iterator_resource: A", "status file on disk (where progress is saved) \"\"\" if not self.checkpoint_dir: return", "os.path.exists(fallback): return fallback return None @property def is_done(self): \"\"\" Returns True if the", "training and validation dataset to feed data to the model through tf.data.dataset \"\"\"", "resource, so we use it if iterator_resource is not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource,", "epoch completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode", "op :type session: 
tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip = int(self.training_progress", "model :param no_iterator: Boolean flag that indicates to not create an iterator (it", "of training steps per epoch \"\"\" nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto return int(math.ceil(nb_items_per_epoch", "self.nb_batches_to_skip = 0 # Nb of batches to skip self.steps_in_current_mode = 0 #", "== TrainingMode.TRAINING: self.training_progress = (self.training_progress + 1. / self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self):", "and shuffling the dataset # Repeating to make sure all workers can loop", "self.training_dataset = self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size) # Batching with prefetching self.training_dataset", "and os.path.getsize(self.chief_status_path): with open(self.chief_status_path, 'rb') as chief_status: chief_status = pickle.load(chief_status) else: chief_status =", "else: if self.cluster_config and self.num_shards > 1: LOGGER.info('Sharding dataset. There are %d shards.", "granted, free of charge, to any person obtaining # a copy of this", "self.default_features[feature_name] = bytes('', 'utf-8') else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) #", "with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto =", "np from diplomacy_research.settings import WORKING_DIR # Constants LOGGER = logging.getLogger(__name__) class TrainingMode(Enum): \"\"\"", "when using a remote server). 
:param features: If an iterator_resource is specified, this", "return # Recomputing nb of completed epochs when doing infinite training if self.do_infinite_training:", "find a status file to load, aborting if not status_loaded: return # If", "if self.cluster_config else 1 @property def nb_training_steps_per_epoch(self): \"\"\" Returns the number of training", "self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with a new iterator_resource), unless specified", "def batch_size(self, value): \"\"\" Setter for batch_size \"\"\" if self.num_shards is not None:", "= status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress = status['training_progress'] if self.training_mode", "\"\"\" Creates an iterator object (optionally using a shared name and a specific", "0. # Number of items remaining in epoch self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto =", "might resume mid-epoch (from load_status()) - So we keep the current value if", "skip a certain number of batches # to get to the same training", "where we were \"\"\" status = {} status_loaded = False # Not loading", "# sell copies of the Software, and to permit persons to whom the", "to initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs +=", "def nb_total_steps_per_epoch(self): \"\"\" Returns the total number of training and validation steps per", "output shapes self.default_features = {} for feature_name, feature_shape in self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] ==", "# Generating init op for each dataset # Using different names because we", ":param batch_size: The size of a batch per tower :param dataset_builder: An instance", 
"num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building a", "# We load the fallback status if not status_loaded and self.fallback_status_path: try: with", "self.training_init_op = self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\" Initializes the current", "start_validation_mode(self, session): \"\"\" Starts the dataset in validation mode :param session: The session", "proto-fields and generation methods :param checkpoint_dir: The directory where the status is to", "remaining in epoch self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto = 0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs", "(to validate our status) \"\"\" if not self.cluster_config: return None return os.path.join(self.checkpoint_dir, 'status',", "sessions that share the same devices (e.g. when using a remote server). :param", "@property def nb_validation_steps_per_epoch(self): \"\"\" Returns the number of validation steps per epoch \"\"\"", "self.training_progress = 0. # Number of items remaining in epoch self.total_nb_items_training_proto = 0", "initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs += 1", "when reloading a saved iterator.') raise ValueError() # Loading TensorFlow from diplomacy_research.utils.tensorflow import", "percentage of available steps before running another evaluation epoch (e.g. 
2.5% train, valid,", "status from disk and resume where we were \"\"\" status = {} status_loaded", "reset the steps since we are always starting from the beginning # For", "epoch \"\"\" return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))) @property def nb_total_steps_per_epoch(self): \"\"\" Returns", "the current % of the epoch completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode", "sublicense, and/or # sell copies of the Software, and to permit persons to", "@property def status_path(self): \"\"\" Path to the status file on disk (where progress", "names because we can't define initializers with the same name self._iterator_initialized = False", "number of already processed items if self.nb_batches_to_skip: LOGGER.info('Resuming training by skipping %d batches", "/ self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self): \"\"\" Marks the dataset as having reached", "0 self.total_nb_items_valid_proto = 0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done = False", "Software, and to permit persons to whom the Software is # furnished to", "skip self.steps_in_current_mode = 0 # Step count in current mode self.training_progress = 0.", "get_progress(self): \"\"\" Returns the number of completed epochs, and the current % of", "new iterator_resource), unless specified otherwise if not self.no_iterator: self.create_iterator() def create_iterator(self, iterator_resource=None, shared_name=None,", "self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else WORKING_DIR # None = disabled", "Dataset - Class responsible for using a training and validation dataset to feed", "have a \"request_id\" field.' 
# Training dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug", "couldn't find a status file to load, aborting if not status_loaded: return #", "# Constants LOGGER = logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration of training modes \"\"\"", "@property def num_shards(self): \"\"\" Returns the number of shards (if a cluster config", "to skip self.steps_in_current_mode = 0 # Step count in current mode self.training_progress =", "features # Otherwise, we create a brand new iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types,", "loading status if checkpoint_dir is None. if not self.status_path: return # Trying to", "# ============================================================================== \"\"\" Supervised Dataset - Class responsible for using a training and", "our status) \"\"\" if not self.cluster_config: return None return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' %", "with open(self.chief_status_path, 'rb') as chief_status: chief_status = pickle.load(chief_status) else: chief_status = status #", "= False if use_own_status: self.training_mode = status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode']", "If we are chief, we do a cleanup on the status folder if", "return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id) @property def chief_status_path(self): \"\"\"", "and looping over that batch forever if self.debug_batch: self.training_dataset = self.training_dataset.take(self.batch_size) self.training_dataset =", "self._batch_size = batch_size self.dataset_builder = dataset_builder self.checkpoint_dir = checkpoint_dir if checkpoint_dir != ''", "can_support_iterator(self): \"\"\" Determines if the dataset can support an 
iterator or if it", "# We load the chief status to validate that we have the same", "config is set), otherwise None \"\"\" return self.cluster_config.num_shards if self.cluster_config else 1 @property", "self.iterator.make_initializer(self.training_dataset) self.validation_init_op = self.iterator.make_initializer(self.validation_dataset) def initialize_iterator(self, session): \"\"\" Initializes the current iterator :param", "There are %d shards. Current shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards,", "Nb of batches to skip self.steps_in_current_mode = 0 # Step count in current", "dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype) # ----------------------------- # Validation dataset self.validation_dataset", "if self.do_infinite_training: LOGGER.error('Dataset is currently in \"infinite training\" mode. 
Only the training set", "dataset to feed data to the model through tf.data.dataset \"\"\" from enum import", "number of steps per epoch in the current mode (Training / Validation) \"\"\"", "of batches to skip self.steps_in_current_mode = 0 # Step count in current mode", "shapes self.default_features = {} for feature_name, feature_shape in self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] == np.object:", "the current mode (Training / Validation) \"\"\" if self.training_mode == TrainingMode.VALIDATION: return self.nb_validation_steps_per_epoch", "status['training_progress'] if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else: LOGGER.warning('Status between worker and", "specified otherwise if not self.no_iterator: self.create_iterator() def create_iterator(self, iterator_resource=None, shared_name=None, features=None): \"\"\" Creates", ":param no_iterator: Boolean flag that indicates to not create an iterator (it will", "resume later \"\"\" # Not saving status if checkpoint_dir is None if not", "Batching with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size) self.training_dataset", "# Debug (batch) mode # Only taking one batch and looping over that", "through tf.data.dataset \"\"\" from enum import Enum import logging import os import math", "load_status()) - So we keep the current value if self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode", "WORKING_DIR # Constants LOGGER = logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration of training modes", "reached the end of the file\"\"\" self._dataset_is_done = True def build(self): \"\"\" Builds", "dataset self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug 
(batch) mode # Only taking one", "# Repeating to make sure all workers can loop on the dataset at", "= self.training_dataset.shuffle(100 * self.batch_size) # Batching with prefetching self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.training_dataset", "init op :type session: tensorflow.python.client.session.Session \"\"\" if self.is_done: self.nb_completed_epochs += 1 self.nb_batches_to_skip =", "file if that's the case if self.num_shards == status['num_shards']: status_loaded = True else:", "will not switch to the validation set. :param perc_epoch_for_training: If set, the training", "Loads dataset status from disk and resume where we were \"\"\" status =", "same devices (e.g. when using a remote server). :param features: If an iterator_resource", "= 0 # If we were training the train dataset, we need to", "value @property def num_shards(self): \"\"\" Returns the number of shards (if a cluster", "self.steps_in_current_mode += 1 if self.training_mode == TrainingMode.TRAINING: self.training_progress = (self.training_progress + 1. 
/", "# Resuming by skipping a certain number of already processed items if self.nb_batches_to_skip:", "None self.validation_init_op = None self.output_features = None # This represents iterator.get_next() self.default_features =", "None # Creating empty datasets self.training_dataset = None self.validation_dataset = None self.feedable_dataset =", "as dataset_index: dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset'] # Building", "TensorFlow from diplomacy_research.utils.tensorflow import tf # Running init_op # If session is wrapped,", "saved) \"\"\" if not self.checkpoint_dir: return None if not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl')", "has changed from %d to %d', status['num_shards'], self.num_shards) # If we are chief,", "True except EOFError: pass # We load the chief status to validate that", "cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param batch_size: The size of a", "= dataset_index['size_valid_dataset'] # Building the datasets self.build() @property def can_support_iterator(self): \"\"\" Determines if", "tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path, compression_type='GZIP') # Debug (batch) mode # Only taking one batch and looping", "flag to indicate to return the same batch over-and-over to debug our model", "generating entries to feed the model (using the tf.data.dataset API) \"\"\" # pylint:", "def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor :param", "we create a brand new iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, 
shared_name=shared_name)", "self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\" Determine if the iterator has been", "training dataset.', self.nb_batches_to_skip) try: for _ in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda", "os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in fallbacks: if os.path.exists(fallback): return fallback return None @property", "= self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size) # Batching with prefetching self.training_dataset =", "not self.is_done or self.do_infinite_training: self.steps_in_current_mode += 1 if self.training_mode == TrainingMode.TRAINING: self.training_progress =", "= tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next() # Generating init op for", "not available \"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for", "* self.batch_size) self.training_dataset = self.training_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Building a list of generic default", "itertor is on the right device/worker with tf.device(self.cluster_config.iterator_device if self.cluster_config else None): #", "We couldn't find a status file to load, aborting if not status_loaded: return", "load from primary path if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path, 'rb') as status:", "in all # copies or substantial portions of the Software. 
# ============================================================================== \"\"\"", "range(self.num_shards, status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix))", "This object is responsible for generating entries to feed the model (using the", "iterator will be shared under the given name across multiple sessions that share", "index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset =", "self.nb_steps_per_epoch_current_mode return self.nb_completed_epochs, perc_epoch_completed def save_status(self): \"\"\" Save current status to file to", "resource) :param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator. 
:param shared_name: Optional.", "debug_batch: Boolean flag to indicate to return the same batch over-and-over to debug", "The above copyright notice and this permission notice shall be included in all", "= value @property def num_shards(self): \"\"\" Returns the number of shards (if a", "status, otherwise we use the chief use_own_status = ((status['training_mode'] == chief_status['training_mode']) and status['nb_completed_epochs']", "Creating iterator with init ops self.iterator = None self._iterator_initialized = False self.training_init_op =", "== TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # If we were training the train dataset,", "self.do_infinite_training = do_infinite_training self.is_closing = False self.session = None # Creating empty datasets", "to not create an iterator (it will be loaded from a ckpt) :param", "self.training_mode = chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if", "training set can be accessed.') raise RuntimeError('Invalid training mode specified.') self.training_mode = TrainingMode.VALIDATION", "training steps per full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))) @property", "if os.path.exists(self.status_path) and os.path.getsize(self.status_path): with open(self.status_path, 'rb') as status: status = pickle.load(status) #", "@property def is_done(self): \"\"\" Returns True if the end of file has been", "%d to %d', status['num_shards'], self.num_shards) # If we are chief, we do a", "will be for this percentage of available steps before running another evaluation epoch", "match. 
Resuming using chief status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode =", "else: session.run(self.output_features['request_id']) except tf.errors.OutOfRangeError: self.mark_as_done() self.nb_batches_to_skip = 0 def start_training_mode(self, session): \"\"\" Starts", "status if checkpoint_dir is None if not self.status_path: return # Recomputing nb of", "\"\"\" from diplomacy_research.utils.tensorflow import tf assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have", "can support an iterator or if it is a remote (RPC) dataset \"\"\"", "Boolean flag to indicate to return the same batch over-and-over to debug our", "= self.validation_dataset.prefetch(20 * self.batch_size) self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with a", "'valid' class SupervisedDataset(): \"\"\" This object is responsible for generating entries to feed", "representing the iterator. :param shared_name: Optional. 
If non-empty, this iterator will be shared", "training epoch will be for this percentage of available steps before running another", "this iterator will be shared under the given name across multiple sessions that", "output_shapes=output_shapes, output_classes=output_classes) if features: self.output_features = features # Otherwise, we create a brand", "# # The above copyright notice and this permission notice shall be included", "else: os.unlink(self.status_path) # We load the fallback status if not status_loaded and self.fallback_status_path:", "\"\"\" if self.num_shards is not None: raise RuntimeError('You cannot change the batch_size when", "feature_shape in self.dataset_builder.output_shapes.items(): if self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] = bytes('', 'utf-8') else: dtype", "\"no_iterator\" flag.') raise RuntimeError(\"Cannot create new iterator\") if iterator_resource is not None and", "will be shared under the given name across multiple sessions that share the", "LOGGER.error('An iterator resource can only be set if the dataset was created with", "# to get to the same training point if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip", "nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name \"\"\" Returns the number of training steps per full", "the number of training steps per full epoch \"\"\" return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size", "RuntimeError('You cannot change the batch_size when using shards') self._batch_size = value @property def", "as status: status = pickle.load(status) status_loaded = True except EOFError: pass # We", "session): \"\"\" Initializes the current iterator :param session: The session used to initialize", "chief_status['training_mode']) and status['nb_completed_epochs'] == chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized = False if use_own_status:", "Starts the dataset in 
training mode :param session: The session used to initialize", "== chief_status['nb_completed_epochs']) # Loading status self._iterator_initialized = False if use_own_status: self.training_mode = status['training_mode']", "changed from %d to %d', status['num_shards'], self.num_shards) # If we are chief, we", "self.validation_dataset.padded_batch(self.batch_size, padded_shapes=self.dataset_builder.padded_shapes) # Creating iterator (with a new iterator_resource), unless specified otherwise if", "'rb') as status: status = pickle.load(status) status_loaded = True except EOFError: pass #", "# Validation dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but no need to", "shards change and deleting file if that's the case if self.num_shards == status['num_shards']:", "chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress = chief_status['training_progress'] if self.training_mode ==", "if features: self.output_features = features # Otherwise, we create a brand new iterator", "diplomacy_research.utils.tensorflow import tf assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a \"request_id\"", "\"\"\" Supervised Dataset - Class responsible for using a training and validation dataset", "indicates to not create an iterator (it will be loaded from a ckpt)", "\"\"\" Returns the number of completed epochs, and the current % of the", "'status', 'status-%03d.pkl' % status_ix)) # Otherwise, we just delete the worker status file", "missing from queue # Steps self.nb_batches_to_skip = 0 # Nb of batches to", "chief status.') self.training_mode = chief_status['training_mode'] self.nb_completed_epochs = chief_status['nb_completed_epochs'] self.steps_in_current_mode = chief_status['steps_current_mode'] self.training_progress =", "\"\"\" if 
self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode", "os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0) @property def fallback_status_path(self): \"\"\" Path to an alternate", "if not self.checkpoint_dir: return None if not self.cluster_config: return os.path.join(self.checkpoint_dir, 'status.pkl') return os.path.join(self.checkpoint_dir,", "Repeating to make sure all workers can loop on the dataset at all", "import os import math import multiprocessing import pickle import numpy as np from", "is None: LOGGER.error('The iterator features are required when reloading a saved iterator.') raise", "if the dataset can support an iterator or if it is a remote", "deal in the Software without restriction, including without limitation the # rights to", "self.features, and dataset init_ops \"\"\" if iterator_resource is not None and not self.no_iterator:", "Save current status to file to be able to resume later \"\"\" #", "= logging.getLogger(__name__) class TrainingMode(Enum): \"\"\" Enumeration of training modes \"\"\" TRAINING = 'train'", "return self.cluster_config.num_shards if self.cluster_config else 1 @property def nb_training_steps_per_epoch(self): \"\"\" Returns the number", "else max(1e-3, min(1., perc_epoch_for_training)) self.do_infinite_training = do_infinite_training self.is_closing = False self.session = None", "# Nb of batches to skip self.steps_in_current_mode = 0 # Step count in", "dataset # Using different names because we can't define initializers with the same", "True def build(self): \"\"\" Builds the TensorFlow datasets \"\"\" from diplomacy_research.utils.tensorflow import tf", "status['num_shards']): if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)): os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 
status_ix)) #", "load, aborting if not status_loaded: return # If we have the same value", "batch_size self.dataset_builder = dataset_builder self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else WORKING_DIR", "flag that indicates to not create an iterator (it will be loaded from", ":type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments self._batch_size = batch_size self.dataset_builder = dataset_builder", "is set), otherwise None \"\"\" return self.cluster_config.num_shards if self.cluster_config else 1 @property def", "tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset = self.training_dataset.repeat() self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size)", "aborting if not status_loaded: return # If we have the same value as", "nb of completed epochs when doing infinite training if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode", ":param checkpoint_dir: The directory where the status is to be saved. None to", "A tf.resource scalar tf.Tensor representing the iterator. :param shared_name: Optional. 
If non-empty, this", "do so, subject to the following conditions: # # The above copyright notice", "init op :type session: tensorflow.python.client.session.Session \"\"\" if self.do_infinite_training: LOGGER.error('Dataset is currently in \"infinite", "output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next() # Generating init op for each dataset #", "if self.training_mode == TrainingMode.TRAINING: self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) def make_session_run_hook(self): \"\"\" Builds", "chief, we load our status, otherwise we use the chief use_own_status = ((status['training_mode']", "# If we have the same value as the chief, we load our", "tower :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods", "self.validation_dataset = self.validation_dataset.apply(shard_fn) # Batching with prefetching self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function, num_parallel_calls=multiprocessing.cpu_count()) self.validation_dataset =", "permit persons to whom the Software is # furnished to do so, subject", "perc_epoch_completed def save_status(self): \"\"\" Save current status to file to be able to", "\"\"\" Initializes the current iterator :param session: The session used to initialize the", "without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense,", "\"\"\" status = {} status_loaded = False # Not loading status if checkpoint_dir", "doing infinite training if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating directory", "training modes \"\"\" TRAINING = 'train' VALIDATION = 'valid' class SupervisedDataset(): \"\"\" This", "1: LOGGER.info('Sharding dataset. There are %d shards. 
Current shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index)", "= None # Creating iterator with init ops self.iterator = None self._iterator_initialized =", "count in current mode self.training_progress = 0. # Number of items remaining in", "session.run(init_op) self._iterator_initialized = True self._dataset_is_done = False # For validation set, we can", "None: return # Loading TensorFlow from diplomacy_research.utils.tensorflow import tf # Running init_op #", "be able to resume later \"\"\" # Not saving status if checkpoint_dir is", "= False # Not loading status if checkpoint_dir is None. if not self.status_path:", "status['num_shards']: status_loaded = True else: LOGGER.info('Number of shards has changed from %d to", "os.makedirs(os.path.dirname(self.status_path), exist_ok=True) status = {'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards':", "\"\"\" Returns the total number of training and validation steps per epoch \"\"\"", "disable=too-many-instance-attributes def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False, no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.): \"\"\" Constructor", "= (self.training_progress + 1. 
/ self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self): \"\"\" Marks the", "@property def nb_steps_per_epoch_current_mode(self): \"\"\" Returns the number of steps per epoch in the", "\"\"\" fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in", "to indicate to return the same batch over-and-over to debug our model :param", "create a brand new iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features", "a specific iterator resource) :param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator.", "to be able to resume later \"\"\" # Not saving status if checkpoint_dir", "self.steps_in_current_mode = 0 self.initialize_iterator(session) def start_validation_mode(self, session): \"\"\" Starts the dataset in validation", "the chief status path (to validate our status) \"\"\" if not self.cluster_config: return", "in self.dataset_builder.get_proto_fields(), 'You need to have a \"request_id\" field.' # Training dataset self.training_dataset", "from a ckpt) :param do_infinite_training: If set, supervised training will loop over the", "iterator has been initialized \"\"\" return self._iterator_initialized @property def status_path(self): \"\"\" Path to", "initialize the init op :type session: tensorflow.python.client.session.Session \"\"\" # We haven't created an", "self.build() @property def can_support_iterator(self): \"\"\" Determines if the dataset can support an iterator", "self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs = 0 self._dataset_is_done = False # Loading number of", "the same devices (e.g. when using a remote server). :param features: If an", "1. 
/ self.nb_training_steps_per_full_epoch) % 1 def mark_as_done(self): \"\"\" Marks the dataset as having", "we might resume mid-epoch (from load_status()) - So we keep the current value", "training\" mode. Only the training set can be accessed.') raise RuntimeError('Invalid training mode", "default dir. :param cluster_config: Optional. If set, the cluster configuration will be used", "number of items remaining if os.path.exists(self.dataset_builder.dataset_index_path) \\ and os.path.getsize(self.dataset_builder.dataset_index_path): with open(self.dataset_builder.dataset_index_path, 'rb') as", "status to file to be able to resume later \"\"\" # Not saving", "{'training_mode': self.training_mode, 'nb_completed_epochs': self.nb_completed_epochs, 'steps_current_mode': self.steps_in_current_mode, 'training_progress': self.training_progress, 'num_shards': self.num_shards} with open(self.status_path, 'wb')", "create_iterator(self, iterator_resource=None, shared_name=None, features=None): \"\"\" Creates an iterator object (optionally using a shared", "of the epoch completed \"\"\" if self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) perc_epoch_completed", "create new iterator\") if iterator_resource is not None and features is None: LOGGER.error('The", "self.initialize_iterator(session) def get_progress(self): \"\"\" Returns the number of completed epochs, and the current", "def take_local_step(self): \"\"\" Increments the local step counter \"\"\" if not self.is_done or", "'train' VALIDATION = 'valid' class SupervisedDataset(): \"\"\" This object is responsible for generating", "we can reset the steps since we are always starting from the beginning", "skipping %d batches in the training dataset.', self.nb_batches_to_skip) try: for _ in range(self.nb_batches_to_skip):", "epoch (e.g. 2.5% train, valid, 2.5% train, ...) 
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config:", "output types and output shapes self.default_features = {} for feature_name, feature_shape in self.dataset_builder.output_shapes.items():", "iterator\") if iterator_resource is not None and features is None: LOGGER.error('The iterator features", "def initialize_iterator(self, session): \"\"\" Initializes the current iterator :param session: The session used", "file on disk (where progress is saved) \"\"\" if not self.checkpoint_dir: return None", "the self.iterator, self.features, and dataset init_ops \"\"\" if iterator_resource is not None and", "later \"\"\" # Not saving status if checkpoint_dir is None if not self.status_path:", "try: for _ in range(self.nb_batches_to_skip): if hasattr(session, 'run_step_fn'): session.run_step_fn( lambda step_context: step_context.session.run(self.output_features['request_id'])) else:", "or if it is a remote (RPC) dataset \"\"\" return True @property def", "============================================================================== # Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted,", "= disabled self.cluster_config = cluster_config self.debug_batch = debug_batch self.no_iterator = no_iterator self.perc_epoch_for_training =", "% 0), os.path.join(self.checkpoint_dir, 'status.pkl')] for fallback in fallbacks: if os.path.exists(fallback): return fallback return", "open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index: dataset_index = pickle.load(dataset_index) self.total_nb_items_training_proto = dataset_index['size_train_dataset'] self.total_nb_items_valid_proto = dataset_index['size_valid_dataset']", "dataset self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path, compression_type='GZIP') # Sharding, but no need to shuffle if", "valid, 2.5% train, ...) 
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint:", "the datasets self.build() @property def can_support_iterator(self): \"\"\" Determines if the dataset can support", "= 0 # Step count in current mode self.training_progress = 0. # Number", "# This represents iterator.get_next() self.default_features = {} # Will be used as default", "False self.training_init_op = None self.validation_init_op = None self.output_features = None # This represents", "change and deleting file if that's the case if self.num_shards == status['num_shards']: status_loaded", "Recomputing nb of completed epochs when doing infinite training if self.do_infinite_training: self.nb_completed_epochs =", "self.do_infinite_training: self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch) # Creating directory and saving if not", "will loop over the training set forever and will not switch to the", "a batch per tower :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields", "the same value as the chief, we load our status, otherwise we use", "if self.dataset_builder.output_types[feature_name] == np.object: self.default_features[feature_name] = bytes('', 'utf-8') else: dtype = self.dataset_builder.output_types[feature_name] self.default_features[feature_name]", "self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes # Making sure itertor is on", "op for each dataset # Using different names because we can't define initializers", "0) @property def fallback_status_path(self): \"\"\" Path to an alternate status file if the", "(the \"Software\"), # to deal in the Software without restriction, including without limitation", "self._iterator_initialized = False if use_own_status: self.training_mode = status['training_mode'] 
self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode =", "import logging import os import math import multiprocessing import pickle import numpy as", "are required when reloading a saved iterator.') raise ValueError() # Loading TensorFlow from", "to return the same batch over-and-over to debug our model :param no_iterator: Boolean", "Sharding, but no need to shuffle if self.cluster_config and self.num_shards > 1: shard_fn", "# Otherwise, we create a brand new iterator else: self.iterator = tf.data.Iterator.from_structure(output_types=output_types, output_shapes=output_shapes,", ":param do_infinite_training: If set, supervised training will loop over the training set forever", "1 self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch) self.training_mode = TrainingMode.TRAINING self.steps_in_current_mode = 0 self.initialize_iterator(session)", "a cleanup on the status folder if self.cluster_config and self.cluster_config.is_chief: for status_ix in", "directory where the status is to be saved. None to disable, '' for", "output_shapes=output_shapes, output_classes=output_classes, shared_name=shared_name) self.output_features = self.iterator.get_next() # Generating init op for each dataset", "shard index: #%d.', self.cluster_config.num_shards, self.cluster_config.shard_index) shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards, shard_index=self.cluster_config.shard_index) self.training_dataset = self.training_dataset.apply(shard_fn) self.training_dataset", "self.training_mode == TrainingMode.VALIDATION: self.steps_in_current_mode = 0 else: LOGGER.warning('Status between worker and chief does", "do a cleanup on the status folder if self.cluster_config and self.cluster_config.is_chief: for status_ix", "- Class responsible for using a training and validation dataset to feed data", "...) 
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments self._batch_size =", "we use it if iterator_resource is not None: self.iterator = tf.data.Iterator(iterator_resource=iterator_resource, initializer=None, output_types=output_types,", "to debug our model :param no_iterator: Boolean flag that indicates to not create", "in epoch self.total_nb_items_training_proto = 0 self.total_nb_items_valid_proto = 0 self.training_mode = TrainingMode.TRAINING self.nb_completed_epochs =", "we were \"\"\" status = {} status_loaded = False # Not loading status", "\"infinite training\" mode. Only the training set can be accessed.') raise RuntimeError('Invalid training", "'num_shards': self.num_shards} with open(self.status_path, 'wb') as file: pickle.dump(status, file, pickle.HIGHEST_PROTOCOL) def load_status(self): \"\"\"", "file else: os.unlink(self.status_path) # We load the fallback status if not status_loaded and", "that indicates to not create an iterator (it will be loaded from a", "diplomacy_research.utils.cluster.ClusterConfig \"\"\" # pylint: disable=too-many-arguments self._batch_size = batch_size self.dataset_builder = dataset_builder self.checkpoint_dir =", "as the chief, we load our status, otherwise we use the chief use_own_status", "return self.nb_validation_steps_per_epoch return self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\" Determine if the iterator has", "# Sharding, but no need to shuffle if self.cluster_config and self.num_shards > 1:", "diplomacy_research.utils.tensorflow import tf output_types = self.training_dataset.output_types output_shapes = self.training_dataset.output_shapes output_classes = self.training_dataset.output_classes #", "initialize_iterator(self, session): \"\"\" Initializes the current iterator :param session: The session used to", "return 
self.nb_training_steps_per_epoch @property def iterator_initialized(self): \"\"\" Determine if the iterator has been initialized", "status['training_mode'] self.nb_completed_epochs = status['nb_completed_epochs'] self.steps_in_current_mode = status['steps_current_mode'] self.training_progress = status['training_progress'] if self.training_mode ==", "numpy as np from diplomacy_research.settings import WORKING_DIR # Constants LOGGER = logging.getLogger(__name__) class", "None # Creating iterator with init ops self.iterator = None self._iterator_initialized = False", "= {TrainingMode.TRAINING: self.training_init_op, TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode] if hasattr(session, 'run_step_fn'): session.run_step_fn(lambda step_context: step_context.session.run(init_op)) else: session.run(init_op)", "TrainingMode.VALIDATION: self.steps_in_current_mode = 0 # If we were training the train dataset, we" ]
[ "__tablename__ = 'users' username = db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120), unique=True, nullable=False) def", "@staticmethod def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\",", "db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120), unique=True, nullable=False) def json(self): return{'username': self.username, 'email': self.email}", "get_all_users(): return [User.json(user) for user in User.query.all()] @staticmethod def get_user(_username): query = User.query.filter_by(username=_username).first()", "import db class User(db.Model): __tablename__ = 'users' username = db.Column(db.String(80), primary_key=True) email =", "for user in User.query.all()] @staticmethod def get_user(_username): query = User.query.filter_by(username=_username).first() return query @staticmethod", "def add_user(_username, _email): new_user = User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username, _email):", "json(self): return{'username': self.username, 'email': self.email} @staticmethod def get_all_users(): return [User.json(user) for user in", "get_user(_username): query = User.query.filter_by(username=_username).first() return query @staticmethod def add_user(_username, _email): new_user = User(username=_username,", "query @staticmethod def add_user(_username, _email): new_user = User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod def", "def json(self): return{'username': self.username, 'email': self.email} @staticmethod def get_all_users(): return [User.json(user) for user", "db class User(db.Model): __tablename__ = 'users' username = db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120),", "update_email(_username, _email): user_to_update = 
User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit() @staticmethod def delete_user(_username): is_successful", "User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit() @staticmethod def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return", "return{'username': self.username, 'email': self.email} @staticmethod def get_all_users(): return [User.json(user) for user in User.query.all()]", "query = User.query.filter_by(username=_username).first() return query @staticmethod def add_user(_username, _email): new_user = User(username=_username, email=_email)", "in User.query.all()] @staticmethod def get_user(_username): query = User.query.filter_by(username=_username).first() return query @staticmethod def add_user(_username,", "nullable=False) def json(self): return{'username': self.username, 'email': self.email} @staticmethod def get_all_users(): return [User.json(user) for", "user in User.query.all()] @staticmethod def get_user(_username): query = User.query.filter_by(username=_username).first() return query @staticmethod def", "new_user = User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username, _email): user_to_update = User.query.filter_by(username=_username).first()", "db.session.commit() @staticmethod def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod def add_user_td():", "_email db.session.commit() @staticmethod def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod def", "class User(db.Model): __tablename__ = 'users' username = db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120), unique=True,", "\"<EMAIL>\") 
User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object = { 'username': self.username, 'email': self.email }", "[User.json(user) for user in User.query.all()] @staticmethod def get_user(_username): query = User.query.filter_by(username=_username).first() return query", "return bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self):", "User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\")", "is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\")", "User(db.Model): __tablename__ = 'users' username = db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120), unique=True, nullable=False)", "db.Column(db.String(120), unique=True, nullable=False) def json(self): return{'username': self.username, 'email': self.email} @staticmethod def get_all_users(): return", "import json from db_config import db class User(db.Model): __tablename__ = 'users' username =", "user_to_update.email = _email db.session.commit() @staticmethod def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful)", "'email': self.email} @staticmethod def get_all_users(): return [User.json(user) for user in User.query.all()] @staticmethod def", "@staticmethod def get_user(_username): query = User.query.filter_by(username=_username).first() return query @staticmethod def add_user(_username, _email): new_user", "primary_key=True) email = db.Column(db.String(120), unique=True, nullable=False) def json(self): 
return{'username': self.username, 'email': self.email} @staticmethod", "User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object = { 'username': self.username, 'email': self.email", "def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object = {", "return query @staticmethod def add_user(_username, _email): new_user = User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod", "@staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object =", "db_config import db class User(db.Model): __tablename__ = 'users' username = db.Column(db.String(80), primary_key=True) email", "self.email} @staticmethod def get_all_users(): return [User.json(user) for user in User.query.all()] @staticmethod def get_user(_username):", "@staticmethod def update_email(_username, _email): user_to_update = User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit() @staticmethod def", "User.query.filter_by(username=_username).first() return query @staticmethod def add_user(_username, _email): new_user = User(username=_username, email=_email) db.session.add(new_user) db.session.commit()", "email=_email) db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username, _email): user_to_update = User.query.filter_by(username=_username).first() user_to_update.email = _email", "= _email db.session.commit() @staticmethod def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod", "json from db_config import db class User(db.Model): __tablename__ = 'users' username = db.Column(db.String(80),", "@staticmethod def add_user(_username, 
_email): new_user = User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username,", "def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\")", "_email): user_to_update = User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit() @staticmethod def delete_user(_username): is_successful =", "User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object = { 'username': self.username, 'email': self.email } return", "from db_config import db class User(db.Model): __tablename__ = 'users' username = db.Column(db.String(80), primary_key=True)", "= db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120), unique=True, nullable=False) def json(self): return{'username': self.username, 'email':", "user_to_update = User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit() @staticmethod def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete()", "add_user(_username, _email): new_user = User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username, _email): user_to_update", "_email): new_user = User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username, _email): user_to_update =", "bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object", "self.username, 'email': self.email} @staticmethod def get_all_users(): return [User.json(user) for user in User.query.all()] @staticmethod", "= User.query.filter_by(username=_username).first() return query @staticmethod 
def add_user(_username, _email): new_user = User(username=_username, email=_email) db.session.add(new_user)", "= User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\",", "def update_email(_username, _email): user_to_update = User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit() @staticmethod def delete_user(_username):", "db.session.commit() @staticmethod def update_email(_username, _email): user_to_update = User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit() @staticmethod", "@staticmethod def get_all_users(): return [User.json(user) for user in User.query.all()] @staticmethod def get_user(_username): query", "return [User.json(user) for user in User.query.all()] @staticmethod def get_user(_username): query = User.query.filter_by(username=_username).first() return", "def get_user(_username): query = User.query.filter_by(username=_username).first() return query @staticmethod def add_user(_username, _email): new_user =", "\"<EMAIL>\") def __repr__(self): user_object = { 'username': self.username, 'email': self.email } return json.dumps(user_object)", "= 'users' username = db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120), unique=True, nullable=False) def json(self):", "= User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username, _email): user_to_update = User.query.filter_by(username=_username).first() user_to_update.email", "'users' username = db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120), unique=True, nullable=False) def json(self): return{'username':", "= db.Column(db.String(120), unique=True, nullable=False) def json(self): return{'username': self.username, 'email': self.email} 
@staticmethod def get_all_users():", "add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object = { 'username':", "User(username=_username, email=_email) db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username, _email): user_to_update = User.query.filter_by(username=_username).first() user_to_update.email =", "db.session.commit() return bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def", "db.session.add(new_user) db.session.commit() @staticmethod def update_email(_username, _email): user_to_update = User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit()", "unique=True, nullable=False) def json(self): return{'username': self.username, 'email': self.email} @staticmethod def get_all_users(): return [User.json(user)", "User.query.all()] @staticmethod def get_user(_username): query = User.query.filter_by(username=_username).first() return query @staticmethod def add_user(_username, _email):", "username = db.Column(db.String(80), primary_key=True) email = db.Column(db.String(120), unique=True, nullable=False) def json(self): return{'username': self.username,", "\"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object = { 'username': self.username, 'email':", "User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\", \"<EMAIL>\") User.add_user(\"thor\", \"<EMAIL>\") def __repr__(self): user_object = { 'username': self.username,", "= User.query.filter_by(username=_username).first() user_to_update.email = _email db.session.commit() @staticmethod def delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit()", "email = db.Column(db.String(120), 
unique=True, nullable=False) def json(self): return{'username': self.username, 'email': self.email} @staticmethod def", "def get_all_users(): return [User.json(user) for user in User.query.all()] @staticmethod def get_user(_username): query =", "delete_user(_username): is_successful = User.query.filter_by(username=_username).delete() db.session.commit() return bool(is_successful) @staticmethod def add_user_td(): User.add_user(\"darth\", \"<EMAIL>\") User.add_user(\"superman\"," ]
[ "= finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE')", "end_date(self): \"\"\"Last Timestamp value of the time-series index. Returns: Timestamp: a pandas timestamp.", "margot.data.symbols import Symbol from margot.data.ratio import Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame brings together", "self.to_pandas().index.max() @property def index(self): \"\"\"Return the time-series index. Returns: pd.Index: a pandas timeseries", "set_when(self, when): # noqa: D102 self._when = when def simulate(self, when): \"\"\"Create a", "self._when @when.setter def set_when(self, when): # noqa: D102 self._when = when def simulate(self,", "feature in self.features: getattr(self, feature).simulate(when) for ratio in self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self):", "df.dropna() if periods: df = df.tail(periods) return df def refresh(self): \"\"\"Refresh all Symbols", "dataframe representing all data from the MargotDataFrame \"\"\" # Get the elements one", "in self.symbols], axis=1) else: df1 = pd.DataFrame() df2 = pd.DataFrame({('margot', name): getattr(self, name).series", "[ name for name, ref in getmembers(self, lambda m: isinstance(m, Symbol))] self.features =", "pandas timestamp. \"\"\" return self.to_pandas().index.max() @property def index(self): \"\"\"Return the time-series index. Returns:", "len(self.symbols) == 1: df1 = self.symbols[0].to_pandas() elif len(self.symbols) > 1: df1 = pd.concat([getattr(self,", "self.features: getattr(self, feature).simulate(when) for ratio in self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self): self._when =", "int = None, dropna=True) -> pd.DataFrame: \"\"\"Return a pandas Dataframe representing this MargotDataFrame.", "them and ensemble. 
if len(self.symbols) == 1: df1 = self.symbols[0].to_pandas() elif len(self.symbols) >", "go back to. \"\"\" self._when = when for symbol in self.symbols: getattr(self, symbol).simulate(when)", "import getmembers import pandas as pd from margot.data.features import BaseFeature from margot.data.symbols import", "Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.max() @property def index(self): \"\"\"Return the time-series", "timeseries index. \"\"\" return self.to_pandas().index @property def when(self): return self._when @when.setter def set_when(self,", "<reponame>pymargot/margot from inspect import getmembers import pandas as pd from margot.data.features import BaseFeature", "self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self): self._when = None for symbol in self.symbols: getattr(self,", "index. \"\"\" return self.to_pandas().index @property def when(self): return self._when @when.setter def set_when(self, when):", "\"\"\"Refresh all Symbols in this DataFrame.\"\"\" for member in self.symbols: getattr(self, member).refresh() #", "simulating a historical datetime, it is not possible to go back to the", "the writing of trading algorithms. After simulating a historical datetime, it is not", "MargotDataFrame(object): \"\"\"A MargotDataFrame brings together symbols, columns, features and ratios. 
Example:: class Equity(Symbol):", "Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf =", "BaseFeature from margot.data.symbols import Symbol from margot.data.ratio import Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame", "\"\"\" def __init__(self): # noqa: D107 self.symbols = [ name for name, ref", "name for name, ref in getmembers(self, lambda m: isinstance(m, Ratio))] super().__init__() def to_pandas(self,", "= None for symbol in self.symbols: getattr(self, symbol).simulate() for feature in self.features: getattr(self,", "= Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf", "inspect import getmembers import pandas as pd from margot.data.features import BaseFeature from margot.data.symbols", "margot.data.features import BaseFeature from margot.data.symbols import Symbol from margot.data.ratio import Ratio class MargotDataFrame(object):", "\"\"\" return self.to_pandas().index.min() @property def end_date(self): \"\"\"Last Timestamp value of the time-series index.", "spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ')", "feature).simulate(when) for ratio in self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self): self._when = None for", "av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy =", "def start_date(self): \"\"\"First Timestamp of the time-series index. 
Returns: Timestamp: a pandas timestamp.", "to go back to the future. Args: when (tz_aware datetime or pd.Timestamp): when", "(tz_aware datetime or pd.Timestamp): when to go back to. \"\"\" self._when = when", "df2], axis=1) if dropna: df = df.dropna() if periods: df = df.tail(periods) return", "for name, ref in getmembers(self, lambda m: isinstance(m, Ratio))] super().__init__() def to_pandas(self, periods:", "and ensemble. if len(self.symbols) == 1: df1 = self.symbols[0].to_pandas() elif len(self.symbols) > 1:", "__init__(self): # noqa: D107 self.symbols = [ name for name, ref in getmembers(self,", "df1 = pd.DataFrame() df2 = pd.DataFrame({('margot', name): getattr(self, name).series for name in self.ratios", "pd.Index: a pandas timeseries index. \"\"\" return self.to_pandas().index @property def when(self): return self._when", "return self.to_pandas().index.max() @property def index(self): \"\"\"Return the time-series index. Returns: pd.Index: a pandas", "a dataframe simulating a datetime in history. Used for backtesting to simplify the", "trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF() \"\"\" def __init__(self): #", "in self.symbols: getattr(self, member).refresh() # TODO what about ratios? @property def start_date(self): \"\"\"First", "df def refresh(self): \"\"\"Refresh all Symbols in this DataFrame.\"\"\" for member in self.symbols:", "representing this MargotDataFrame. Args: periods (int, optional): only return the tail n periods.", "def simulate(self, when): \"\"\"Create a dataframe simulating a datetime in history. Used for", "from margot.data.features import BaseFeature from margot.data.symbols import Symbol from margot.data.ratio import Ratio class", "about ratios? @property def start_date(self): \"\"\"First Timestamp of the time-series index. 
Returns: Timestamp:", "spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF() \"\"\" def __init__(self): # noqa:", "name, ref in getmembers(self, lambda m: isinstance(m, BaseFeature))] self.ratios = [ name for", "features and ratios. Example:: class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close')", "== 1: df1 = self.symbols[0].to_pandas() elif len(self.symbols) > 1: df1 = pd.concat([getattr(self, name).to_pandas()", "[ name for name, ref in getmembers(self, lambda m: isinstance(m, Ratio))] super().__init__() def", "Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class", "ratio).simulate(when) def end_simulation(self): self._when = None for symbol in self.symbols: getattr(self, symbol).simulate() for", "elements one at a time, to pandas them and ensemble. if len(self.symbols) ==", "datetime, it is not possible to go back to the future. Args: when", "when(self): return self._when @when.setter def set_when(self, when): # noqa: D102 self._when = when", "\"\"\"Return a pandas Dataframe representing this MargotDataFrame. Args: periods (int, optional): only return", "import BaseFeature from margot.data.symbols import Symbol from margot.data.ratio import Ratio class MargotDataFrame(object): \"\"\"A", "else: df1 = pd.DataFrame() df2 = pd.DataFrame({('margot', name): getattr(self, name).series for name in", "for name in self.ratios + self.features}) df = pd.concat([df1, df2], axis=1) if dropna:", "the future. 
Args: when (tz_aware datetime or pd.Timestamp): when to go back to.", "df2 = pd.DataFrame({('margot', name): getattr(self, name).series for name in self.ratios + self.features}) df", "self.features}) df = pd.concat([df1, df2], axis=1) if dropna: df = df.dropna() if periods:", "a datetime in history. Used for backtesting to simplify the writing of trading", "import Symbol from margot.data.ratio import Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame brings together symbols,", "columns, features and ratios. Example:: class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns =", "a pandas timeseries index. \"\"\" return self.to_pandas().index @property def when(self): return self._when @when.setter", "from margot.data.ratio import Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame brings together symbols, columns, features", "import Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame brings together symbols, columns, features and ratios.", "in self.symbols: getattr(self, symbol).simulate() for feature in self.features: getattr(self, feature).simulate() for ratio in", "the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.min() @property def", "vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF() \"\"\"", "when (tz_aware datetime or pd.Timestamp): when to go back to. \"\"\" self._when =", "\"\"\" # Get the elements one at a time, to pandas them and", "None, dropna=True) -> pd.DataFrame: \"\"\"Return a pandas Dataframe representing this MargotDataFrame. Args: periods", "of the time-series index. Returns: Timestamp: a pandas timestamp. 
\"\"\" return self.to_pandas().index.max() @property", "pd.concat([getattr(self, name).to_pandas() for name in self.symbols], axis=1) else: df1 = pd.DataFrame() df2 =", "lambda m: isinstance(m, BaseFeature))] self.ratios = [ name for name, ref in getmembers(self,", "all data from the MargotDataFrame \"\"\" # Get the elements one at a", "the elements one at a time, to pandas them and ensemble. if len(self.symbols)", "symbol).simulate(when) for feature in self.features: getattr(self, feature).simulate(when) for ratio in self.ratios: getattr(self, ratio).simulate(when)", "df = df.tail(periods) return df def refresh(self): \"\"\"Refresh all Symbols in this DataFrame.\"\"\"", "symbol in self.symbols: getattr(self, symbol).simulate(when) for feature in self.features: getattr(self, feature).simulate(when) for ratio", "1: df1 = self.symbols[0].to_pandas() elif len(self.symbols) > 1: df1 = pd.concat([getattr(self, name).to_pandas() for", "realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO',", "Symbols in this DataFrame.\"\"\" for member in self.symbols: getattr(self, member).refresh() # TODO what", "df1 = pd.concat([getattr(self, name).to_pandas() for name in self.symbols], axis=1) else: df1 = pd.DataFrame()", "= Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF() \"\"\" def __init__(self): # noqa: D107", "of the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.min() @property", "self._when = when def simulate(self, when): \"\"\"Create a dataframe simulating a datetime in", "= ExampleDF() \"\"\" def __init__(self): # noqa: D107 self.symbols = [ name for", "import pandas as pd from margot.data.features import BaseFeature from margot.data.symbols import Symbol from", "@property def end_date(self): \"\"\"Last Timestamp value of the time-series index. 
Returns: Timestamp: a", "when): # noqa: D102 self._when = when def simulate(self, when): \"\"\"Create a dataframe", "when): \"\"\"Create a dataframe simulating a datetime in history. Used for backtesting to", "a historical datetime, it is not possible to go back to the future.", "periods: int = None, dropna=True) -> pd.DataFrame: \"\"\"Return a pandas Dataframe representing this", "Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF() \"\"\" def __init__(self):", "from the MargotDataFrame \"\"\" # Get the elements one at a time, to", "\"\"\"Last Timestamp value of the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\"", "dropna: df = df.dropna() if periods: df = df.tail(periods) return df def refresh(self):", "self.to_pandas().index @property def when(self): return self._when @when.setter def set_when(self, when): # noqa: D102", "if dropna: df = df.dropna() if periods: df = df.tail(periods) return df def", "df1 = self.symbols[0].to_pandas() elif len(self.symbols) > 1: df1 = pd.concat([getattr(self, name).to_pandas() for name", "df = df.dropna() if periods: df = df.tail(periods) return df def refresh(self): \"\"\"Refresh", "in self.ratios + self.features}) df = pd.concat([df1, df2], axis=1) if dropna: df =", "# noqa: D107 self.symbols = [ name for name, ref in getmembers(self, lambda", "def __init__(self): # noqa: D107 self.symbols = [ name for name, ref in", "= df.tail(periods) return df def refresh(self): \"\"\"Refresh all Symbols in this DataFrame.\"\"\" for", "TODO what about ratios? @property def start_date(self): \"\"\"First Timestamp of the time-series index.", "label='spy_russ') mydf = ExampleDF() \"\"\" def __init__(self): # noqa: D107 self.symbols = [", "symbols, columns, features and ratios. 
Example:: class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns", "the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.max() @property def", "= pd.concat([getattr(self, name).to_pandas() for name in self.symbols], axis=1) else: df1 = pd.DataFrame() df2", "pd.Timestamp): when to go back to. \"\"\" self._when = when for symbol in", "refresh(self): \"\"\"Refresh all Symbols in this DataFrame.\"\"\" for member in self.symbols: getattr(self, member).refresh()", "Timestamp value of the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\" return", "data from the MargotDataFrame \"\"\" # Get the elements one at a time,", "dataframe simulating a datetime in history. Used for backtesting to simplify the writing", "ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close,", "the tail n periods. Returns: pd.DataFrame: a Pandas dataframe representing all data from", "name, ref in getmembers(self, lambda m: isinstance(m, Symbol))] self.features = [ name for", "start_date(self): \"\"\"First Timestamp of the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\"", "for backtesting to simplify the writing of trading algorithms. After simulating a historical", "getattr(self, ratio).simulate(when) def end_simulation(self): self._when = None for symbol in self.symbols: getattr(self, symbol).simulate()", "name in self.ratios + self.features}) df = pd.concat([df1, df2], axis=1) if dropna: df", "ratio in self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self): self._when = None for symbol in", "time-series index. Returns: Timestamp: a pandas timestamp. 
\"\"\" return self.to_pandas().index.max() @property def index(self):", "[ name for name, ref in getmembers(self, lambda m: isinstance(m, BaseFeature))] self.ratios =", "ExampleDF() \"\"\" def __init__(self): # noqa: D107 self.symbols = [ name for name,", "self.symbols], axis=1) else: df1 = pd.DataFrame() df2 = pd.DataFrame({('margot', name): getattr(self, name).series for", "= pd.concat([df1, df2], axis=1) if dropna: df = df.dropna() if periods: df =", "denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF() \"\"\" def __init__(self): # noqa: D107 self.symbols =", "noqa: D107 self.symbols = [ name for name, ref in getmembers(self, lambda m:", "in getmembers(self, lambda m: isinstance(m, BaseFeature))] self.ratios = [ name for name, ref", "\"\"\"Return the time-series index. Returns: pd.Index: a pandas timeseries index. \"\"\" return self.to_pandas().index", "margot.data.ratio import Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame brings together symbols, columns, features and", "pd.DataFrame: a Pandas dataframe representing all data from the MargotDataFrame \"\"\" # Get", "(int, optional): only return the tail n periods. Returns: pd.DataFrame: a Pandas dataframe", "name for name, ref in getmembers(self, lambda m: isinstance(m, BaseFeature))] self.ratios = [", "= df.dropna() if periods: df = df.tail(periods) return df def refresh(self): \"\"\"Refresh all", "noqa: D102 self._when = when def simulate(self, when): \"\"\"Create a dataframe simulating a", "\"\"\"First Timestamp of the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\" return", "datetime or pd.Timestamp): when to go back to. 
\"\"\" self._when = when for", "getattr(self, feature).simulate(when) for ratio in self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self): self._when = None", "isinstance(m, BaseFeature))] self.ratios = [ name for name, ref in getmembers(self, lambda m:", "pd.DataFrame: \"\"\"Return a pandas Dataframe representing this MargotDataFrame. Args: periods (int, optional): only", "for name, ref in getmembers(self, lambda m: isinstance(m, BaseFeature))] self.ratios = [ name", "return self._when @when.setter def set_when(self, when): # noqa: D102 self._when = when def", "elif len(self.symbols) > 1: df1 = pd.concat([getattr(self, name).to_pandas() for name in self.symbols], axis=1)", "when for symbol in self.symbols: getattr(self, symbol).simulate(when) for feature in self.features: getattr(self, feature).simulate(when)", "pandas them and ensemble. if len(self.symbols) == 1: df1 = self.symbols[0].to_pandas() elif len(self.symbols)", "= [ name for name, ref in getmembers(self, lambda m: isinstance(m, Symbol))] self.features", "lambda m: isinstance(m, Ratio))] super().__init__() def to_pandas(self, periods: int = None, dropna=True) ->", "getmembers import pandas as pd from margot.data.features import BaseFeature from margot.data.symbols import Symbol", "a pandas timestamp. \"\"\" return self.to_pandas().index.min() @property def end_date(self): \"\"\"Last Timestamp value of", "index. Returns: Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.min() @property def end_date(self): \"\"\"Last", "self.symbols[0].to_pandas() elif len(self.symbols) > 1: df1 = pd.concat([getattr(self, name).to_pandas() for name in self.symbols],", "= self.symbols[0].to_pandas() elif len(self.symbols) > 1: df1 = pd.concat([getattr(self, name).to_pandas() for name in", "@when.setter def set_when(self, when): # noqa: D102 self._when = when def simulate(self, when):", "to go back to. 
\"\"\" self._when = when for symbol in self.symbols: getattr(self,", "= Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF() \"\"\" def", "time-series index. Returns: pd.Index: a pandas timeseries index. \"\"\" return self.to_pandas().index @property def", "when def simulate(self, when): \"\"\"Create a dataframe simulating a datetime in history. Used", "# Get the elements one at a time, to pandas them and ensemble.", "member in self.symbols: getattr(self, member).refresh() # TODO what about ratios? @property def start_date(self):", "+ self.features}) df = pd.concat([df1, df2], axis=1) if dropna: df = df.dropna() if", "Symbol from margot.data.ratio import Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame brings together symbols, columns,", "= pd.DataFrame() df2 = pd.DataFrame({('margot', name): getattr(self, name).series for name in self.ratios +", "one at a time, to pandas them and ensemble. if len(self.symbols) == 1:", "if len(self.symbols) == 1: df1 = self.symbols[0].to_pandas() elif len(self.symbols) > 1: df1 =", "MargotDataFrame brings together symbols, columns, features and ratios. Example:: class Equity(Symbol): adj_close =", "Returns: pd.DataFrame: a Pandas dataframe representing all data from the MargotDataFrame \"\"\" #", "pd from margot.data.features import BaseFeature from margot.data.symbols import Symbol from margot.data.ratio import Ratio", "ensemble. if len(self.symbols) == 1: df1 = self.symbols[0].to_pandas() elif len(self.symbols) > 1: df1", "timestamp. \"\"\" return self.to_pandas().index.min() @property def end_date(self): \"\"\"Last Timestamp value of the time-series", "value of the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.max()", "Returns: Timestamp: a pandas timestamp. 
\"\"\" return self.to_pandas().index.min() @property def end_date(self): \"\"\"Last Timestamp", "Pandas dataframe representing all data from the MargotDataFrame \"\"\" # Get the elements", "axis=1) else: df1 = pd.DataFrame() df2 = pd.DataFrame({('margot', name): getattr(self, name).series for name", "def end_simulation(self): self._when = None for symbol in self.symbols: getattr(self, symbol).simulate() for feature", "simulate(self, when): \"\"\"Create a dataframe simulating a datetime in history. Used for backtesting", "self.to_pandas().index.min() @property def end_date(self): \"\"\"Last Timestamp value of the time-series index. Returns: Timestamp:", "of trading algorithms. After simulating a historical datetime, it is not possible to", "as pd from margot.data.features import BaseFeature from margot.data.symbols import Symbol from margot.data.ratio import", "Returns: pd.Index: a pandas timeseries index. \"\"\" return self.to_pandas().index @property def when(self): return", "for name in self.symbols], axis=1) else: df1 = pd.DataFrame() df2 = pd.DataFrame({('margot', name):", "m: isinstance(m, BaseFeature))] self.ratios = [ name for name, ref in getmembers(self, lambda", "def to_pandas(self, periods: int = None, dropna=True) -> pd.DataFrame: \"\"\"Return a pandas Dataframe", "the MargotDataFrame \"\"\" # Get the elements one at a time, to pandas", "m: isinstance(m, Ratio))] super().__init__() def to_pandas(self, periods: int = None, dropna=True) -> pd.DataFrame:", "and ratios. 
Example:: class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol", "name in self.symbols], axis=1) else: df1 = pd.DataFrame() df2 = pd.DataFrame({('margot', name): getattr(self,", "MargotDataFrame \"\"\" # Get the elements one at a time, to pandas them", "self.ratios = [ name for name, ref in getmembers(self, lambda m: isinstance(m, Ratio))]", "in self.features: getattr(self, feature).simulate(when) for ratio in self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self): self._when", "ref in getmembers(self, lambda m: isinstance(m, Ratio))] super().__init__() def to_pandas(self, periods: int =", "getattr(self, symbol).simulate() for feature in self.features: getattr(self, feature).simulate() for ratio in self.ratios: getattr(self,", "periods: df = df.tail(periods) return df def refresh(self): \"\"\"Refresh all Symbols in this", "pandas Dataframe representing this MargotDataFrame. Args: periods (int, optional): only return the tail", "is not possible to go back to the future. Args: when (tz_aware datetime", "Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame brings together symbols, columns, features and ratios. Example::", "@property def index(self): \"\"\"Return the time-series index. Returns: pd.Index: a pandas timeseries index.", "-> pd.DataFrame: \"\"\"Return a pandas Dataframe representing this MargotDataFrame. Args: periods (int, optional):", "self.symbols: getattr(self, symbol).simulate(when) for feature in self.features: getattr(self, feature).simulate(when) for ratio in self.ratios:", "MargotDataFrame. Args: periods (int, optional): only return the tail n periods. 
Returns: pd.DataFrame:", "if periods: df = df.tail(periods) return df def refresh(self): \"\"\"Refresh all Symbols in", "getmembers(self, lambda m: isinstance(m, Symbol))] self.features = [ name for name, ref in", "name).series for name in self.ratios + self.features}) df = pd.concat([df1, df2], axis=1) if", "lambda m: isinstance(m, Symbol))] self.features = [ name for name, ref in getmembers(self,", "writing of trading algorithms. After simulating a historical datetime, it is not possible", "a pandas Dataframe representing this MargotDataFrame. Args: periods (int, optional): only return the", "Get the elements one at a time, to pandas them and ensemble. if", "= when def simulate(self, when): \"\"\"Create a dataframe simulating a datetime in history.", "simplify the writing of trading algorithms. After simulating a historical datetime, it is", "= finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo", "D107 self.symbols = [ name for name, ref in getmembers(self, lambda m: isinstance(m,", "def end_date(self): \"\"\"Last Timestamp value of the time-series index. Returns: Timestamp: a pandas", "window=30) class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio =", "pandas as pd from margot.data.features import BaseFeature from margot.data.symbols import Symbol from margot.data.ratio", "adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame):", "time-series index. Returns: Timestamp: a pandas timestamp. 
\"\"\" return self.to_pandas().index.min() @property def end_date(self):", "in self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self): self._when = None for symbol in self.symbols:", "name for name, ref in getmembers(self, lambda m: isinstance(m, Symbol))] self.features = [", "isinstance(m, Symbol))] self.features = [ name for name, ref in getmembers(self, lambda m:", "trading algorithms. After simulating a historical datetime, it is not possible to go", "@property def when(self): return self._when @when.setter def set_when(self, when): # noqa: D102 self._when", "super().__init__() def to_pandas(self, periods: int = None, dropna=True) -> pd.DataFrame: \"\"\"Return a pandas", "= av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy", "\"\"\" self._when = when for symbol in self.symbols: getattr(self, symbol).simulate(when) for feature in", "log_returns = finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE')", "return the tail n periods. Returns: pd.DataFrame: a Pandas dataframe representing all data", "from margot.data.symbols import Symbol from margot.data.ratio import Ratio class MargotDataFrame(object): \"\"\"A MargotDataFrame brings", "getattr(self, name).series for name in self.ratios + self.features}) df = pd.concat([df1, df2], axis=1)", "getmembers(self, lambda m: isinstance(m, BaseFeature))] self.ratios = [ name for name, ref in", "periods (int, optional): only return the tail n periods. Returns: pd.DataFrame: a Pandas", "dropna=True) -> pd.DataFrame: \"\"\"Return a pandas Dataframe representing this MargotDataFrame. Args: periods (int,", "backtesting to simplify the writing of trading algorithms. 
After simulating a historical datetime,", "for name, ref in getmembers(self, lambda m: isinstance(m, Symbol))] self.features = [ name", "None for symbol in self.symbols: getattr(self, symbol).simulate() for feature in self.features: getattr(self, feature).simulate()", "# noqa: D102 self._when = when def simulate(self, when): \"\"\"Create a dataframe simulating", "finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio", "= [ name for name, ref in getmembers(self, lambda m: isinstance(m, Ratio))] super().__init__()", "ratios? @property def start_date(self): \"\"\"First Timestamp of the time-series index. Returns: Timestamp: a", "in getmembers(self, lambda m: isinstance(m, Ratio))] super().__init__() def to_pandas(self, periods: int = None,", "simulating a datetime in history. Used for backtesting to simplify the writing of", "\"\"\"Create a dataframe simulating a datetime in history. Used for backtesting to simplify", "Example:: class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns',", "or pd.Timestamp): when to go back to. \"\"\" self._when = when for symbol", "Symbol))] self.features = [ name for name, ref in getmembers(self, lambda m: isinstance(m,", "= [ name for name, ref in getmembers(self, lambda m: isinstance(m, BaseFeature))] self.ratios", "in this DataFrame.\"\"\" for member in self.symbols: getattr(self, member).refresh() # TODO what about", "After simulating a historical datetime, it is not possible to go back to", "def index(self): \"\"\"Return the time-series index. Returns: pd.Index: a pandas timeseries index. \"\"\"", "algorithms. 
After simulating a historical datetime, it is not possible to go back", "this DataFrame.\"\"\" for member in self.symbols: getattr(self, member).refresh() # TODO what about ratios?", "to_pandas(self, periods: int = None, dropna=True) -> pd.DataFrame: \"\"\"Return a pandas Dataframe representing", "time, to pandas them and ensemble. if len(self.symbols) == 1: df1 = self.symbols[0].to_pandas()", "member).refresh() # TODO what about ratios? @property def start_date(self): \"\"\"First Timestamp of the", "@property def start_date(self): \"\"\"First Timestamp of the time-series index. Returns: Timestamp: a pandas", "def set_when(self, when): # noqa: D102 self._when = when def simulate(self, when): \"\"\"Create", "BaseFeature))] self.ratios = [ name for name, ref in getmembers(self, lambda m: isinstance(m,", "for symbol in self.symbols: getattr(self, symbol).simulate(when) for feature in self.features: getattr(self, feature).simulate(when) for", "trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF()", "self.symbols = [ name for name, ref in getmembers(self, lambda m: isinstance(m, Symbol))]", "to the future. Args: when (tz_aware datetime or pd.Timestamp): when to go back", "at a time, to pandas them and ensemble. if len(self.symbols) == 1: df1", "self._when = when for symbol in self.symbols: getattr(self, symbol).simulate(when) for feature in self.features:", "pd.DataFrame() df2 = pd.DataFrame({('margot', name): getattr(self, name).series for name in self.ratios + self.features})", "in getmembers(self, lambda m: isinstance(m, Symbol))] self.features = [ name for name, ref", "pd.concat([df1, df2], axis=1) if dropna: df = df.dropna() if periods: df = df.tail(periods)", "only return the tail n periods. Returns: pd.DataFrame: a Pandas dataframe representing all", "this MargotDataFrame. 
Args: periods (int, optional): only return the tail n periods. Returns:", "axis=1) if dropna: df = df.dropna() if periods: df = df.tail(periods) return df", "Timestamp of the time-series index. Returns: Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.min()", "back to. \"\"\" self._when = when for symbol in self.symbols: getattr(self, symbol).simulate(when) for", "return self.to_pandas().index.min() @property def end_date(self): \"\"\"Last Timestamp value of the time-series index. Returns:", "class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30)", "n periods. Returns: pd.DataFrame: a Pandas dataframe representing all data from the MargotDataFrame", "Args: periods (int, optional): only return the tail n periods. Returns: pd.DataFrame: a", "to simplify the writing of trading algorithms. After simulating a historical datetime, it", "getattr(self, member).refresh() # TODO what about ratios? @property def start_date(self): \"\"\"First Timestamp of", "not possible to go back to the future. Args: when (tz_aware datetime or", "for ratio in self.ratios: getattr(self, ratio).simulate(when) def end_simulation(self): self._when = None for symbol", "DataFrame.\"\"\" for member in self.symbols: getattr(self, member).refresh() # TODO what about ratios? @property", "index(self): \"\"\"Return the time-series index. Returns: pd.Index: a pandas timeseries index. \"\"\" return", "pandas timeseries index. \"\"\" return self.to_pandas().index @property def when(self): return self._when @when.setter def", "for member in self.symbols: getattr(self, member).refresh() # TODO what about ratios? @property def", "brings together symbols, columns, features and ratios. Example:: class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted',", "index. 
Returns: pd.Index: a pandas timeseries index. \"\"\" return self.to_pandas().index @property def when(self):", "symbol).simulate() for feature in self.features: getattr(self, feature).simulate() for ratio in self.ratios: getattr(self, ratio).simulate()", "len(self.symbols) > 1: df1 = pd.concat([getattr(self, name).to_pandas() for name in self.symbols], axis=1) else:", "def refresh(self): \"\"\"Refresh all Symbols in this DataFrame.\"\"\" for member in self.symbols: getattr(self,", "self.features = [ name for name, ref in getmembers(self, lambda m: isinstance(m, BaseFeature))]", "Dataframe representing this MargotDataFrame. Args: periods (int, optional): only return the tail n", "end_simulation(self): self._when = None for symbol in self.symbols: getattr(self, symbol).simulate() for feature in", "tail n periods. Returns: pd.DataFrame: a Pandas dataframe representing all data from the", "class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo = Equity(symbol='VTWO', trading_calendar='NYSE') spy_russ_ratio = Ratio(numerator=spy.adj_close,", "\"\"\" return self.to_pandas().index @property def when(self): return self._when @when.setter def set_when(self, when): #", "name, ref in getmembers(self, lambda m: isinstance(m, Ratio))] super().__init__() def to_pandas(self, periods: int", "together symbols, columns, features and ratios. Example:: class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close')", "a pandas timestamp. 
\"\"\" return self.to_pandas().index.max() @property def index(self): \"\"\"Return the time-series index.", "time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY',", "df = pd.concat([df1, df2], axis=1) if dropna: df = df.dropna() if periods: df", "all Symbols in this DataFrame.\"\"\" for member in self.symbols: getattr(self, member).refresh() # TODO", "self.ratios + self.features}) df = pd.concat([df1, df2], axis=1) if dropna: df = df.dropna()", "Ratio))] super().__init__() def to_pandas(self, periods: int = None, dropna=True) -> pd.DataFrame: \"\"\"Return a", "go back to the future. Args: when (tz_aware datetime or pd.Timestamp): when to", "getattr(self, symbol).simulate(when) for feature in self.features: getattr(self, feature).simulate(when) for ratio in self.ratios: getattr(self,", "it is not possible to go back to the future. Args: when (tz_aware", "periods. Returns: pd.DataFrame: a Pandas dataframe representing all data from the MargotDataFrame \"\"\"", "= when for symbol in self.symbols: getattr(self, symbol).simulate(when) for feature in self.features: getattr(self,", "\"\"\" return self.to_pandas().index.max() @property def index(self): \"\"\"Return the time-series index. Returns: pd.Index: a", "to. \"\"\" self._when = when for symbol in self.symbols: getattr(self, symbol).simulate(when) for feature", "pandas timestamp. \"\"\" return self.to_pandas().index.min() @property def end_date(self): \"\"\"Last Timestamp value of the", "Returns: Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.max() @property def index(self): \"\"\"Return the", "what about ratios? @property def start_date(self): \"\"\"First Timestamp of the time-series index. 
Returns:", "in self.symbols: getattr(self, symbol).simulate(when) for feature in self.features: getattr(self, feature).simulate(when) for ratio in", "df.tail(periods) return df def refresh(self): \"\"\"Refresh all Symbols in this DataFrame.\"\"\" for member", "optional): only return the tail n periods. Returns: pd.DataFrame: a Pandas dataframe representing", "self._when = None for symbol in self.symbols: getattr(self, symbol).simulate() for feature in self.features:", "index. Returns: Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.max() @property def index(self): \"\"\"Return", "a time, to pandas them and ensemble. if len(self.symbols) == 1: df1 =", "name).to_pandas() for name in self.symbols], axis=1) else: df1 = pd.DataFrame() df2 = pd.DataFrame({('margot',", "Used for backtesting to simplify the writing of trading algorithms. After simulating a", "pd.DataFrame({('margot', name): getattr(self, name).series for name in self.ratios + self.features}) df = pd.concat([df1,", "mydf = ExampleDF() \"\"\" def __init__(self): # noqa: D107 self.symbols = [ name", "getmembers(self, lambda m: isinstance(m, Ratio))] super().__init__() def to_pandas(self, periods: int = None, dropna=True)", "timestamp. \"\"\" return self.to_pandas().index.max() @property def index(self): \"\"\"Return the time-series index. Returns: pd.Index:", "datetime in history. Used for backtesting to simplify the writing of trading algorithms.", "for feature in self.features: getattr(self, feature).simulate(when) for ratio in self.ratios: getattr(self, ratio).simulate(when) def", "> 1: df1 = pd.concat([getattr(self, name).to_pandas() for name in self.symbols], axis=1) else: df1", "to pandas them and ensemble. 
if len(self.symbols) == 1: df1 = self.symbols[0].to_pandas() elif", "finance.LogReturns(column='adj_close') realised_vol = finance.RealisedVolatility(column='log_returns', window=30) class ExampleDF(MargotDataFrame): spy = Equity(symbol='SPY', trading_calendar='NYSE') vtwo =", "Ratio(numerator=spy.adj_close, denominator=vtwo.adj_close, label='spy_russ') mydf = ExampleDF() \"\"\" def __init__(self): # noqa: D107 self.symbols", "when to go back to. \"\"\" self._when = when for symbol in self.symbols:", "m: isinstance(m, Symbol))] self.features = [ name for name, ref in getmembers(self, lambda", "Timestamp: a pandas timestamp. \"\"\" return self.to_pandas().index.min() @property def end_date(self): \"\"\"Last Timestamp value", "a Pandas dataframe representing all data from the MargotDataFrame \"\"\" # Get the", "for symbol in self.symbols: getattr(self, symbol).simulate() for feature in self.features: getattr(self, feature).simulate() for", "= pd.DataFrame({('margot', name): getattr(self, name).series for name in self.ratios + self.features}) df =", "return self.to_pandas().index @property def when(self): return self._when @when.setter def set_when(self, when): # noqa:", "possible to go back to the future. Args: when (tz_aware datetime or pd.Timestamp):", "ref in getmembers(self, lambda m: isinstance(m, Symbol))] self.features = [ name for name,", "D102 self._when = when def simulate(self, when): \"\"\"Create a dataframe simulating a datetime", "symbol in self.symbols: getattr(self, symbol).simulate() for feature in self.features: getattr(self, feature).simulate() for ratio", "name): getattr(self, name).series for name in self.ratios + self.features}) df = pd.concat([df1, df2],", "ratios. Example:: class Equity(Symbol): adj_close = av.Column(function='historical_daily_adjusted', time_series='adjusted_close') log_returns = finance.LogReturns(column='adj_close') realised_vol =", "history. Used for backtesting to simplify the writing of trading algorithms. 
After simulating", "Args: when (tz_aware datetime or pd.Timestamp): when to go back to. \"\"\" self._when", "the time-series index. Returns: pd.Index: a pandas timeseries index. \"\"\" return self.to_pandas().index @property", "# TODO what about ratios? @property def start_date(self): \"\"\"First Timestamp of the time-series", "back to the future. Args: when (tz_aware datetime or pd.Timestamp): when to go", "representing all data from the MargotDataFrame \"\"\" # Get the elements one at", "from inspect import getmembers import pandas as pd from margot.data.features import BaseFeature from", "ref in getmembers(self, lambda m: isinstance(m, BaseFeature))] self.ratios = [ name for name,", "self.symbols: getattr(self, symbol).simulate() for feature in self.features: getattr(self, feature).simulate() for ratio in self.ratios:", "= None, dropna=True) -> pd.DataFrame: \"\"\"Return a pandas Dataframe representing this MargotDataFrame. Args:", "future. Args: when (tz_aware datetime or pd.Timestamp): when to go back to. \"\"\"", "self.symbols: getattr(self, member).refresh() # TODO what about ratios? @property def start_date(self): \"\"\"First Timestamp", "historical datetime, it is not possible to go back to the future. Args:", "1: df1 = pd.concat([getattr(self, name).to_pandas() for name in self.symbols], axis=1) else: df1 =", "def when(self): return self._when @when.setter def set_when(self, when): # noqa: D102 self._when =", "in history. Used for backtesting to simplify the writing of trading algorithms. After", "class MargotDataFrame(object): \"\"\"A MargotDataFrame brings together symbols, columns, features and ratios. Example:: class", "\"\"\"A MargotDataFrame brings together symbols, columns, features and ratios. 
Example:: class Equity(Symbol): adj_close", "isinstance(m, Ratio))] super().__init__() def to_pandas(self, periods: int = None, dropna=True) -> pd.DataFrame: \"\"\"Return", "return df def refresh(self): \"\"\"Refresh all Symbols in this DataFrame.\"\"\" for member in" ]
[ "from ..classes import PersonWroteReview, ReviewOfProduct, IsGoldenFlag import random from ..meta_classes import DataSetProperties from", "for review in data_set.generate_reviews(person): review.test = random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"}) writer.create_edge_if_not_exists(PersonWroteReview(review.by_person,", "random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"}) writer.create_edge_if_not_exists(PersonWroteReview(review.by_person, review.id, IsGoldenFlag(False)), set()) writer.create_edge_if_not_exists(ReviewOfProduct(review.id, review.of_product, IsGoldenFlag(False)),", "..meta_classes import DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet from ..utils import DatasetWriter from graph_io", "= SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id,", "client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"), QueryParams()) pass create_indexes()", "graph_io import QueryParams, CypherQuery from tqdm import tqdm def run(client, data_set_properties: DataSetProperties): with", "#client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"), QueryParams()) pass create_indexes() for i, product in enumerate(tqdm(data_set.generate_public_products())):", "ON :NODE(id, dataset_name)\"), QueryParams()) pass create_indexes() for i, product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"})", "enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, 
{\"style_preference\"}) for review in", "import PersonWroteReview, ReviewOfProduct, IsGoldenFlag import random from ..meta_classes import DataSetProperties from ..experiment_1.simple_data_set import", ":NODE(id, dataset_name)\"), QueryParams()) pass create_indexes() for i, product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for", "i, product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"})", "import random from ..meta_classes import DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet from ..utils import", "review.test = random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"}) writer.create_edge_if_not_exists(PersonWroteReview(review.by_person, review.id, IsGoldenFlag(False)), set()) writer.create_edge_if_not_exists(ReviewOfProduct(review.id,", ":NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"), QueryParams()) pass create_indexes() for i, product", "in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for review", "from ..meta_classes import DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet from ..utils import DatasetWriter from", "{\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON", "for i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for review in 
data_set.generate_reviews(person): review.test =", "from tqdm import tqdm def run(client, data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as", "DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet from ..utils import DatasetWriter from graph_io import QueryParams,", "def run(client, data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set: SimpleDataSet", "product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for", "for i, product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person,", "<filename>data_sets/synthetic_review_prediction/article_0/generate.py from ..classes import PersonWroteReview, ReviewOfProduct, IsGoldenFlag import random from ..meta_classes import DataSetProperties", "<= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"}) writer.create_edge_if_not_exists(PersonWroteReview(review.by_person, review.id, IsGoldenFlag(False)), set()) writer.create_edge_if_not_exists(ReviewOfProduct(review.id, review.of_product, IsGoldenFlag(False)), set())", "writer.create_node_if_not_exists(product, {\"style\"}) for i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for review in data_set.generate_reviews(person):", "create_indexes() for i, product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i, person in 
enumerate(tqdm(data_set.generate_public_people())):", "INDEX ON :NODE(id, dataset_name)\"), QueryParams()) pass create_indexes() for i, product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product,", "CypherQuery from tqdm import tqdm def run(client, data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"})", "data_set.generate_reviews(person): review.test = random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"}) writer.create_edge_if_not_exists(PersonWroteReview(review.by_person, review.id, IsGoldenFlag(False)), set())", "SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON", "from ..utils import DatasetWriter from graph_io import QueryParams, CypherQuery from tqdm import tqdm", "in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for review in data_set.generate_reviews(person): review.test = random.random() <= 0.1", "from ..experiment_1.simple_data_set import SimpleDataSet from ..utils import DatasetWriter from graph_io import QueryParams, CypherQuery", "= random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"}) writer.create_edge_if_not_exists(PersonWroteReview(review.by_person, review.id, IsGoldenFlag(False)), set()) writer.create_edge_if_not_exists(ReviewOfProduct(review.id, review.of_product,", "dataset_name)\"), QueryParams()) pass create_indexes() for i, product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i,", "random from ..meta_classes import DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet from ..utils import DatasetWriter", "IsGoldenFlag import random from 
..meta_classes import DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet from ..utils", "import DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet from ..utils import DatasetWriter from graph_io import", "SimpleDataSet from ..utils import DatasetWriter from graph_io import QueryParams, CypherQuery from tqdm import", "create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"), QueryParams()) pass", "{\"style_preference\"}) for review in data_set.generate_reviews(person): review.test = random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"})", "{\"style\"}) for i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for review in data_set.generate_reviews(person): review.test", "import SimpleDataSet from ..utils import DatasetWriter from graph_io import QueryParams, CypherQuery from tqdm", "QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"), QueryParams()) pass create_indexes() for i, product in", "tqdm def run(client, data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set:", "ReviewOfProduct, IsGoldenFlag import random from ..meta_classes import DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet from", "enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for review in data_set.generate_reviews(person): review.test = random.random() <= 0.1 writer.create_node_if_not_exists(review,", "DatasetWriter from graph_io import QueryParams, CypherQuery from tqdm import tqdm def run(client, data_set_properties:", "DatasetWriter(client, 
data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE", "review in data_set.generate_reviews(person): review.test = random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"}) writer.create_edge_if_not_exists(PersonWroteReview(review.by_person, review.id,", "..experiment_1.simple_data_set import SimpleDataSet from ..utils import DatasetWriter from graph_io import QueryParams, CypherQuery from", "DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def", "..classes import PersonWroteReview, ReviewOfProduct, IsGoldenFlag import random from ..meta_classes import DataSetProperties from ..experiment_1.simple_data_set", "as writer: writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"),", "person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for review in data_set.generate_reviews(person): review.test = random.random() <=", "writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE", "i, person in enumerate(tqdm(data_set.generate_public_people())): writer.create_node_if_not_exists(person, {\"style_preference\"}) for review in data_set.generate_reviews(person): review.test = random.random()", "SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams()) 
#client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"),", "PersonWroteReview, ReviewOfProduct, IsGoldenFlag import random from ..meta_classes import DataSetProperties from ..experiment_1.simple_data_set import SimpleDataSet", "QueryParams, CypherQuery from tqdm import tqdm def run(client, data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name,", "data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX", "INDEX ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"), QueryParams()) pass create_indexes() for", "..utils import DatasetWriter from graph_io import QueryParams, CypherQuery from tqdm import tqdm def", "data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties)", "import tqdm def run(client, data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset()", "from graph_io import QueryParams, CypherQuery from tqdm import tqdm def run(client, data_set_properties: DataSetProperties):", "def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"), QueryParams())", "in data_set.generate_reviews(person): review.test = random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\", \"test\"}) writer.create_edge_if_not_exists(PersonWroteReview(review.by_person, review.id, IsGoldenFlag(False)),", "QueryParams()) pass create_indexes() for i, product in 
enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i, person", "ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id, dataset_name)\"), QueryParams()) pass create_indexes() for i,", "import DatasetWriter from graph_io import QueryParams, CypherQuery from tqdm import tqdm def run(client,", "tqdm import tqdm def run(client, data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer:", "writer.create_node_if_not_exists(person, {\"style_preference\"}) for review in data_set.generate_reviews(person): review.test = random.random() <= 0.1 writer.create_node_if_not_exists(review, {\"score\",", "with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes():", "writer: writer.nuke_dataset() data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams())", "import QueryParams, CypherQuery from tqdm import tqdm def run(client, data_set_properties: DataSetProperties): with DatasetWriter(client,", "run(client, data_set_properties: DataSetProperties): with DatasetWriter(client, data_set_properties.dataset_name, {\"is_golden\",\"\"}) as writer: writer.nuke_dataset() data_set: SimpleDataSet =", "data_set: SimpleDataSet = SimpleDataSet(data_set_properties) def create_indexes(): client.execute_cypher_write(CypherQuery(\"CREATE INDEX ON :NODE(id)\"), QueryParams()) #client.execute_cypher_write(CypherQuery(\"CREATE INDEX", "pass create_indexes() for i, product in enumerate(tqdm(data_set.generate_public_products())): writer.create_node_if_not_exists(product, {\"style\"}) for i, person in" ]
[]
[ "<<EMAIL>> 1551711685 -0500 commit: Update based on pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown", "test_get_detected_points(left, right, index): l = pandas.Series(left) r = pandas.Series(right) rl, rr = widgets.get_valid_points(l,", "([0.1, 0.4, 0.3, 1, 0.5, 0.2], (3,3)) ] ) def test_find_boundaries_fixed(series, expected): s", "([0, .1, .2, .3, .4, .5], False) ] ) def test_fixed(series, expected): s", "# Check the `inner` option. left = pandas.Series([0, 0, .3, .4, .4, .4,", "] ) def test_fixes_imediately(series, expected): s = pandas.Series(series) assert widgets.fixed_immediately(s, 0.03, 0.97) ==", "@patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout:", "= [0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [0.170, 0.55,", "expected_right result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist()", "78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit: Update based on pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e", "[0, .14, 0, 0, 0], [1]), ([0, 0, 0, 0, 0], [0, .14,", "= widgets.get_valid_points(left, right, 0.03, inner = True) assert result_left.tolist() == expected_left assert result_right.tolist()", "1.00, 1.00, 1.00, 1.00]) expected_left = [0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069,", "4, 5] == list(widgets.get_detected_points(left, right, .03, .97, inner = True)[0].index) def test_get_detected_points_inner(): left", ".8], 'right': [.1, .2, .3, .3, .3, .3], }, index = range(3, 9))", ".9, 1, 0]) right = pandas.Series([0, 0, 0, .1, .2, .3, .3, .3,", "0.07, 0.081, 0.069, 0.042] expected_right = [0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00,", "1], False) ] ) def test_fixes_imediately(series, 
expected): s = pandas.Series(series) assert widgets.fixed_immediately(s, 0.03,", ".1, .2, .3, .4, .5], False) ] ) def test_fixed(series, expected): s =", "when comparing items rather than a scalar result, # Let's check the values", "index = range(3, 9)) result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner =", "= widgets.get_undetected(values, 0.03) # Since pandas likes to return a series of bool", "expected\", [ (3, 3, 1), (4, 2, 6), (6, 3, 20) ] )", "0.00, 0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00]) expected_left = [0.000, 0.00,", "rr = widgets.get_valid_points(l, r, 0.03) assert list(rl.index) == list(rr.index) assert list(rl.index) == index", "right, dlimit = 0.03) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right switched_result_left,", "1.00]) expected_left = [0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042] expected_right =", "assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8, .9] assert", "l = pandas.Series(left) r = pandas.Series(right) rl, rr = widgets.get_valid_points(l, r, 0.03) assert", "Unknown <<EMAIL>> 1551713984 -0500 commit: Changed Default Clustering Method \"\"\" expected_hash = \"f086ec9\"", "True), ([0, 1, 0, 1, 0, 1], True), ([0, .2, 1, 1, 1],", "assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000,", "<<EMAIL>> 1551711670 -0500 checkout: moving from master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>>", "def test_fixed(series, expected): s = pandas.Series(series) assert widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize( \"series,", "1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, 0.03, inner = True) assert result_left.tolist()", "dataio, widgets @pytest.mark.parametrize( \"columns, expected\", [ (['1', '66', '0', 'X9', 'xc', 'x33', 'col4'],", 
"0.9) # Since pandas likes to return a series of bool values when", "result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000,", "[ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])), ([0.000,", "left = pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042]) right", "test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns) assert result == expected def test_map_trajectories_to_genotype(): table =", "table.set_index('genotype') expected_map = {'A1': 'A', 'A2': 'A', 'A3': 'A', 'B1': 'B', 'B2': 'B',", "assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3, 0, 0]", "([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 3), ([0.000, 0.000, 0.000, 0.525, 0.454,", "test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values, 0.9) assert result == expected @pytest.mark.parametrize( \"values, expected\",", "0, 0, .14, .53, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97)", "0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000, 1.000, 0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([]))", "0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, dlimit", "expected_right assert switched_result_right.tolist() == expected_left expected_left = [0.263, 0.07, 0.081, 0.069, 0.042] expected_right", "0, .1, .2, .3, .3, .3, .3, 0, 0, 0]) result_left, result_right =", "'A', 'B1': 'B', 'B2': 'B', 'C1': 'C'} output = widgets.map_trajectories_to_genotype(table['members']) assert expected_map ==", "0.03, 0.97) assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8,", "== [.1, .2, .3, .3, .3, .3] def test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00,", "@pytest.mark.parametrize( \"series, expected\", [ ([0.1, 0.4, 0.3, 1, 
0.97 , 1], (3,5)), ([0.2,", ".2, 1, 1, 1], True), ([0, .1, .2, .3, .4, .5], False) ]", "1, 0.5, 0.2], (3,3)) ] ) def test_find_boundaries_fixed(series, expected): s = pandas.Series(series) result", ") def test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values) assert result == expected @pytest.mark.parametrize( \"values,", "0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0], index = [0,1,2])), ([0.000, 0.000, 0.000, 0.525,", "0], [0, .14, 0, 1, 1], [1, 2, 3, 4]), ] ) def", "right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [3, 4] #", "['A', 'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1'] } ) table = table.set_index('genotype') expected_map", "widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [3, 4]", "@pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index", "0.2], (3,3)) ] ) def test_find_boundaries_fixed(series, expected): s = pandas.Series(series) result = widgets.find_boundaries_fixed(s,", "pandas.Series([0.911, 0.910], index = [5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911],", "directly. 
assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\",", "78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown", "6, 7] == list(widgets.get_detected_points(left, right, .03, inner = False)[0].index) assert [2, 3, 4,", "s = pandas.Series(series) assert widgets.fixed_immediately(s, 0.03, 0.97) == expected @pytest.mark.parametrize( \"series,expected\", [ ([0,", "0.200, 0.200],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left,", "0] expected = pandas.DataFrame({ 'left': [.3, .4, .5, .6, .7, .8], 'right': [.1,", "6, 7] == list(widgets.get_detected_points(left, right, .03, inner = True)[0].index) assert [3, 4, 5]", "right, 0.03, inner = True) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right", "1.000, 1.000], pandas.Series([])) ] ) def test_get_intermediate(values, expected): result = widgets.get_intermediate(values, 0.03, 0.9)", "['s', 't', 'r', 'i', 'n', 'g', '1']) ] ) def test_coerce_to_list(values, expected): result", "1.00] result_left, result_right = widgets.get_valid_points(left, right, dlimit = 0.03) assert result_left.tolist() == expected_left", "list(result_right.index) assert list(result_left.index) == [3, 4] # Check the `inner` option. 
left =", "inner = True)[0].index) def test_get_detected_points_inner(): left = pandas.Series([0, 0, 0, 0, 0, 0,", "0.5 ]) l,r = widgets.get_valid_points(left, right, dlimit = 0.03, inner = True) assert", "table = pandas.DataFrame( { 'genotype': ['A', 'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1'] }", "right = pandas.Series([0, 0, 0, .14, .53, 1, 1]) result_left, result_right = widgets.get_detected_points(left,", ".5, .6, .7, .8] assert result_right.tolist() == [.1, .2, .3, .3, .3, .3]", "assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right result_left, result_right = widgets.get_valid_points(left, right,", "@pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't', 'r', 'i',", "0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000,", "([0, 0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910],", "} ) table = table.set_index('genotype') expected_map = {'A1': 'A', 'A2': 'A', 'A3': 'A',", "1.000, 1.000, 1.000], 3), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 5), ([.1,", ".4, .4, 1, 1]) right = pandas.Series([0, 0, 0, .1, .1, .1, .2,", "([1, .1, .1, .1], 0) ] ) def test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values,", "['Genotype', 0, 1 ,2, 3], [0, 1, 2, 3]) ] ) def test_get_numeric_columns(columns,", "1]) assert [2, 3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner", "0,0], index = [0,1,2])), ([0.000, 0.000, 0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index", "1.000, 1.000], pandas.Series([0,0], index = [0,1])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910],", ") def test_get_undetected(values, expected): result = widgets.get_undetected(values, 0.03) # Since pandas likes to", "[0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 
0.000, 0.525, 0.454, 0.911,", "'X9', 'x33']), ( ['Genotype', 0, 1 ,2, 3], [0, 1, 2, 3]) ]", "index = [0,1,2,4])), ([1.000, 1.000, 0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] )", "1.000, 1.000], 3), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 5), ([.1, .1,", "result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97) assert result_left.tolist() == [.1, .2, .3,", "0.911, 0.910], pandas.Series([0, 0,0], index = [0,1,2])), ([0.000, 0.000, 0.000, 0.525, 0.020, 0.911,", "0, 0, 0, 0, 1, 1]) right = pandas.Series([0, 0, 0, .14, .53,", "result_left.tolist() == [.3, .4, .5, .6, .7, .8] assert result_right.tolist() == [.1, .2,", "left = pandas.Series([0, 0, 0, 0, 0, 0, 0.085, 0.001, 0.005]) right =", "('string1', ['s', 't', 'r', 'i', 'n', 'g', '1']) ] ) def test_coerce_to_list(values, expected):", "0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index = [5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454,", "0.9) assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000,", ".4, .5, .6, .7, .8, .9, 1, 0]) right = pandas.Series([0, 0, 0,", "[.3, .4, .5, .6, .7, .8] assert result_right.tolist() == [.1, .2, .3, .3,", "def test_get_fixed(values, expected): result = widgets.get_fixed(values, 0.9) # Since pandas likes to return", "([0.2, 1, 0.2, 0.98, 0.1], (1,3)), ([0.1, 0.2, 0.3, 0.4, 0.5], None), ([0.1,", "= widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output @pytest.mark.parametrize( \"left,right,index\", [ ([0, 1, 1, 4,", ".03, .97, inner = True)[0].index) def test_get_detected_points_inner(): left = pandas.Series([0, 0, 0, 0,", "(3,3)) ] ) def test_find_boundaries_fixed(series, expected): s = pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97)", ".1, .1, .1], None), ([1, .1, .1, .1], 0) ] ) def test_get_first_fixed_timepoint(values,", "import patch from loguru import logger import pandas import pytest from muller import", "1.00, 1.00, 1.00, 
1.00, 1.00]) expected_left = [0.000, 0.00, 0.00, 0.263, 0.07, 0.081,", "'x33', 'col4'], ['1', '66', '0', 'X9', 'x33']), ( ['Genotype', 0, 1 ,2, 3],", "widgets.get_valid_points(left, right, 0.03, 0.97) assert result_left.tolist() == [.1, .2, .3, .4, .5, .6,", "\"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index =", "[3, 4, 5] == list(widgets.get_detected_points(left, right, .03, .97, inner = True)[0].index) def test_get_detected_points_inner():", "right, 0.03, 0.97, inner = True) assert result_left.tolist() == [.3, .4, .5, .6,", "0.55, 0.5 ]) l,r = widgets.get_valid_points(left, right, dlimit = 0.03, inner = True)", "0, 0, 0], [1, 2, 3]), ([0, 0, 0, 0, 0], [0, .14,", "True), ([0, .2, 1, 1, 1], False) ] ) def test_fixes_imediately(series, expected): s", "to bool for safety. assert result.tolist() == [bool(i) for i in expected] @pytest.mark.parametrize(", "dlimit = 0.03) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right switched_result_left, switched_result_right", "False)[0].index) assert [2, 3, 4, 5, 6] == list(widgets.get_detected_points(left, right, .03, .97, inner", "== expected @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1], True), ([0,", "0.200],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left, right,", ".8, .9] assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3,", ".7, .8], 'right': [.1, .2, .3, .3, .3, .3], }, index = range(3,", "[0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [1.00, 1.00, 1.00, 1.00, 1.00] result_left,", "filename_mock.return_value = test_file result_hash = widgets.get_commit_hash() assert expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\", [", ".3, .3, .3, .3, 0, 0, 0]) result_left, result_right = widgets.get_valid_points(left, right, 0.03)", "the `inner` option. 
left = pandas.Series([0, 0, .3, .4, .4, .4, 1, 1])", "1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,1,1]), ([0, 0, 0.261,", "assert result_left.tolist() == [.3, .4, .5, .6, .7, .8] assert result_right.tolist() == [.1,", "test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving from", "test_get_detected_points_advanced(): left = pandas.Series([0, 0, 0, 0, 0]) right = pandas.Series([0, .14, 0,", "pandas.Series([0, 0, 0, 0, 0]) right = pandas.Series([0, .14, 0, 1, 1]) result_left,", "1, 0, 1], True), ([0, .2, 1, 1, 1], False) ] ) def", "'B1|B2', 'C1'] } ) table = table.set_index('genotype') expected_map = {'A1': 'A', 'A2': 'A',", "2, 3, 4]), ([0, 1, 0, 0.2, 0], [0, .14, 0, 0, 0],", "0, 0, 0]) result_left, result_right = widgets.get_valid_points(left, right, 0.03) assert result_left.tolist() == [.1,", "0, 0], [1, 2, 3]), ([0, 0, 0, 0, 0], [0, .14, .23,", "0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left, right, expected): result", "expected_map == output @pytest.mark.parametrize( \"left,right,index\", [ ([0, 1, 1, 4, 5], [.23, .14,", "result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3, 0] expected =", "True)[0].index) def test_get_detected_points_inner(): left = pandas.Series([0, 0, 0, 0, 0, 0, 0.085, 0.001,", ".3, 0] expected = pandas.DataFrame({ 'left': [.3, .4, .5, .6, .7, .8], 'right':", "0.000, 0.000], pandas.Series([])) ] ) def test_get_fixed(values, expected): result = widgets.get_fixed(values, 0.9) #", "0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [1] left = pandas.Series([0,", "([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0], index = [0,1,2])), ([0.000,", "'X9', 'xc', 'x33', 'col4'], ['1', '66', '0', 'X9', 'x33']), ( ['Genotype', 0, 1", "assert 
list(result_left.index) == list(result_right.index) assert list(result_left.index) == [3, 4] # Check the `inner`", "[0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left, right, expected): result = widgets.get_overlap_regions(left, right, 0.9) #", "from master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit: Update based", "result_left.tolist() == expected_left assert result_right.tolist() == expected_right result_left, result_right = widgets.get_valid_points(left, right, 0.03,", "result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1] assert", "'genotype': ['A', 'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1'] } ) table = table.set_index('genotype')", "[.1, .2, .3, .3, .3, .3] def test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00, 0.000,", "assert expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1],", "(6, 3, 20) ] ) def test_calculate_total_number_of_combinations(elements, size, expected): result = widgets.calculate_number_of_combinations(elements, size)", ".1, .1, .2, 1]) assert [2, 3, 4, 5, 6, 7] == list(widgets.get_detected_points(left,", "1, 1, 4, 5], [.23, .14, .13, 0, 0], [0, 1, 2, 3,", ".3, .3] def test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263, 0.07,", "@pytest.mark.parametrize( \"columns, expected\", [ (['1', '66', '0', 'X9', 'xc', 'x33', 'col4'], ['1', '66',", "assert [2, 3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner =", "and result_right.tolist() == [] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick", "assert switched_result_right.tolist() == expected_left expected_left = [0.263, 0.07, 0.081, 0.069, 0.042] expected_right =", "0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], 
index = [3,4,6])), ([0.000, 0.000, 1.000, 1.000, 1.000,", "0, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) ==", "result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) ==", ".6, .7, .8] assert result_right.tolist() == [.1, .2, .3, .3, .3, .3] def", "0.454], index = [3, 4])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525,", "1], True), ([0, 1, 0, 1, 0, 1], True), ([0, .2, 1, 1,", "== [.1, .2, .3, .4, .5, .6, .7, .8, .9] assert result_right.tolist() ==", "] ) def test_get_intermediate(values, expected): result = widgets.get_intermediate(values, 0.03, 0.9) # Since pandas", "'A', 'A3': 'A', 'B1': 'B', 'B2': 'B', 'C1': 'C'} output = widgets.map_trajectories_to_genotype(table['members']) assert", "pandas.Series([0, 0, 0, 0, 0, 0, 0.085, 0.001, 0.005]) right = pandas.Series([0,0, 0,", "0.97) assert result == expected @pytest.mark.parametrize( \"left, right, expected\", [ ([0, 0, 0.261,", "expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])),", "1, 1]) right = pandas.Series([0, 0, 0, .1, .1, .1, .2, 1]) assert", "0, 0, 0, 0], [0, .14, 0, 0, 0], [1]), ([0, 0, 0,", "([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't', 'r', 'i', 'n', 'g', '1']) ] ) def", "0.005]) right = pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55, 0.5 ]) l,r =", "( ['Genotype', 0, 1 ,2, 3], [0, 1, 2, 3]) ] ) def", "1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, dlimit = 0.03) assert result_left.tolist()", "widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output @pytest.mark.parametrize( \"left,right,index\", [ ([0, 1, 1, 4, 5],", "output @pytest.mark.parametrize( \"left,right,index\", [ ([0, 1, 1, 4, 5], [.23, .14, .13, 0,", "result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3, 0, 0] 
result_left,", "comparing items rather than a scalar result, # Let's check the values and", "0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000, 1.000, 0.860, 1.000, 1.000, 1.000, 1.000],", "3, 1), (4, 2, 6), (6, 3, 20) ] ) def test_calculate_total_number_of_combinations(elements, size,", "== [0, 0, .1, .2, .3, .3, .3, .3, 0, 0] result_left, result_right", "\"elements, size, expected\", [ (3, 3, 1), (4, 2, 6), (6, 3, 20)", "0, 1, 1], [1, 2, 3, 4]), ] ) def test_get_detected_points(left, right, index):", "([0, 0, 0, 0, 0], [0, .14, 0, 1, 1], [1, 2, 3,", "0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], index = [3,4,6])), ([0.000, 0.000,", "0, 0, 0], [0, .14, 0, 1, 1], [1, 2, 3, 4]), ]", "0, 1, 0, 1], True), ([0, .2, 1, 1, 1], True), ([0, .1,", "unittest.mock import patch from loguru import logger import pandas import pytest from muller", "pandas.Series([0.525, 0.454], index = [3, 4])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810],", "== expected_right result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert", ".7, .8] assert result_right.tolist() == [.1, .2, .3, .3, .3, .3] def test_get_valid_points_complex():", "0, 1, 0, 1], True), ([0, .2, 1, 1, 1], False) ] )", "0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], index = [3,4,6])), ([0.000, 0.000, 1.000, 1.000,", "0.910], 5), ([.1, .1, .1, .1], None), ([1, .1, .1, .1], 0) ]", "index = [3,4,6])), ([0.000, 0.000, 1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] )", "index directly. 
assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values, expected\",", "= widgets.get_fixed(values, 0.9) # Since pandas likes to return a series of bool", "1] assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3, 0,", "\"series,expected\", [ ([0, 0, 0, 1, 1, 1], True), ([0, 1, 0, 1,", ".3, .3, .3, .3], }, index = range(3, 9)) result_left, result_right = widgets.get_valid_points(left,", "expected): result = widgets.get_overlap_regions(left, right, 0.9) # Convert to bool for safety. assert", ".4, 1, 1]) right = pandas.Series([0, 0, 0, .1, .1, .1, .2, 1])", "widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist() == expected_right assert switched_result_right.tolist() == expected_left expected_left =", "== list(result_right.index) assert list(result_left.index) == [3, 4] # Check the `inner` option. left", "1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000,", "assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\", [", ".3], }, index = range(3, 9)) result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97,", "([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index = [5])), ([0.000, 0.000,", "== list(widgets.get_detected_points(left, right, .03, inner = False)[0].index) assert [2, 3, 4, 5, 6]", "== list(result_right.index) assert list(result_left.index) == [1] left = pandas.Series([0, 0, 0, 0, 0,", "0]) right = pandas.Series([0, 0, 0, .1, .2, .3, .3, .3, .3, 0,", "inner = True) assert result_left.tolist() == [.3, .4, .5, .6, .7, .8] assert", "assert widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize( \"series, expected\", [ ([0.1, 0.4, 0.3, 1,", "expected): s = pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97) assert result == expected 
@pytest.mark.parametrize(", "0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left, right, expected): result =", "0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [1] left = pandas.Series([0, 0,", "[.23, .14, .13, 0, 0], [0, 1, 2, 3, 4]), ([0, 1, 0,", "<<EMAIL>> 1551713873 -0500 commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500", "in expected] @pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't',", "s = pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97) assert result == expected @pytest.mark.parametrize( \"left,", ".1, .2, .3, .3, .3, .3, 0, 0, 0]) result_left, result_right = widgets.get_valid_points(left,", "= pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97) assert result == expected @pytest.mark.parametrize( \"left, right,", "\"\"\" expected_hash = \"f086ec9\" filename_mock.return_value = test_file result_hash = widgets.get_commit_hash() assert expected_hash ==", "assert result == expected def test_map_trajectories_to_genotype(): table = pandas.DataFrame( { 'genotype': ['A', 'B',", "0, 0, .1, .1, .1, .2, 1]) assert [2, 3, 4, 5, 6,", "[ ([0.1, 0.4, 0.3, 1, 0.97 , 1], (3,5)), ([0.2, 1, 0.2, 0.98,", "expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1], True),", "result, # Let's check the values and index directly. 
assert result.tolist() == expected.tolist()", ") def test_get_overlap_regions(left, right, expected): result = widgets.get_overlap_regions(left, right, 0.9) # Convert to", "0.4, 0.5], None), ([0.1, 0.4, 0.3, 1, 0.5, 0.2], (3,3)) ] ) def", "1, 1, 1], True), ([0, .1, .2, .3, .4, .5], False) ] )", "import pandas import pytest from muller import dataio, widgets @pytest.mark.parametrize( \"columns, expected\", [", "= widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist() == [.3, .4,", "Clustering Method \"\"\" expected_hash = \"f086ec9\" filename_mock.return_value = test_file result_hash = widgets.get_commit_hash() assert", "(pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't', 'r', 'i', 'n', 'g', '1']) ]", "0], [0, .14, 0, 0, 0], [1, 2, 3]), ([0, 0, 0, 0,", "list(rr.index) assert list(rl.index) == index def test_get_detected_points_advanced(): left = pandas.Series([0, 0, 0, 0,", "result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1], True), ([0, 1,", ".8, .9, 1, 0]) right = pandas.Series([0, 0, 0, .1, .2, .3, .3,", "0, 0, 0.085, 0.001, 0.005]) right = pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55,", "'B', 'B2': 'B', 'C1': 'C'} output = widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output @pytest.mark.parametrize(", "0, 1, 1]) right = pandas.Series([0, 0, 0, .14, .53, 1, 1]) result_left,", "Update based on pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit:", "= False)[0].index) assert [2, 3, 4, 5, 6] == list(widgets.get_detected_points(left, right, .03, .97,", "045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving from master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e", "0, .1, .2, .3, .3, .3, .3, 0] expected = pandas.DataFrame({ 'left': [.3,", "True) 
assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right result_left, result_right = widgets.get_valid_points(left,", ".2, .3, .4, .5, .6, .7, .8, .9, 1, 0]) right = pandas.Series([0,", "list(result_left.index) == [3, 4] # Check the `inner` option. left = pandas.Series([0, 0,", "pandas.Series([0.261], index = [2])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454],", "f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit: Changed Default Clustering Method \"\"\" expected_hash =", "0.98, 0.1], (1,3)), ([0.1, 0.2, 0.3, 0.4, 0.5], None), ([0.1, 0.4, 0.3, 1,", "0.810], index = [3,4,6])), ([0.000, 0.000, 1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ]", "def test_get_detected_points_advanced(): left = pandas.Series([0, 0, 0, 0, 0]) right = pandas.Series([0, .14,", "= pandas.Series([0, 0, 0, .14, .53, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right,", "= range(3, 9)) result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True)", "0, 0.2, 0], [0, .14, 0, 0, 0], [1, 2, 3]), ([0, 0,", "assert result_right.tolist() == expected_right result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner =", "== [0.05] def test_get_valid_points_simple(): left = pandas.Series([0, .1, .2, .3, .4, .5, .6,", "0.042] expected_right = [1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right,", "right, .03, .97, inner = True)[0].index) def test_get_detected_points_inner(): left = pandas.Series([0, 0, 0,", "True) assert result_left.tolist() == [.3, .4, .5, .6, .7, .8] assert result_right.tolist() ==", ".1], 0) ] ) def test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values, 0.9) assert result", "([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index = [0,1])), ([0.000, 0.000,", ".3, .3, .3, 0] expected = 
pandas.DataFrame({ 'left': [.3, .4, .5, .6, .7,", "== expected_right assert switched_result_right.tolist() == expected_left expected_left = [0.263, 0.07, 0.081, 0.069, 0.042]", "1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, 0.03, inner = True) assert", "assert r.tolist() == [0.05] def test_get_valid_points_simple(): left = pandas.Series([0, .1, .2, .3, .4,", "commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit: Changed Default", "expected): result = widgets.get_numeric_columns(columns) assert result == expected def test_map_trajectories_to_genotype(): table = pandas.DataFrame(", "import pytest from muller import dataio, widgets @pytest.mark.parametrize( \"columns, expected\", [ (['1', '66',", "= pandas.Series([0, 0, 0, .1, .1, .1, .2, 1]) assert [2, 3, 4,", "r.tolist() == [0.05] def test_get_valid_points_simple(): left = pandas.Series([0, .1, .2, .3, .4, .5,", "0.1], (1,3)), ([0.1, 0.2, 0.3, 0.4, 0.5], None), ([0.1, 0.4, 0.3, 1, 0.5,", "1.000], pandas.Series([])) ] ) def test_get_intermediate(values, expected): result = widgets.get_intermediate(values, 0.03, 0.9) #", "values and index directly. assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize(", "result_right = widgets.get_valid_points(left, right, 0.03) assert result_left.tolist() == [.1, .2, .3, .4, .5,", "0.07, 0.081, 0.069, 0.042]) right = pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00, 1.00,", "Let's check the values and index directly. 
assert result.tolist() == expected.tolist() assert list(result.index)", "widgets.get_valid_points(l, r, 0.03) assert list(rl.index) == list(rr.index) assert list(rl.index) == index def test_get_detected_points_advanced():", "def test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values) assert result == expected @pytest.mark.parametrize( \"values, expected\",", "return a series of bool values when comparing items rather than a scalar", "([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])), ([0.000, 0.000,", "[.1, .2, .3, .4, .5, .6, .7, .8, .9, 1] assert result_right.tolist() ==", "Default Clustering Method \"\"\" expected_hash = \"f086ec9\" filename_mock.return_value = test_file result_hash = widgets.get_commit_hash()", ".1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 0]) right =", "] ) def test_get_undetected(values, expected): result = widgets.get_undetected(values, 0.03) # Since pandas likes", "] ) def test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values, 0.9) assert result == expected", ") def test_get_detected_points(left, right, index): l = pandas.Series(left) r = pandas.Series(right) rl, rr", "right, expected): result = widgets.get_overlap_regions(left, right, 0.9) # Convert to bool for safety.", "\"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index =", "0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042]) right = pandas.Series([0.00, 0.00, 0.170, 0.55,", "0,0.05, 0.55, 0.5 ]) l,r = widgets.get_valid_points(left, right, dlimit = 0.03, inner =", "pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored difference calculation", "0] result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97) assert result_left.tolist() == [.1, .2,", "0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index = [3, 
4])), ([0.000, 0.000, 0.000, 0.525,", "0.97) == expected @pytest.mark.parametrize( \"series, expected\", [ ([0.1, 0.4, 0.3, 1, 0.97 ,", "test_get_intermediate(values, expected): result = widgets.get_intermediate(values, 0.03, 0.9) # Since pandas likes to return", "widgets.get_overlap_regions(left, right, 0.9) # Convert to bool for safety. assert result.tolist() == [bool(i)", "'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1'] } ) table = table.set_index('genotype') expected_map = {'A1':", "widgets.fixed_immediately(s, 0.03, 0.97) == expected @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1,", ".4, .5, .6, .7, .8] assert result_right.tolist() == [.1, .2, .3, .3, .3,", "option. left = pandas.Series([0, 0, .3, .4, .4, .4, 1, 1]) right =", "index = [0,1])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0], index", "5, 6] == list(widgets.get_detected_points(left, right, .03, .97, inner = False)[0].index) assert [3, 4,", "== index def test_get_detected_points_advanced(): left = pandas.Series([0, 0, 0, 0, 0]) right =", "4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = True)[0].index) assert [3,", "safety. 
assert result.tolist() == [bool(i) for i in expected] @pytest.mark.parametrize( \"values, expected\", [", "0, 0, 0], [0, .14, 0, 0, 0], [1]), ([0, 0, 0, 0,", "r, 0.03) assert list(rl.index) == list(rr.index) assert list(rl.index) == index def test_get_detected_points_advanced(): left", "expected @pytest.mark.parametrize( \"left, right, expected\", [ ([0, 0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000,", "# Since pandas likes to return a series of bool values when comparing", ".3, .3, .3] def test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263,", "assert result_left.tolist() == [] and result_right.tolist() == [] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file =", "result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000,", "test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving from master", "values when comparing items rather than a scalar result, # Let's check the", "result = widgets.get_numeric_columns(columns) assert result == expected def test_map_trajectories_to_genotype(): table = pandas.DataFrame( {", "0], [0, 1, 2, 3, 4]), ([0, 1, 0, 0.2, 0], [0, .14,", "= 0.03, inner = True) assert l.tolist() == [0.085] assert r.tolist() == [0.05]", "on pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored difference", "0.07, 0.081, 0.069, 0.042] expected_right = [1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right", "0.000, 0.000, 0.000, 0.000], pandas.Series([])) ] ) def test_get_fixed(values, expected): result = widgets.get_fixed(values,", "1, 1], True), ([0, .1, .2, .3, .4, .5], False) ] ) def", "[ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 3), ([0.000, 0.000, 0.000, 0.525,", "[2, 3, 4, 5, 6, 7] == 
list(widgets.get_detected_points(left, right, .03, inner = False)[0].index)", "switched_result_right = widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist() == expected_right assert switched_result_right.tolist() == expected_left", ".53, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) ==", "[0,1,2])), ([0.000, 0.000, 0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000,", "0, 0.085, 0.001, 0.005]) right = pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55, 0.5", ".9] assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3, 0]", "'B1': 'B', 'B2': 'B', 'C1': 'C'} output = widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output", "'C'} output = widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output @pytest.mark.parametrize( \"left,right,index\", [ ([0, 1,", "to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit: Update based on pycharm", "widgets.get_commit_hash() assert expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1,", "[ ([0, 0, 0, 1, 1, 1], True), ([0, 1, 0, 1, 0,", "0.000], pandas.Series([])) ] ) def test_get_fixed(values, expected): result = widgets.get_fixed(values, 0.9) # Since", "[1]), ([0, 0, 0, 0, 0], [0, .14, 0, 1, 1], [1, 2,", "@pytest.mark.parametrize( \"left,right,index\", [ ([0, 1, 1, 4, 5], [.23, .14, .13, 0, 0],", "expected_left = [0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [0.170,", "([0, 1, 0, 1, 0, 1], True), ([0, .2, 1, 1, 1], False)", "result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\", [ (3,", "[ (3, 3, 1), (4, 2, 6), (6, 3, 20) ] ) def", ".14, .53, 1, 1]) result_left, result_right = 
widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index)", "1, 1], [1, 2, 3, 4]), ] ) def test_get_detected_points(left, right, index): l", ".2, .3, .4, .5, .6, .7, .8, .9] assert result_right.tolist() == [0, 0,", ".5, .6, .7, .8], 'right': [.1, .2, .3, .3, .3, .3], }, index", "assert result_right.tolist() == [.1, .2, .3, .3, .3, .3] def test_get_valid_points_complex(): left =", "4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = False)[0].index) assert [2,", "of bool values when comparing items rather than a scalar result, # Let's", "test_get_valid_points_simple(): left = pandas.Series([0, .1, .2, .3, .4, .5, .6, .7, .8, .9,", "pandas.Series([0.911], index = [5])), ([0.000, 0.000, 0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([])) ]", "def test_get_undetected(values, expected): result = widgets.get_undetected(values, 0.03) # Since pandas likes to return", ".6, .7, .8, .9, 1, 0]) right = pandas.Series([0, 0, 0, .1, .2,", "0, 1 ,2, 3], [0, 1, 2, 3]) ] ) def test_get_numeric_columns(columns, expected):", "0.9) # Convert to bool for safety. assert result.tolist() == [bool(i) for i", "pandas.Series(left) r = pandas.Series(right) rl, rr = widgets.get_valid_points(l, r, 0.03) assert list(rl.index) ==", "result_right.tolist() == expected_right result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True)", "= widgets.get_overlap_regions(left, right, 0.9) # Convert to bool for safety. 
assert result.tolist() ==", "'0', 'X9', 'x33']), ( ['Genotype', 0, 1 ,2, 3], [0, 1, 2, 3])", "list(widgets.get_detected_points(left, right, .03, inner = False)[0].index) assert [2, 3, 4, 5, 6] ==", "'col4'], ['1', '66', '0', 'X9', 'x33']), ( ['Genotype', 0, 1 ,2, 3], [0,", "0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [0.170, 0.55, 0.947, 1.00,", "1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, 0.03, inner = True)", "045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit: Update based on pycharm code inspecter", "= pandas.Series([0, 0, .3, .4, .4, .4, 1, 1]) right = pandas.Series([0, 0,", "index = [2])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index", "widgets._coerce_to_list(values) assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000,", "`inner` option. left = pandas.Series([0, 0, .3, .4, .4, .4, 1, 1]) right", "assert list(rl.index) == index def test_get_detected_points_advanced(): left = pandas.Series([0, 0, 0, 0, 0])", "0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], index = [3,4,6])), ([0.000, 0.000, 1.000,", "0.069, 0.042] expected_right = [0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00] result_left,", "0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [3, 4] # Check", "== [] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670", "None), ([1, .1, .1, .1], 0) ] ) def test_get_first_fixed_timepoint(values, expected): result =", "index): l = pandas.Series(left) r = pandas.Series(right) rl, rr = widgets.get_valid_points(l, r, 0.03)", "0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 
0.910], [0,0,0,0,0,1,1]),", "[.3, .4, .5, .6, .7, .8], 'right': [.1, .2, .3, .3, .3, .3],", "4, 5, 6] == list(widgets.get_detected_points(left, right, .03, .97, inner = False)[0].index) assert [3,", "0, 0] result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97) assert result_left.tolist() == [.1,", "= True) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right result_left, result_right =", "pandas.Series([0, .14, 0, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert", "1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index = [2])), ([0.000, 0.000, 0.000, 0.525, 0.454,", "== list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\", [ (3, 3, 1), (4, 2, 6),", "pytest from muller import dataio, widgets @pytest.mark.parametrize( \"columns, expected\", [ (['1', '66', '0',", "inner = False)[0].index) assert [3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03,", "to return a series of bool values when comparing items rather than a", "widgets.find_boundaries_fixed(s, 0.97) assert result == expected @pytest.mark.parametrize( \"left, right, expected\", [ ([0, 0,", ".1, .1, .1, .2, 1]) assert [2, 3, 4, 5, 6, 7] ==", "\"columns, expected\", [ (['1', '66', '0', 'X9', 'xc', 'x33', 'col4'], ['1', '66', '0',", "pandas.Series(series) assert widgets.fixed_immediately(s, 0.03, 0.97) == expected @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0,", "def test_find_boundaries_fixed(series, expected): s = pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97) assert result ==", "(4, 2, 6), (6, 3, 20) ] ) def test_calculate_total_number_of_combinations(elements, size, expected): result", "assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [1] left = pandas.Series([0, 0, 0,", "result_left.tolist() == [] and result_right.tolist() == [] @patch('muller.widgets._get_git_log') def 
test_get_commit_hash(filename_mock): test_file = \"\"\"", "directly. assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values, expected\", [", "0, .1, .2, .3, .3, .3, .3, 0, 0] result_left, result_right = widgets.get_valid_points(left,", "= [5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index = [5])),", "inner = True) assert result_left.tolist() == [] and result_right.tolist() == [] @patch('muller.widgets._get_git_log') def", "@pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 3), ([0.000,", ".14, 0, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index)", "0.2, 0.3, 0.4, 0.5], None), ([0.1, 0.4, 0.3, 1, 0.5, 0.2], (3,3)) ]", "[0.085] assert r.tolist() == [0.05] def test_get_valid_points_simple(): left = pandas.Series([0, .1, .2, .3,", "{ 'genotype': ['A', 'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1'] } ) table =", "0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left, right, expected): result = widgets.get_overlap_regions(left,", ".1, .1, .1], 0) ] ) def test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values, 0.9)", "[0.05] def test_get_valid_points_simple(): left = pandas.Series([0, .1, .2, .3, .4, .5, .6, .7,", "from unittest.mock import patch from loguru import logger import pandas import pytest from", "<reponame>cdeitrick/Lolipop from unittest.mock import patch from loguru import logger import pandas import pytest", "0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]),", "0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left, right, expected):", "3], [0, 1, 2, 3]) ] ) def test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns)", "right 
= pandas.Series([0, 0, 0, .1, .1, .1, .2, 1]) assert [2, 3,", "-0500 commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit: Changed", "0, 1, 1, 1], True), ([0, 1, 0, 1, 0, 1], True), ([0,", ".2, 1, 1, 1], False) ] ) def test_fixes_imediately(series, expected): s = pandas.Series(series)", "result_right.tolist() == [] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>>", "commit: Changed Default Clustering Method \"\"\" expected_hash = \"f086ec9\" filename_mock.return_value = test_file result_hash", "s = pandas.Series(series) assert widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize( \"series, expected\", [ ([0.1,", "0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000,", "pandas.Series([0, 0, 0, .14, .53, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03,", ".1, .2, 1]) assert [2, 3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right,", "1, 0, 1, 0, 1], True), ([0, .2, 1, 1, 1], False) ]", "expected_right = [0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right =", "than a scalar result, # Let's check the values and index directly. 
assert", "5], [.23, .14, .13, 0, 0], [0, 1, 2, 3, 4]), ([0, 1,", "0.001, 0.005]) right = pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55, 0.5 ]) l,r", "([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], index = [3,4,6])),", "([0, 0, 0, 0, 0], [0, .14, 0, 0, 0], [1]), ([0, 0,", "result_left, result_right = widgets.get_valid_points(left, right, 0.03) assert result_left.tolist() == [.1, .2, .3, .4,", "widgets.get_valid_points(left, right, dlimit = 0.03, inner = True) assert l.tolist() == [0.085] assert", "([0, 1, 0, 0.2, 0], [0, .14, 0, 0, 0], [1, 2, 3]),", "([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 5), ([.1, .1, .1, .1], None),", "right = pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55, 0.5 ]) l,r = widgets.get_valid_points(left,", "1, 0, 1, 0, 1], True), ([0, .2, 1, 1, 1], True), ([0,", ") def test_calculate_total_number_of_combinations(elements, size, expected): result = widgets.calculate_number_of_combinations(elements, size) assert result == expected", "<<EMAIL>> 1551713984 -0500 commit: Changed Default Clustering Method \"\"\" expected_hash = \"f086ec9\" filename_mock.return_value", "from loguru import logger import pandas import pytest from muller import dataio, widgets", "expected_left assert result_right.tolist() == expected_right result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner", "\"left, right, expected\", [ ([0, 0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000,", "expected\", [ ([0, 0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454,", "= widgets.get_intermediate(values, 0.03, 0.9) # Since pandas likes to return a series of", ".3] def test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263, 0.07, 0.081,", ".3, .3], }, index = range(3, 9)) result_left, result_right = widgets.get_valid_points(left, right, 0.03,", "assert [3, 4, 5] == list(widgets.get_detected_points(left, right, .03, .97, inner = True)[0].index) def", "== 
expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\", [ (3, 3,", "pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00]) expected_left = [0.000,", "1, 0.2, 0.98, 0.1], (1,3)), ([0.1, 0.2, 0.3, 0.4, 0.5], None), ([0.1, 0.4,", "[1, 2, 3]), ([0, 0, 0, 0, 0], [0, .14, .23, 0, 0],", ".7, .8, .9, 1, 0]) right = pandas.Series([0, 0, 0, .1, .2, .3,", "@pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1], True), ([0, 1, 0,", "widgets.get_valid_points(left, right, 0.03, inner = True) assert result_left.tolist() == expected_left assert result_right.tolist() ==", ".14, .23, 0, 0], [1, 2]), ([0, 0, 0, 0, 0], [0, .14,", "True), ([0, .1, .2, .3, .4, .5], False) ] ) def test_fixed(series, expected):", "2, 3]) ] ) def test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns) assert result ==", "0], [1]), ([0, 0, 0, 0, 0], [0, .14, 0, 1, 1], [1,", "\"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving from master to version0.2", ".03, inner = False)[0].index) assert [2, 3, 4, 5, 6] == list(widgets.get_detected_points(left, right,", "1.00, 1.00, 1.00]) expected_left = [0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042]", "'B2': 'B', 'C1': 'C'} output = widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output @pytest.mark.parametrize( \"left,right,index\",", "3, 4, 5, 6] == list(widgets.get_detected_points(left, right, .03, .97, inner = False)[0].index) assert", "left = pandas.Series([0, 0, 0, 0, 0]) right = pandas.Series([0, .14, 0, 1,", "def test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values, 0.9) assert result == expected @pytest.mark.parametrize( \"values,", "expected @pytest.mark.parametrize( \"series, expected\", [ ([0.1, 0.4, 0.3, 1, 0.97 , 1], (3,5)),", "0, 0, 0, 0.085, 
0.001, 0.005]) right = pandas.Series([0,0, 0, 0, 0, 0,0.05,", "1, 0.97 , 1], (3,5)), ([0.2, 1, 0.2, 0.98, 0.1], (1,3)), ([0.1, 0.2,", "= \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving from master to", "list(result_left.index) == list(result_right.index) assert list(result_left.index) == [1] left = pandas.Series([0, 0, 0, 0,", "expected_hash = \"f086ec9\" filename_mock.return_value = test_file result_hash = widgets.get_commit_hash() assert expected_hash == result_hash", "1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, dlimit = 0.03) assert", ".3, .3, .3, .3, 0, 0] result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97)", "-0500 commit: Changed Default Clustering Method \"\"\" expected_hash = \"f086ec9\" filename_mock.return_value = test_file", "0.454, 0.911, 0.910], [0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 0.000,", "([0, 0, 0, 1, 1, 1], True), ([0, 1, 0, 1, 0, 1],", "0.261, 1.000, 1.000, 1.000, 1.000], 3), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910],", "= [3,4,5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index =", "== result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1], True), ([0,", "0.5, 0.2], (3,3)) ] ) def test_find_boundaries_fixed(series, expected): s = pandas.Series(series) result =", "[3, 4])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], index", "] ) def test_get_fixed(values, expected): result = widgets.get_fixed(values, 0.9) # Since pandas likes", "7] == list(widgets.get_detected_points(left, right, .03, inner = False)[0].index) assert [2, 3, 4, 5,", "== output @pytest.mark.parametrize( \"left,right,index\", [ ([0, 1, 1, 4, 5], [.23, .14, .13,", "0, 0], [0, .14, 0, 1, 1], [1, 2, 3, 4]), ] )", "expected): s = pandas.Series(series) assert 
widgets.fixed_immediately(s, 0.03, 0.97) == expected @pytest.mark.parametrize( \"series,expected\", [", ".03, inner = True)[0].index) assert [3, 4, 5] == list(widgets.get_detected_points(left, right, .03, .97,", "assert [2, 3, 4, 5, 6] == list(widgets.get_detected_points(left, right, .03, .97, inner =", "pandas import pytest from muller import dataio, widgets @pytest.mark.parametrize( \"columns, expected\", [ (['1',", "0.911, 0.910], 5), ([.1, .1, .1, .1], None), ([1, .1, .1, .1], 0)", ".1, .1], None), ([1, .1, .1, .1], 0) ] ) def test_get_first_fixed_timepoint(values, expected):", "([0.1, 0.2, 0.3, 0.4, 0.5], None), ([0.1, 0.4, 0.3, 1, 0.5, 0.2], (3,3))", "[0, .14, .23, 0, 0], [1, 2]), ([0, 0, 0, 0, 0], [0,", "0.910], pandas.Series([0, 0,0], index = [0,1,2])), ([0.000, 0.000, 0.000, 0.525, 0.020, 0.911, 0.810],", "0, 0], [0, .14, .23, 0, 0], [1, 2]), ([0, 0, 0, 0,", "([0, 0, 0, 0, 0], [0, .14, .23, 0, 0], [1, 2]), ([0,", "3), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 5), ([.1, .1, .1, .1],", "Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit: Changed Default Clustering", "pandas.DataFrame( { 'genotype': ['A', 'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1'] } ) table", "[0,1,2,4])), ([1.000, 1.000, 0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_undetected(values,", "expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000,", "[1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, 0.03, inner =", ".3, .3, .3, .3] def test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00,", "pandas.Series([])) ] ) def test_get_intermediate(values, expected): result = widgets.get_intermediate(values, 0.03, 0.9) # Since", "0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 
pandas.Series([1,1,1,1], index = [3,4,5,6])), ([0.000, 0.000, 0.000,", "widgets.get_undetected(values, 0.03) # Since pandas likes to return a series of bool values", "0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index = [5])), ([0.000, 0.000, 0.860, 0.000,", ".3, .4, .5, .6, .7, .8, .9] assert result_right.tolist() == [0, 0, .1,", "True) assert l.tolist() == [0.085] assert r.tolist() == [0.05] def test_get_valid_points_simple(): left =", "[ ([0, 0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911,", "= widgets.get_valid_points(left, right, 0.03) assert result_left.tolist() == [.1, .2, .3, .4, .5, .6,", ".4, .5, .6, .7, .8, .9] assert result_right.tolist() == [0, 0, .1, .2,", "difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit: Changed Default Clustering Method", "0.3, 1, 0.5, 0.2], (3,3)) ] ) def test_find_boundaries_fixed(series, expected): s = pandas.Series(series)", "1.000], 3), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 5), ([.1, .1, .1,", "[ ([0, 1, 1, 4, 5], [.23, .14, .13, 0, 0], [0, 1,", "== list(rr.index) assert list(rl.index) == index def test_get_detected_points_advanced(): left = pandas.Series([0, 0, 0,", "test_get_undetected(values, expected): result = widgets.get_undetected(values, 0.03) # Since pandas likes to return a", "([0, 0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910],", "widgets.get_first_fixed_timepoint(values, 0.9) assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261,", "1.00] result_left, result_right = widgets.get_valid_points(left, right, 0.03, inner = True) assert result_left.tolist() ==", "0], [1, 2, 3]), ([0, 0, 0, 0, 0], [0, .14, .23, 0,", "result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist() == []", "1551711685 -0500 commit: Update based on pycharm code 
inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>>", "index = [5])), ([0.000, 0.000, 0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([])) ] )", "assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3, 0] expected", ".6, .7, .8, .9] assert result_right.tolist() == [0, 0, .1, .2, .3, .3,", "0, 0, .1, .2, .3, .3, .3, .3, 0, 0, 0]) result_left, result_right", "right, 0.03) assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8,", "0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index = [5])), ([0.000, 0.000, 0.860, 0.000, 0.000,", "0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index = [5,6])), ([0.000, 0.000, 0.000, 0.525,", ".7, .8, .9, 1] assert result_right.tolist() == [0, 0, .1, .2, .3, .3,", "1.000, 1.000], pandas.Series([0.261], index = [2])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910],", "0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([])) ] ) def test_get_fixed(values, expected): result =", "1551711670 -0500 checkout: moving from master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685", ".2, .3, .3, .3, .3, 0, 0] result_left, result_right = widgets.get_valid_points(left, right, 0.03,", "0.810], pandas.Series([0.911], index = [5])), ([0.000, 0.000, 0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([]))", "result == expected @pytest.mark.parametrize( \"left, right, expected\", [ ([0, 0, 0.261, 1.000, 1.000,", "] ) def test_find_boundaries_fixed(series, expected): s = pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97) assert", "a series of bool values when comparing items rather than a scalar result,", ".03, .97, inner = False)[0].index) assert [3, 4, 5, 6, 7] == list(widgets.get_detected_points(left,", "# Convert to bool for safety. 
assert result.tolist() == [bool(i) for i in", "0, .1, .1, .1, .2, 1]) assert [2, 3, 4, 5, 6, 7]", ".3, .4, .5], False) ] ) def test_fixed(series, expected): s = pandas.Series(series) assert", "\"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index =", "([0.000, 0.000, 0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([])) ] ) def test_get_fixed(values, expected):", "= [0,1,2,4])), ([1.000, 1.000, 0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def", ".2, .3, .3, .3, .3] def test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00, 0.000, 0.00,", "series of bool values when comparing items rather than a scalar result, #", "right, .03, .97, inner = False)[0].index) assert [3, 4, 5, 6, 7] ==", "[5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index = [5])), ([0.000,", "expected def test_map_trajectories_to_genotype(): table = pandas.DataFrame( { 'genotype': ['A', 'B', 'C'], 'members': ['A1|A2|A3',", "0.03, 0.97, inner = True) assert result_left.tolist() == [.3, .4, .5, .6, .7,", "= 0.03) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right switched_result_left, switched_result_right =", "0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], index = [3,4,6])), ([0.000,", "list(result_right.index) assert list(result_left.index) == [1] left = pandas.Series([0, 0, 0, 0, 0, 1,", "pandas likes to return a series of bool values when comparing items rather", "0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index = [3, 4])), ([0.000,", "0.97) assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8, .9]", "1, 0, 0.2, 0], [0, .14, 0, 0, 0], [1, 2, 3]), ([0,", "([1.000, 1.000, 0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_undetected(values, expected):", "result_left, result_right = widgets.get_valid_points(left, right, dlimit = 0.03) assert 
result_left.tolist() == expected_left assert", "0]) right = pandas.Series([0, .14, 0, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right,", ".7, .8, .9] assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3,", ") def test_find_boundaries_fixed(series, expected): s = pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97) assert result", "4, 5], [.23, .14, .13, 0, 0], [0, 1, 2, 3, 4]), ([0,", "True) assert result_left.tolist() == [] and result_right.tolist() == [] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file", "0.069, 0.042]) right = pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00,", "0.03, 0.97) == expected @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1],", "pandas.Series([0, 0, 0, 0, 0, 1, 1]) right = pandas.Series([0, 0, 0, .14,", "== [3, 4] # Check the `inner` option. left = pandas.Series([0, 0, .3,", ") def test_get_fixed(values, expected): result = widgets.get_fixed(values, 0.9) # Since pandas likes to", "assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\", [ (3, 3, 1), (4,", "'x33']), ( ['Genotype', 0, 1 ,2, 3], [0, 1, 2, 3]) ] )", "expected_left expected_left = [0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [1.00, 1.00, 1.00,", "pandas.Series([0, 0, .3, .4, .4, .4, 1, 1]) right = pandas.Series([0, 0, 0,", "'C1': 'C'} output = widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output @pytest.mark.parametrize( \"left,right,index\", [ ([0,", "widgets.get_valid_points(left, right, dlimit = 0.03) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right", "[0, 0, .1, .2, .3, .3, .3, .3, 0, 0] result_left, result_right =", "False)[0].index) assert [3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner =", ".3, .3, 0] expected = pandas.DataFrame({ 'left': [.3, .4, .5, .6, .7, .8],", "index def 
test_get_detected_points_advanced(): left = pandas.Series([0, 0, 0, 0, 0]) right = pandas.Series([0,", "0.03) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right,", "0.97 , 1], (3,5)), ([0.2, 1, 0.2, 0.98, 0.1], (1,3)), ([0.1, 0.2, 0.3,", "index = [0,1,2])), ([0.000, 0.000, 0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index =", "2, 6), (6, 3, 20) ] ) def test_calculate_total_number_of_combinations(elements, size, expected): result =", "['1', '66', '0', 'X9', 'x33']), ( ['Genotype', 0, 1 ,2, 3], [0, 1,", "0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0], index = [0,1,2])), ([0.000, 0.000, 0.000,", "widgets @pytest.mark.parametrize( \"columns, expected\", [ (['1', '66', '0', 'X9', 'xc', 'x33', 'col4'], ['1',", ".3, .3, 0, 0] result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97) assert result_left.tolist()", "= widgets.get_valid_points(left, right, dlimit = 0.03, inner = True) assert l.tolist() == [0.085]", "0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ]", ".97, inner = False)[0].index) assert [3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right,", "([0, .2, 1, 1, 1], True), ([0, .1, .2, .3, .4, .5], False)", ".1, .1], 0) ] ) def test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values, 0.9) assert", "pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000, 1.000, 0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ]", "0.00, 0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [0.170, 0.55, 0.947, 1.00, 1.00,", "1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert", "result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8, .9] assert result_right.tolist()", "test_get_overlap_regions(left, 
right, expected): result = widgets.get_overlap_regions(left, right, 0.9) # Convert to bool for", "== [0, 0, .1, .2, .3, .3, .3, .3, 0] expected = pandas.DataFrame({", "expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261],", "0.000, 0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000, 1.000, 0.860,", "based on pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored", "0.454, 0.911, 0.810], pandas.Series([0.911], index = [5])), ([0.000, 0.000, 0.860, 0.000, 0.000, 0.000,", "size, expected\", [ (3, 3, 1), (4, 2, 6), (6, 3, 20) ]", "0, 0], [1, 2]), ([0, 0, 0, 0, 0], [0, .14, 0, 0,", "= [1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, 0.03, inner", "list(result_left.index) == [1] left = pandas.Series([0, 0, 0, 0, 0, 1, 1]) right", "expected] @pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't', 'r',", "Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984", "= widgets.get_first_fixed_timepoint(values, 0.9) assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000,", "2, 3, 4]), ] ) def test_get_detected_points(left, right, index): l = pandas.Series(left) r", "expected): result = widgets._coerce_to_list(values) assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000,", ".3, .3, .3, 0, 0, 0]) result_left, result_right = widgets.get_valid_points(left, right, 0.03) assert", ",2, 3], [0, 1, 2, 3]) ] ) def test_get_numeric_columns(columns, expected): result =", ".1, .2, .3, .3, .3, .3, 0, 0] result_left, result_right = 
widgets.get_valid_points(left, right,", "0, 0, 0, 0, 0.085, 0.001, 0.005]) right = pandas.Series([0,0, 0, 0, 0,", "switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist() == expected_right assert switched_result_right.tolist() ==", "assert result.tolist() == [bool(i) for i in expected] @pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]),", "expected_right = [1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, 0.03,", "4])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], index =", "0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000, 1.000, 0.860, 1.000, 1.000, 1.000,", "[ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't', 'r', 'i', 'n', 'g', '1'])", "assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000,", "0.000, 0.525, 0.454, 0.911, 0.910], 5), ([.1, .1, .1, .1], None), ([1, .1,", "widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist() == [.3, .4, .5,", "0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [3, 4] # Check the", "= [3,4,6])), ([0.000, 0.000, 1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def", "5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = False)[0].index) assert [2, 3,", "1]) right = pandas.Series([0, 0, 0, .14, .53, 1, 1]) result_left, result_right =", "def test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns) assert result == expected def test_map_trajectories_to_genotype(): table", "widgets.get_numeric_columns(columns) assert result == expected def test_map_trajectories_to_genotype(): table = pandas.DataFrame( { 'genotype': ['A',", "dlimit = 0.03, inner = True) 
assert l.tolist() == [0.085] assert r.tolist() ==", "left = pandas.Series([0, 0, 0, 0, 0, 1, 1]) right = pandas.Series([0, 0,", "1.00, 1.00]) expected_left = [0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042] expected_right", "0, 0, 0, 0,0.05, 0.55, 0.5 ]) l,r = widgets.get_valid_points(left, right, dlimit =", "pandas.Series(series) assert widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize( \"series, expected\", [ ([0.1, 0.4, 0.3,", "1, 1], True), ([0, 1, 0, 1, 0, 1], True), ([0, .2, 1,", ".2, .3, .3, .3, .3], }, index = range(3, 9)) result_left, result_right =", "1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_undetected(values, expected): result = widgets.get_undetected(values, 0.03)", "== list(widgets.get_detected_points(left, right, .03, .97, inner = False)[0].index) assert [3, 4, 5, 6,", "list(rl.index) == index def test_get_detected_points_advanced(): left = pandas.Series([0, 0, 0, 0, 0]) right", "] ) def test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values) assert result == expected @pytest.mark.parametrize(", "= pandas.Series([0, 0, 0, 0, 0, 1, 1]) right = pandas.Series([0, 0, 0,", "(3,5)), ([0.2, 1, 0.2, 0.98, 0.1], (1,3)), ([0.1, 0.2, 0.3, 0.4, 0.5], None),", "0.042]) right = pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00])", "'A', 'A2': 'A', 'A3': 'A', 'B1': 'B', 'B2': 'B', 'C1': 'C'} output =", "table = table.set_index('genotype') expected_map = {'A1': 'A', 'A2': 'A', 'A3': 'A', 'B1': 'B',", "0.911, 0.910], [0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left, right, expected): result = widgets.get_overlap_regions(left, right,", "list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0],", "1.000, 1.000, 1.000], pandas.Series([0,0], index = [0,1])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911,", "Convert to bool for safety. 
assert result.tolist() == [bool(i) for i in expected]", "== expected_left assert result_right.tolist() == expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03) assert", "0.3, 0.4, 0.5], None), ([0.1, 0.4, 0.3, 1, 0.5, 0.2], (3,3)) ] )", "0.911, 0.910], [0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 0.000, 0.525,", "Method \"\"\" expected_hash = \"f086ec9\" filename_mock.return_value = test_file result_hash = widgets.get_commit_hash() assert expected_hash", "0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index = [5,6])), ([0.000, 0.000,", "[2, 3, 4, 5, 6] == list(widgets.get_detected_points(left, right, .03, .97, inner = False)[0].index)", ".5, .6, .7, .8, .9, 1] assert result_right.tolist() == [0, 0, .1, .2,", "0.000, 0.200, 0.200],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ] ) def", "0.081, 0.069, 0.042]) right = pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00, 1.00, 1.00,", "(3, 3, 1), (4, 2, 6), (6, 3, 20) ] ) def test_calculate_total_number_of_combinations(elements,", "right, dlimit = 0.03, inner = True) assert l.tolist() == [0.085] assert r.tolist()", "expected): result = widgets.get_first_fixed_timepoint(values, 0.9) assert result == expected @pytest.mark.parametrize( \"values, expected\", [", "[3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = True)[0].index) assert", "widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist() == [] and result_right.tolist()", "result = widgets._coerce_to_list(values) assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000,", "test_fixes_imediately(series, expected): s = pandas.Series(series) assert widgets.fixed_immediately(s, 0.03, 0.97) == expected @pytest.mark.parametrize( \"series,expected\",", "'A3': 'A', 'B1': 'B', 'B2': 'B', 'C1': 'C'} output = 
widgets.map_trajectories_to_genotype(table['members']) assert expected_map", "0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,1,1]), ([0,", "'A2': 'A', 'A3': 'A', 'B1': 'B', 'B2': 'B', 'C1': 'C'} output = widgets.map_trajectories_to_genotype(table['members'])", "index = [3,4,5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index", "result_right.tolist() == expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist() == expected_right", "test_get_fixed(values, expected): result = widgets.get_fixed(values, 0.9) # Since pandas likes to return a", "= [0,1,2])), ([0.000, 0.000, 0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])),", "20) ] ) def test_calculate_total_number_of_combinations(elements, size, expected): result = widgets.calculate_number_of_combinations(elements, size) assert result", "0) ] ) def test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values, 0.9) assert result ==", "[] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500", "expected): result = widgets.get_fixed(values, 0.9) # Since pandas likes to return a series", "result = widgets.get_undetected(values, 0.03) # Since pandas likes to return a series of", "list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1],", "1.000, 1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910],", "= widgets.find_boundaries_fixed(s, 0.97) assert result == expected @pytest.mark.parametrize( \"left, right, expected\", [ ([0,", "inner = False)[0].index) assert [2, 3, 4, 5, 6] == 
list(widgets.get_detected_points(left, right, .03,", "'t', 'r', 'i', 'n', 'g', '1']) ] ) def test_coerce_to_list(values, expected): result =", ") def test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns) assert result == expected def test_map_trajectories_to_genotype():", "assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000,", "[0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right,", "1.000, 0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_undetected(values, expected): result", "rl, rr = widgets.get_valid_points(l, r, 0.03) assert list(rl.index) == list(rr.index) assert list(rl.index) ==", "0.4, 0.3, 1, 0.5, 0.2], (3,3)) ] ) def test_find_boundaries_fixed(series, expected): s =", ".14, 0, 1, 1], [1, 2, 3, 4]), ] ) def test_get_detected_points(left, right,", "2]), ([0, 0, 0, 0, 0], [0, .14, 0, 0, 0], [1]), ([0,", "expected\", [ ([0.1, 0.4, 0.3, 1, 0.97 , 1], (3,5)), ([0.2, 1, 0.2,", "= widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist() == expected_right assert switched_result_right.tolist() == expected_left expected_left", ".13, 0, 0], [0, 1, 2, 3, 4]), ([0, 1, 0, 0.2, 0],", "pandas.Series([0, 0,0], index = [0,1,2])), ([0.000, 0.000, 0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020],", "d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>>", "def test_get_detected_points_inner(): left = pandas.Series([0, 0, 0, 0, 0, 0, 0.085, 0.001, 0.005])", "6), (6, 3, 20) ] ) def test_calculate_total_number_of_combinations(elements, size, expected): result = widgets.calculate_number_of_combinations(elements,", "1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_undetected(values, 
expected): result = widgets.get_undetected(values,", ".14, .13, 0, 0], [0, 1, 2, 3, 4]), ([0, 1, 0, 0.2,", "result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index)", "1.000], pandas.Series([0,0], index = [0,1])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0,", "== list(widgets.get_detected_points(left, right, .03, .97, inner = True)[0].index) def test_get_detected_points_inner(): left = pandas.Series([0,", "likes to return a series of bool values when comparing items rather than", "the values and index directly. assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index)", "([0.000, 0.000, 1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_intermediate(values, expected):", "] ) def test_get_detected_points(left, right, index): l = pandas.Series(left) r = pandas.Series(right) rl,", "1551713984 -0500 commit: Changed Default Clustering Method \"\"\" expected_hash = \"f086ec9\" filename_mock.return_value =", "= True) assert l.tolist() == [0.085] assert r.tolist() == [0.05] def test_get_valid_points_simple(): left", "expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't', 'r', 'i', 'n', 'g',", "0, 1], True), ([0, .2, 1, 1, 1], False) ] ) def test_fixes_imediately(series,", "result.tolist() == [bool(i) for i in expected] @pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]),", "([0, 1, 0, 1, 0, 1], True), ([0, .2, 1, 1, 1], True),", "index directly. 
assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements, size,", "pandas.Series([])) ] ) def test_get_undetected(values, expected): result = widgets.get_undetected(values, 0.03) # Since pandas", "'66', '0', 'X9', 'xc', 'x33', 'col4'], ['1', '66', '0', 'X9', 'x33']), ( ['Genotype',", "bool values when comparing items rather than a scalar result, # Let's check", "inner = True)[0].index) assert [3, 4, 5] == list(widgets.get_detected_points(left, right, .03, .97, inner", "0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 5), ([.1, .1, .1, .1], None), ([1,", "expected): result = widgets.get_undetected(values, 0.03) # Since pandas likes to return a series", "pandas.Series([0.525, 0.454, 0.810], index = [3,4,6])), ([0.000, 0.000, 1.000, 1.000, 1.000, 1.000, 1.000],", "expected\", [ (['1', '66', '0', 'X9', 'xc', 'x33', 'col4'], ['1', '66', '0', 'X9',", "right, 0.03, 0.97) assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7,", ") def test_fixed(series, expected): s = pandas.Series(series) assert widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize(", "1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,1,1]), ([0, 0,", "3, 20) ] ) def test_calculate_total_number_of_combinations(elements, size, expected): result = widgets.calculate_number_of_combinations(elements, size) assert", ".2, .3, .4, .5, .6, .7, .8, .9, 1] assert result_right.tolist() == [0,", "0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index = [0,1])), ([0.000, 0.000, 0.000,", ".8, .9, 1] assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3,", "[0, .14, 0, 1, 1], [1, 2, 3, 4]), ] ) def test_get_detected_points(left,", "test_find_boundaries_fixed(series, expected): s = pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97) assert result == expected", "@pytest.mark.parametrize( \"left, right, expected\", [ ([0, 0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 
0.000,", "0.947, 1.00, 1.00, 1.00, 1.00, 1.00]) expected_left = [0.000, 0.00, 0.00, 0.263, 0.07,", "list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\", [ (3, 3, 1), (4, 2,", "Unknown <<EMAIL>> 1551711685 -0500 commit: Update based on pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe", "widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize( \"series, expected\", [ ([0.1, 0.4, 0.3, 1, 0.97", "0, 0], [0, .14, 0, 0, 0], [1]), ([0, 0, 0, 0, 0],", "[0, 0, .1, .2, .3, .3, .3, .3, 0] expected = pandas.DataFrame({ 'left':", "'g', '1']) ] ) def test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values) assert result ==", "0.3, 1, 0.97 , 1], (3,5)), ([0.2, 1, 0.2, 0.98, 0.1], (1,3)), ([0.1,", "inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002", "== expected @pytest.mark.parametrize( \"left, right, expected\", [ ([0, 0, 0.261, 1.000, 1.000, 1.000,", "1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_intermediate(values, expected): result = widgets.get_intermediate(values, 0.03,", ") def test_get_intermediate(values, expected): result = widgets.get_intermediate(values, 0.03, 0.9) # Since pandas likes", ".3, .4, .4, .4, 1, 1]) right = pandas.Series([0, 0, 0, .1, .1,", "pandas.Series([0, 0, 0, .1, .2, .3, .3, .3, .3, 0, 0, 0]) result_left,", "]) l,r = widgets.get_valid_points(left, right, dlimit = 0.03, inner = True) assert l.tolist()", "1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911,", "0.03, inner = True) assert l.tolist() == [0.085] assert r.tolist() == [0.05] def", "= [5])), ([0.000, 0.000, 0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([])) ] ) def", ".1, .2, .3, .3, .3, .3, 0] 
expected = pandas.DataFrame({ 'left': [.3, .4,", "= True)[0].index) assert [3, 4, 5] == list(widgets.get_detected_points(left, right, .03, .97, inner =", "[1, 2, 3, 4]), ] ) def test_get_detected_points(left, right, index): l = pandas.Series(left)", "result_right = widgets.get_valid_points(left, right, 0.03, 0.97) assert result_left.tolist() == [.1, .2, .3, .4,", "0.03) assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8, .9,", "1.000, 1.000], pandas.Series([])) ] ) def test_get_undetected(values, expected): result = widgets.get_undetected(values, 0.03) #", "1.000, 1.000, 1.000], pandas.Series([0.261], index = [2])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911,", "expected): s = pandas.Series(series) assert widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize( \"series, expected\", [", "'left': [.3, .4, .5, .6, .7, .8], 'right': [.1, .2, .3, .3, .3,", "expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index = [0,1])),", "= widgets.get_valid_points(left, right, 0.03, 0.97) assert result_left.tolist() == [.1, .2, .3, .4, .5,", "1.000], pandas.Series([])) ] ) def test_get_undetected(values, expected): result = widgets.get_undetected(values, 0.03) # Since", "4] # Check the `inner` option. 
left = pandas.Series([0, 0, .3, .4, .4,", "= table.set_index('genotype') expected_map = {'A1': 'A', 'A2': 'A', 'A3': 'A', 'B1': 'B', 'B2':", "'B', 'C1': 'C'} output = widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output @pytest.mark.parametrize( \"left,right,index\", [", "test_fixed(series, expected): s = pandas.Series(series) assert widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize( \"series, expected\",", "expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 3),", "@pytest.mark.parametrize( \"elements, size, expected\", [ (3, 3, 1), (4, 2, 6), (6, 3,", "assert l.tolist() == [0.085] assert r.tolist() == [0.05] def test_get_valid_points_simple(): left = pandas.Series([0,", "index = [3, 4])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454,", "expected): result = widgets.get_intermediate(values, 0.03, 0.9) # Since pandas likes to return a", ".8] assert result_right.tolist() == [.1, .2, .3, .3, .3, .3] def test_get_valid_points_complex(): left", "0.910], [0,0,0,0,0,0,0]), ] ) def test_get_overlap_regions(left, right, expected): result = widgets.get_overlap_regions(left, right, 0.9)", "0.454, 0.911, 0.910], 5), ([.1, .1, .1, .1], None), ([1, .1, .1, .1],", "0.263, 0.07, 0.081, 0.069, 0.042]) right = pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00,", "master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit: Update based on", "4]), ([0, 1, 0, 0.2, 0], [0, .14, 0, 0, 0], [1, 2,", "= pandas.DataFrame( { 'genotype': ['A', 'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1'] } )", "[.1, .2, .3, .3, .3, .3], }, index = range(3, 9)) result_left, result_right", "result_right.tolist() == [.1, .2, .3, .3, .3, .3] def test_get_valid_points_complex(): left = pandas.Series([0.00,", "calculation d0aa33355336fa3772da8e823660c61296960dfe 
f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit: Changed Default Clustering Method \"\"\"", "0.97, inner = True) assert result_left.tolist() == [.3, .4, .5, .6, .7, .8]", "1, 0]) right = pandas.Series([0, 0, 0, .1, .2, .3, .3, .3, .3,", "assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1]", "0.97, inner = True) assert result_left.tolist() == [] and result_right.tolist() == [] @patch('muller.widgets._get_git_log')", "0.00, 0.263, 0.07, 0.081, 0.069, 0.042]) right = pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947,", "None), ([0.1, 0.4, 0.3, 1, 0.5, 0.2], (3,3)) ] ) def test_find_boundaries_fixed(series, expected):", "pandas.Series(series) result = widgets.find_boundaries_fixed(s, 0.97) assert result == expected @pytest.mark.parametrize( \"left, right, expected\",", "0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [0.170, 0.55, 0.947, 1.00, 1.00, 1.00,", "0, 0], [0, 1, 2, 3, 4]), ([0, 1, 0, 0.2, 0], [0,", "0, 0, 0, 0, 0, 0.085, 0.001, 0.005]) right = pandas.Series([0,0, 0, 0,", "[bool(i) for i in expected] @pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]),", "= widgets._coerce_to_list(values) assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261,", "0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000, 1.000, 0.860, 1.000, 1.000,", "0.2, 0.98, 0.1], (1,3)), ([0.1, 0.2, 0.3, 0.4, 0.5], None), ([0.1, 0.4, 0.3,", "1.000, 1.000, 1.000, 1.000], 3), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 5),", "[0,1])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0], index = [0,1,2])),", "0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000, 1.000, 0.860, 1.000,", "0, 0]) result_left, result_right = widgets.get_valid_points(left, right, 0.03) assert result_left.tolist() == [.1, .2,", "1, 2, 3, 4]), ([0, 1, 0, 
0.2, 0], [0, .14, 0, 0,", "True)[0].index) assert [3, 4, 5] == list(widgets.get_detected_points(left, right, .03, .97, inner = True)[0].index)", "scalar result, # Let's check the values and index directly. assert result.tolist() ==", "and index directly. assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values,", "list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000,", "patch from loguru import logger import pandas import pytest from muller import dataio,", "== [] and result_right.tolist() == [] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522", "0], [0, .14, 0, 0, 0], [1]), ([0, 0, 0, 0, 0], [0,", "1], True), ([0, .2, 1, 1, 1], False) ] ) def test_fixes_imediately(series, expected):", "= pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00]) expected_left =", "{'A1': 'A', 'A2': 'A', 'A3': 'A', 'B1': 'B', 'B2': 'B', 'C1': 'C'} output", "result = widgets.find_boundaries_fixed(s, 0.97) assert result == expected @pytest.mark.parametrize( \"left, right, expected\", [", "0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index = [2])), ([0.000, 0.000, 0.000,", "-0500 checkout: moving from master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500", "bool for safety. 
assert result.tolist() == [bool(i) for i in expected] @pytest.mark.parametrize( \"values,", "3, 4]), ] ) def test_get_detected_points(left, right, index): l = pandas.Series(left) r =", "items rather than a scalar result, # Let's check the values and index", "[ (['1', '66', '0', 'X9', 'xc', 'x33', 'col4'], ['1', '66', '0', 'X9', 'x33']),", "index = [5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index =", "result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist() == [.3,", "def test_get_valid_points_simple(): left = pandas.Series([0, .1, .2, .3, .4, .5, .6, .7, .8,", "1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000, 0.000,", "0.081, 0.069, 0.042] expected_right = [1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right =", "= True) assert result_left.tolist() == [.3, .4, .5, .6, .7, .8] assert result_right.tolist()", ".14, 0, 0, 0], [1, 2, 3]), ([0, 0, 0, 0, 0], [0,", "[.1, .2, .3, .4, .5, .6, .7, .8, .9] assert result_right.tolist() == [0,", "0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042]) right = pandas.Series([0.00, 0.00, 0.170,", "== expected_left expected_left = [0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [1.00, 1.00,", "-0500 commit: Update based on pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873", "result = widgets.get_overlap_regions(left, right, 0.9) # Convert to bool for safety. 
assert result.tolist()", "def test_get_detected_points(left, right, index): l = pandas.Series(left) r = pandas.Series(right) rl, rr =", "right = pandas.Series([0, 0, 0, .1, .2, .3, .3, .3, .3, 0, 0,", "1.000], pandas.Series([0.261], index = [2])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525,", "1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454,", "= widgets.get_numeric_columns(columns) assert result == expected def test_map_trajectories_to_genotype(): table = pandas.DataFrame( { 'genotype':", "0, 0, 0, 1, 1]) right = pandas.Series([0, 0, 0, .14, .53, 1,", "expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 3), ([0.000, 0.000, 0.000,", ".5], False) ] ) def test_fixed(series, expected): s = pandas.Series(series) assert widgets.fixed(s, 0.97)", "0]) result_left, result_right = widgets.get_valid_points(left, right, 0.03) assert result_left.tolist() == [.1, .2, .3,", ".3, .4, .5, .6, .7, .8, .9, 1, 0]) right = pandas.Series([0, 0,", "assert list(result_left.index) == [1] left = pandas.Series([0, 0, 0, 0, 0, 1, 1])", "# Let's check the values and index directly. 
assert result.tolist() == expected.tolist() assert", "== expected def test_map_trajectories_to_genotype(): table = pandas.DataFrame( { 'genotype': ['A', 'B', 'C'], 'members':", "4]), ] ) def test_get_detected_points(left, right, index): l = pandas.Series(left) r = pandas.Series(right)", "= widgets.get_valid_points(l, r, 0.03) assert list(rl.index) == list(rr.index) assert list(rl.index) == index def", "5), ([.1, .1, .1, .1], None), ([1, .1, .1, .1], 0) ] )", "'n', 'g', '1']) ] ) def test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values) assert result", "= [3, 4])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810],", ".3, 0, 0] result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97) assert result_left.tolist() ==", "list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\", [ (3, 3, 1), (4, 2, 6), (6,", "assert list(rl.index) == list(rr.index) assert list(rl.index) == index def test_get_detected_points_advanced(): left = pandas.Series([0,", "0.042] expected_right = [0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right", "= pandas.Series([0, 0, 0, 0, 0, 0, 0.085, 0.001, 0.005]) right = pandas.Series([0,0,", "0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])), ([0.000, 0.000, 0.000, 0.525,", "0.2, 0], [0, .14, 0, 0, 0], [1, 2, 3]), ([0, 0, 0,", "0.911, 0.810], pandas.Series([0.911], index = [5])), ([0.000, 0.000, 0.860, 0.000, 0.000, 0.000, 0.000],", "0, 0,0.05, 0.55, 0.5 ]) l,r = widgets.get_valid_points(left, right, dlimit = 0.03, inner", "== expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000],", "1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, dlimit = 0.03)", "== expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261,", 
"0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 3), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911,", "0.000, 1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_intermediate(values, expected): result", "045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving from master to version0.2 045a5b605b03f566c527f6684586322708525522", "= False)[0].index) assert [3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner", "[3,4,6])), ([0.000, 0.000, 1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_intermediate(values,", "widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [1] left", "[0, 1, 2, 3]) ] ) def test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns) assert", "right = pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00]) expected_left", "d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit: Changed Default Clustering Method \"\"\" expected_hash", "moving from master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit: Update", "def test_fixes_imediately(series, expected): s = pandas.Series(series) assert widgets.fixed_immediately(s, 0.03, 0.97) == expected @pytest.mark.parametrize(", "1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_intermediate(values, expected): result =", "[3, 4] # Check the `inner` option. 
left = pandas.Series([0, 0, .3, .4,", "result == expected def test_map_trajectories_to_genotype(): table = pandas.DataFrame( { 'genotype': ['A', 'B', 'C'],", "checkout: moving from master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit:", "'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1'] } ) table = table.set_index('genotype') expected_map =", "0, 0, 0, 0], [0, .14, .23, 0, 0], [1, 2]), ([0, 0,", "[1, 2]), ([0, 0, 0, 0, 0], [0, .14, 0, 0, 0], [1]),", ".6, .7, .8, .9, 1] assert result_right.tolist() == [0, 0, .1, .2, .3,", "'xc', 'x33', 'col4'], ['1', '66', '0', 'X9', 'x33']), ( ['Genotype', 0, 1 ,2,", "= pandas.Series([0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 0])", "== expected @pytest.mark.parametrize( \"series, expected\", [ ([0.1, 0.4, 0.3, 1, 0.97 , 1],", "0, .3, .4, .4, .4, 1, 1]) right = pandas.Series([0, 0, 0, .1,", "result_left.tolist() == expected_left assert result_right.tolist() == expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03)", "[] and result_right.tolist() == [] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522", "] ) def test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns) assert result == expected def", "expected = pandas.DataFrame({ 'left': [.3, .4, .5, .6, .7, .8], 'right': [.1, .2,", "0, 0, 1, 1, 1], True), ([0, 1, 0, 1, 0, 1], True),", "6] == list(widgets.get_detected_points(left, right, .03, .97, inner = False)[0].index) assert [3, 4, 5,", "left = pandas.Series([0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1,", "'1']) ] ) def test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values) assert result == expected", "= pandas.Series(series) assert widgets.fixed_immediately(s, 0.03, 0.97) == expected 
@pytest.mark.parametrize( \"series,expected\", [ ([0, 0,", "1, 0, 1], True), ([0, .2, 1, 1, 1], True), ([0, .1, .2,", "1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index = [0,1])), ([0.000, 0.000, 0.000, 0.525, 0.454,", "test_get_detected_points_inner(): left = pandas.Series([0, 0, 0, 0, 0, 0, 0.085, 0.001, 0.005]) right", "1, 1, 1], True), ([0, 1, 0, 1, 0, 1], True), ([0, .2,", "0.00, 0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042]) right = pandas.Series([0.00, 0.00,", "right, index): l = pandas.Series(left) r = pandas.Series(right) rl, rr = widgets.get_valid_points(l, r,", "0, 0]) right = pandas.Series([0, .14, 0, 1, 1]) result_left, result_right = widgets.get_detected_points(left,", "= [0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left,", "0.03) # Since pandas likes to return a series of bool values when", "3]), ([0, 0, 0, 0, 0], [0, .14, .23, 0, 0], [1, 2]),", "== list(widgets.get_detected_points(left, right, .03, inner = True)[0].index) assert [3, 4, 5] == list(widgets.get_detected_points(left,", "= pandas.Series(left) r = pandas.Series(right) rl, rr = widgets.get_valid_points(l, r, 0.03) assert list(rl.index)", "pandas.DataFrame({ 'left': [.3, .4, .5, .6, .7, .8], 'right': [.1, .2, .3, .3,", "0.947, 1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, dlimit =", "== expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist() == expected_right assert", "0.03) assert switched_result_left.tolist() == expected_right assert switched_result_right.tolist() == expected_left expected_left = [0.263, 0.07,", "i in expected] @pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s',", "\"series, expected\", [ ([0.1, 0.4, 0.3, 1, 0.97 , 1], (3,5)), ([0.2, 1,", "0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 
[0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000, 0.000, 0.200,", ".2, .3, .4, .5], False) ] ) def test_fixed(series, expected): s = pandas.Series(series)", "[ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index = [0,1])), ([0.000,", "list(widgets.get_detected_points(left, right, .03, .97, inner = True)[0].index) def test_get_detected_points_inner(): left = pandas.Series([0, 0,", "0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00]) expected_left = [0.000, 0.00, 0.00, 0.263,", "def test_get_intermediate(values, expected): result = widgets.get_intermediate(values, 0.03, 0.9) # Since pandas likes to", "list(widgets.get_detected_points(left, right, .03, .97, inner = False)[0].index) assert [3, 4, 5, 6, 7]", "0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index = [0,1])), ([0.000, 0.000, 0.000, 0.525,", "0.085, 0.001, 0.005]) right = pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55, 0.5 ])", "expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index = [2])),", "inner = True) assert l.tolist() == [0.085] assert r.tolist() == [0.05] def test_get_valid_points_simple():", ".4, .5, .6, .7, .8, .9, 1] assert result_right.tolist() == [0, 0, .1,", ".3, .3, .3], }, index = range(3, 9)) result_left, result_right = widgets.get_valid_points(left, right,", "[5])), ([0.000, 0.000, 0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([])) ] ) def test_get_fixed(values,", "0, 0, 0,0.05, 0.55, 0.5 ]) l,r = widgets.get_valid_points(left, right, dlimit = 0.03,", "'r', 'i', 'n', 'g', '1']) ] ) def test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values)", ".14, 0, 0, 0], [1]), ([0, 0, 0, 0, 0], [0, .14, 0,", "0.911, 0.910], pandas.Series([0.911, 0.910], index = [5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911,", "[ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index = [2])), ([0.000,", ".6, .7, .8], 'right': [.1, .2, .3, .3, .3, .3], }, index =", "= True)[0].index) def 
test_get_detected_points_inner(): left = pandas.Series([0, 0, 0, 0, 0, 0, 0.085,", ".2, 1]) assert [2, 3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03,", "[0, 1, 2, 3, 4]), ([0, 1, 0, 0.2, 0], [0, .14, 0,", "import dataio, widgets @pytest.mark.parametrize( \"columns, expected\", [ (['1', '66', '0', 'X9', 'xc', 'x33',", "1, 2, 3]) ] ) def test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns) assert result", "output = widgets.map_trajectories_to_genotype(table['members']) assert expected_map == output @pytest.mark.parametrize( \"left,right,index\", [ ([0, 1, 1,", "= pandas.Series([0, 0, 0, 0, 0]) right = pandas.Series([0, .14, 0, 1, 1])", "right, .03, inner = True)[0].index) assert [3, 4, 5] == list(widgets.get_detected_points(left, right, .03,", ".5, .6, .7, .8, .9] assert result_right.tolist() == [0, 0, .1, .2, .3,", "assert widgets.fixed_immediately(s, 0.03, 0.97) == expected @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1,", "0.03, 0.97, inner = True) assert result_left.tolist() == [] and result_right.tolist() == []", "== [bool(i) for i in expected] @pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88],", "([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index = [3, 4])),", "= {'A1': 'A', 'A2': 'A', 'A3': 'A', 'B1': 'B', 'B2': 'B', 'C1': 'C'}", "1, 4, 5], [.23, .14, .13, 0, 0], [0, 1, 2, 3, 4]),", "range(3, 9)) result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert", "= pandas.Series([0, 0, 0, .1, .2, .3, .3, .3, .3, 0, 0, 0])", "['A1|A2|A3', 'B1|B2', 'C1'] } ) table = table.set_index('genotype') expected_map = {'A1': 'A', 'A2':", "left, 0.03) assert switched_result_left.tolist() == expected_right assert switched_result_right.tolist() == expected_left expected_left = [0.263,", "expected_left = [0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [1.00, 1.00, 1.00, 
1.00,", "1551713873 -0500 commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit:", "a scalar result, # Let's check the values and index directly. assert result.tolist()", "list(rl.index) == list(rr.index) assert list(rl.index) == index def test_get_detected_points_advanced(): left = pandas.Series([0, 0,", "for i in expected] @pytest.mark.parametrize( \"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1',", "'C1'] } ) table = table.set_index('genotype') expected_map = {'A1': 'A', 'A2': 'A', 'A3':", "0.810], pandas.Series([0.525, 0.454, 0.810], index = [3,4,6])), ([0.000, 0.000, 1.000, 1.000, 1.000, 1.000,", "commit: Update based on pycharm code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500", "cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving from master to version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown", "1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911,", "0.000, 0.000, 0.000], pandas.Series([])) ] ) def test_get_fixed(values, expected): result = widgets.get_fixed(values, 0.9)", "[1,88,4,88]), ('string1', ['s', 't', 'r', 'i', 'n', 'g', '1']) ] ) def test_coerce_to_list(values,", "(1,3)), ([0.1, 0.2, 0.3, 0.4, 0.5], None), ([0.1, 0.4, 0.3, 1, 0.5, 0.2],", "0, 0, 0, 0], [0, .14, 0, 1, 1], [1, 2, 3, 4]),", ".3, 0, 0, 0]) result_left, result_right = widgets.get_valid_points(left, right, 0.03) assert result_left.tolist() ==", "== [1] left = pandas.Series([0, 0, 0, 0, 0, 1, 1]) right =", "right, .03, inner = False)[0].index) assert [2, 3, 4, 5, 6] == list(widgets.get_detected_points(left,", "0, 0, 0]) right = pandas.Series([0, .14, 0, 1, 1]) result_left, result_right =", ".2, .3, .3, .3, .3, 0, 0, 
0]) result_left, result_right = widgets.get_valid_points(left, right,", "= pandas.DataFrame({ 'left': [.3, .4, .5, .6, .7, .8], 'right': [.1, .2, .3,", "== [.3, .4, .5, .6, .7, .8] assert result_right.tolist() == [.1, .2, .3,", "assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right, left,", ".4, .5], False) ] ) def test_fixed(series, expected): s = pandas.Series(series) assert widgets.fixed(s,", "test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values) assert result == expected @pytest.mark.parametrize( \"values, expected\", [", "\"f086ec9\" filename_mock.return_value = test_file result_hash = widgets.get_commit_hash() assert expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\",", "== list(expected.index) @pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000],", "0.03) assert list(rl.index) == list(rr.index) assert list(rl.index) == index def test_get_detected_points_advanced(): left =", "1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]), ] )", "0.910], pandas.Series([0.525, 0.454], index = [3, 4])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911,", "def test_get_commit_hash(filename_mock): test_file = \"\"\" 045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving", "0.069, 0.042] expected_right = [1.00, 1.00, 1.00, 1.00, 1.00] result_left, result_right = widgets.get_valid_points(left,", "}, index = range(3, 9)) result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner", "3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = False)[0].index) assert", "test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042])", ") def 
test_fixes_imediately(series, expected): s = pandas.Series(series) assert widgets.fixed_immediately(s, 0.03, 0.97) == expected", "0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_undetected(values, expected): result =", "logger import pandas import pytest from muller import dataio, widgets @pytest.mark.parametrize( \"columns, expected\",", "= [0,1])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0], index =", "0.03, 0.9) # Since pandas likes to return a series of bool values", "pandas.Series([0, 0, 0, .1, .1, .1, .2, 1]) assert [2, 3, 4, 5,", "pandas.Series([])) ] ) def test_get_fixed(values, expected): result = widgets.get_fixed(values, 0.9) # Since pandas", "2, 3]), ([0, 0, 0, 0, 0], [0, .14, .23, 0, 0], [1,", "widgets.get_valid_points(left, right, 0.03) assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7,", "= pandas.Series(series) assert widgets.fixed(s, 0.97) == expected @pytest.mark.parametrize( \"series, expected\", [ ([0.1, 0.4,", "([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index = [2])), ([0.000, 0.000,", "'right': [.1, .2, .3, .3, .3, .3], }, index = range(3, 9)) result_left,", "switched_result_right.tolist() == expected_left expected_left = [0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [1.00,", "([0, .2, 1, 1, 1], False) ] ) def test_fixes_imediately(series, expected): s =", "([0.1, 0.4, 0.3, 1, 0.97 , 1], (3,5)), ([0.2, 1, 0.2, 0.98, 0.1],", ".97, inner = True)[0].index) def test_get_detected_points_inner(): left = pandas.Series([0, 0, 0, 0, 0,", "([.1, .1, .1, .1], None), ([1, .1, .1, .1], 0) ] ) def", ") table = table.set_index('genotype') expected_map = {'A1': 'A', 'A2': 'A', 'A3': 'A', 'B1':", "\"left,right,index\", [ ([0, 1, 1, 4, 5], [.23, .14, .13, 0, 0], [0,", "= test_file result_hash = widgets.get_commit_hash() assert expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0,", "0, 1], True), ([0, .2, 1, 1, 1], True), ([0, .1, 
.2, .3,", "7] == list(widgets.get_detected_points(left, right, .03, inner = True)[0].index) assert [3, 4, 5] ==", "9)) result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist()", ".3, .4, .5, .6, .7, .8, .9, 1] assert result_right.tolist() == [0, 0,", "list(widgets.get_detected_points(left, right, .03, inner = True)[0].index) assert [3, 4, 5] == list(widgets.get_detected_points(left, right,", "== [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1] assert result_right.tolist()", "expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist() == expected_right assert switched_result_right.tolist()", "pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042]) right = pandas.Series([0.00,", "1], [1, 2, 3, 4]), ] ) def test_get_detected_points(left, right, index): l =", "expected_left assert result_right.tolist() == expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist()", "assert switched_result_left.tolist() == expected_right assert switched_result_right.tolist() == expected_left expected_left = [0.263, 0.07, 0.081,", "True), ([0, .2, 1, 1, 1], True), ([0, .1, .2, .3, .4, .5],", "result = widgets.get_first_fixed_timepoint(values, 0.9) assert result == expected @pytest.mark.parametrize( \"values, expected\", [ ([0.000,", "1], (3,5)), ([0.2, 1, 0.2, 0.98, 0.1], (1,3)), ([0.1, 0.2, 0.3, 0.4, 0.5],", "Since pandas likes to return a series of bool values when comparing items", "= pandas.Series(right) rl, rr = widgets.get_valid_points(l, r, 0.03) assert list(rl.index) == list(rr.index) assert", "\"values, expected\", [ (pandas.Series([4,7,9,11]), [4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't', 'r', 'i', 'n',", "widgets.get_intermediate(values, 0.03, 0.9) # Since pandas likes to return a series of bool", "right = 
pandas.Series([0, .14, 0, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03,", "@pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index", "assert result_right.tolist() == expected_right switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03) assert switched_result_left.tolist() ==", "rather than a scalar result, # Let's check the values and index directly.", "0.910], pandas.Series([0.911, 0.910], index = [5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810],", "result = widgets.get_fixed(values, 0.9) # Since pandas likes to return a series of", "3]) ] ) def test_get_numeric_columns(columns, expected): result = widgets.get_numeric_columns(columns) assert result == expected", ") def test_get_first_fixed_timepoint(values, expected): result = widgets.get_first_fixed_timepoint(values, 0.9) assert result == expected @pytest.mark.parametrize(", "0.000, 0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([])) ] ) def test_get_fixed(values, expected): result", "1), (4, 2, 6), (6, 3, 20) ] ) def test_calculate_total_number_of_combinations(elements, size, expected):", "0.910], index = [5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index", "0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00]) expected_left = [0.000, 0.00, 0.00,", "l.tolist() == [0.085] assert r.tolist() == [0.05] def test_get_valid_points_simple(): left = pandas.Series([0, .1,", "right, expected\", [ ([0, 0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525,", "3, 4]), ([0, 1, 0, 0.2, 0], [0, .14, 0, 0, 0], [1,", "0], [0, .14, .23, 0, 0], [1, 2]), ([0, 0, 0, 0, 0],", "5] == list(widgets.get_detected_points(left, right, .03, .97, inner = True)[0].index) def test_get_detected_points_inner(): left =", "== [0.085] assert r.tolist() == [0.05] def test_get_valid_points_simple(): left = pandas.Series([0, .1, .2,", "left = 
pandas.Series([0, 0, .3, .4, .4, .4, 1, 1]) right = pandas.Series([0,", "= pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042]) right =", ", 1], (3,5)), ([0.2, 1, 0.2, 0.98, 0.1], (1,3)), ([0.1, 0.2, 0.3, 0.4,", "[4,7,9,11]), ([1,88,4,88], [1,88,4,88]), ('string1', ['s', 't', 'r', 'i', 'n', 'g', '1']) ] )", "l,r = widgets.get_valid_points(left, right, dlimit = 0.03, inner = True) assert l.tolist() ==", "0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0], index = [0,1,2])), ([0.000, 0.000,", "0.081, 0.069, 0.042] expected_right = [0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00]", ".9, 1] assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3,", "= \"f086ec9\" filename_mock.return_value = test_file result_hash = widgets.get_commit_hash() assert expected_hash == result_hash @pytest.mark.parametrize(", "0.911, 0.910], pandas.Series([0.525, 0.454], index = [3, 4])), ([0.000, 0.000, 0.000, 0.525, 0.454,", "expected_map = {'A1': 'A', 'A2': 'A', 'A3': 'A', 'B1': 'B', 'B2': 'B', 'C1':", "switched_result_left.tolist() == expected_right assert switched_result_right.tolist() == expected_left expected_left = [0.263, 0.07, 0.081, 0.069,", "0.910], [0,0,0,0,0,1,1]), ([0, 0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 0.000, 0.525, 0.454,", "= True) assert result_left.tolist() == [] and result_right.tolist() == [] @patch('muller.widgets._get_git_log') def test_get_commit_hash(filename_mock):", "'66', '0', 'X9', 'x33']), ( ['Genotype', 0, 1 ,2, 3], [0, 1, 2,", ".3, .3, .3, .3, 0] expected = pandas.DataFrame({ 'left': [.3, .4, .5, .6,", "'members': ['A1|A2|A3', 'B1|B2', 'C1'] } ) table = table.set_index('genotype') expected_map = {'A1': 'A',", "test_file result_hash = widgets.get_commit_hash() assert expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0, 0,", "] ) def test_fixed(series, expected): s = pandas.Series(series) assert widgets.fixed(s, 0.97) == expected", "right, 0.9) # Convert to bool 
for safety. assert result.tolist() == [bool(i) for", "expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements, size, expected\", [ (3, 3, 1),", "] ) def test_get_overlap_regions(left, right, expected): result = widgets.get_overlap_regions(left, right, 0.9) # Convert", "= widgets.get_commit_hash() assert expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1,", "list(result_left.index) == list(result_right.index) assert list(result_left.index) == [3, 4] # Check the `inner` option.", "pandas.Series([0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 0]) right", "0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index = [2])), ([0.000, 0.000, 0.000, 0.525,", "= widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [3,", "0.4, 0.3, 1, 0.97 , 1], (3,5)), ([0.2, 1, 0.2, 0.98, 0.1], (1,3)),", ".3, .3, 0, 0, 0]) result_left, result_right = widgets.get_valid_points(left, right, 0.03) assert result_left.tolist()", "([0, 1, 1, 4, 5], [.23, .14, .13, 0, 0], [0, 1, 2,", "0.97) == expected @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1], True),", "for safety. 
assert result.tolist() == [bool(i) for i in expected] @pytest.mark.parametrize( \"values, expected\",", "version0.2 045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit: Update based on pycharm code", "[1] left = pandas.Series([0, 0, 0, 0, 0, 1, 1]) right = pandas.Series([0,", "1.000, 1.000, 1.000, 1.000], pandas.Series([])) ] ) def test_get_intermediate(values, expected): result = widgets.get_intermediate(values,", "0, 0], [1]), ([0, 0, 0, 0, 0], [0, .14, 0, 1, 1],", "\"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 3), ([0.000, 0.000,", "0, 0, 1, 1]) right = pandas.Series([0, 0, 0, .14, .53, 1, 1])", "0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index = [3, 4])), ([0.000, 0.000, 0.000,", "and index directly. assert result.tolist() == expected.tolist() assert list(result.index) == list(expected.index) @pytest.mark.parametrize( \"elements,", "[2])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index = [3,", "([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index = [5,6])), ([0.000,", "([0.000, 0.000, 0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])), ([1.000, 1.000,", "from muller import dataio, widgets @pytest.mark.parametrize( \"columns, expected\", [ (['1', '66', '0', 'X9',", "0, 0, 0], [0, .14, .23, 0, 0], [1, 2]), ([0, 0, 0,", ".23, 0, 0], [1, 2]), ([0, 0, 0, 0, 0], [0, .14, 0,", "0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index = [5,6])), ([0.000, 0.000, 0.000,", "1]) right = pandas.Series([0, 0, 0, .1, .1, .1, .2, 1]) assert [2,", "right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [1] left =", "pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55, 0.5 ]) l,r = widgets.get_valid_points(left, right, dlimit", "[0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042] 
expected_right = [0.170, 0.55, 0.947,", ".4, .4, .4, 1, 1]) right = pandas.Series([0, 0, 0, .1, .1, .1,", "Check the `inner` option. left = pandas.Series([0, 0, .3, .4, .4, .4, 1,", "] ) def test_calculate_total_number_of_combinations(elements, size, expected): result = widgets.calculate_number_of_combinations(elements, size) assert result ==", "= pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55, 0.5 ]) l,r = widgets.get_valid_points(left, right,", "(['1', '66', '0', 'X9', 'xc', 'x33', 'col4'], ['1', '66', '0', 'X9', 'x33']), (", "0, .14, .53, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert", ".5, .6, .7, .8, .9, 1, 0]) right = pandas.Series([0, 0, 0, .1,", "0.525, 0.454, 0.911, 0.910], 5), ([.1, .1, .1, .1], None), ([1, .1, .1,", "5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = True)[0].index) assert [3, 4,", "'0', 'X9', 'xc', 'x33', 'col4'], ['1', '66', '0', 'X9', 'x33']), ( ['Genotype', 0,", "= widgets.get_valid_points(left, right, dlimit = 0.03) assert result_left.tolist() == expected_left assert result_right.tolist() ==", "expected @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0, 1, 1, 1], True), ([0, 1,", "[3,4,5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index = [5,6])),", "widgets.get_fixed(values, 0.9) # Since pandas likes to return a series of bool values", "r = pandas.Series(right) rl, rr = widgets.get_valid_points(l, r, 0.03) assert list(rl.index) == list(rr.index)", "False) ] ) def test_fixes_imediately(series, expected): s = pandas.Series(series) assert widgets.fixed_immediately(s, 0.03, 0.97)", "0.454, 0.911, 0.910], pandas.Series([0, 0,0], index = [0,1,2])), ([0.000, 0.000, 0.000, 0.525, 0.020,", ".3, .3, .3, 0, 0] result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97) assert", "False) ] ) def test_fixed(series, expected): s = pandas.Series(series) assert widgets.fixed(s, 0.97) ==", "pandas.Series([1,1,1,1], 
index = [3,4,5,6])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910],", "1 ,2, 3], [0, 1, 2, 3]) ] ) def test_get_numeric_columns(columns, expected): result", "1], True), ([0, .2, 1, 1, 1], True), ([0, .1, .2, .3, .4,", "code inspecter 78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored difference calculation d0aa33355336fa3772da8e823660c61296960dfe", "@pytest.mark.parametrize( \"values, expected\", [ ([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index", ".4, .5, .6, .7, .8], 'right': [.1, .2, .3, .3, .3, .3], },", "0, 0, 0, 0]) right = pandas.Series([0, .14, 0, 1, 1]) result_left, result_right", "Changed Default Clustering Method \"\"\" expected_hash = \"f086ec9\" filename_mock.return_value = test_file result_hash =", "1, 1, 1], False) ] ) def test_fixes_imediately(series, expected): s = pandas.Series(series) assert", "pandas.Series([0,0], index = [0,1])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0],", "inner = True) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right result_left, result_right", "0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index = [3, 4])), ([0.000, 0.000,", "muller import dataio, widgets @pytest.mark.parametrize( \"columns, expected\", [ (['1', '66', '0', 'X9', 'xc',", "result_right = widgets.get_valid_points(left, right, 0.03, inner = True) assert result_left.tolist() == expected_left assert", "1.00, 1.00] result_left, result_right = widgets.get_valid_points(left, right, dlimit = 0.03) assert result_left.tolist() ==", "def test_get_overlap_regions(left, right, expected): result = widgets.get_overlap_regions(left, right, 0.9) # Convert to bool", "def test_map_trajectories_to_genotype(): table = pandas.DataFrame( { 'genotype': ['A', 'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2',", "assert list(result_left.index) == 
[3, 4] # Check the `inner` option. left = pandas.Series([0,", "1], True), ([0, .1, .2, .3, .4, .5], False) ] ) def test_fixed(series,", "0], [1, 2]), ([0, 0, 0, 0, 0], [0, .14, 0, 0, 0],", "pandas.Series(right) rl, rr = widgets.get_valid_points(l, r, 0.03) assert list(rl.index) == list(rr.index) assert list(rl.index)", "assert expected_map == output @pytest.mark.parametrize( \"left,right,index\", [ ([0, 1, 1, 4, 5], [.23,", "= pandas.Series([0, .14, 0, 1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97)", "= widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist() == [] and", "assert result == expected @pytest.mark.parametrize( \"left, right, expected\", [ ([0, 0, 0.261, 1.000,", ".1], None), ([1, .1, .1, .1], 0) ] ) def test_get_first_fixed_timepoint(values, expected): result", "0.454, 0.810], index = [3,4,6])), ([0.000, 0.000, 1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([]))", "def test_get_valid_points_complex(): left = pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069,", "result_right = widgets.get_valid_points(left, right, dlimit = 0.03) assert result_left.tolist() == expected_left assert result_right.tolist()", "= [0.263, 0.07, 0.081, 0.069, 0.042] expected_right = [1.00, 1.00, 1.00, 1.00, 1.00]", "loguru import logger import pandas import pytest from muller import dataio, widgets @pytest.mark.parametrize(", "assert [3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = True)[0].index)", "1, 1], False) ] ) def test_fixes_imediately(series, expected): s = pandas.Series(series) assert widgets.fixed_immediately(s,", "check the values and index directly. 
assert result.tolist() == expected.tolist() assert list(result.index) ==", "0, 0, 0], [1]), ([0, 0, 0, 0, 0], [0, .14, 0, 1,", "import logger import pandas import pytest from muller import dataio, widgets @pytest.mark.parametrize( \"columns,", "result_left, result_right = widgets.get_valid_points(left, right, 0.03, inner = True) assert result_left.tolist() == expected_left", "result_hash = widgets.get_commit_hash() assert expected_hash == result_hash @pytest.mark.parametrize( \"series,expected\", [ ([0, 0, 0,", "result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True) assert result_left.tolist() ==", ".2, .3, .3, .3, .3, 0] expected = pandas.DataFrame({ 'left': [.3, .4, .5,", "1, 1]) right = pandas.Series([0, 0, 0, .14, .53, 1, 1]) result_left, result_right", "right, 0.03, 0.97, inner = True) assert result_left.tolist() == [] and result_right.tolist() ==", "'i', 'n', 'g', '1']) ] ) def test_coerce_to_list(values, expected): result = widgets._coerce_to_list(values) assert", "0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index = [5])), ([0.000, 0.000, 0.860,", "[0, .14, 0, 0, 0], [1, 2, 3]), ([0, 0, 0, 0, 0],", "0.03, inner = True) assert result_left.tolist() == expected_left assert result_right.tolist() == expected_right result_left,", "result = widgets.get_intermediate(values, 0.03, 0.9) # Since pandas likes to return a series", "test_map_trajectories_to_genotype(): table = pandas.DataFrame( { 'genotype': ['A', 'B', 'C'], 'members': ['A1|A2|A3', 'B1|B2', 'C1']", "= widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index) assert list(result_left.index) == [1]", "1, 1]) result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97) assert list(result_left.index) == list(result_right.index)", "= [2])), ([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index =", "== expected_left assert result_right.tolist() == 
expected_right result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97,", "0.5], None), ([0.1, 0.4, 0.3, 1, 0.5, 0.2], (3,3)) ] ) def test_find_boundaries_fixed(series," ]
[ "<gh_stars>1-10 import numpy as np import random import torch def seeding(seed: int): np.random.seed(seed)", "numpy as np import random import torch def seeding(seed: int): np.random.seed(seed) random.seed(seed) torch.manual_seed(seed)", "import numpy as np import random import torch def seeding(seed: int): np.random.seed(seed) random.seed(seed)" ]
[ "service: return key return None def deploy(status, service, cmd): if service in status['services']:", "neutron-api\" % jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\"", "\"--config=%s glance\" % jujuconfig, # \"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" :", ": \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" } # Figure out Juju ID of machine", "\"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\"", ": \"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" } # Figure out Juju", "import subprocess import json import socket jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have same hostname", "as service that runs inside machines = [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\",", "value in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if hostname == service: return", "deploy --to=%s %s\" % (machine, cmd), shell=True) else: subprocess.check_call(\"juju deploy %s\" % cmd,", "that runs inside machines = [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\",", "(machine, cmd), shell=True) else: subprocess.check_call(\"juju deploy %s\" % cmd, shell=True) def get_juju_status(): output", "\"ceilometer\", \"nagios\", \"neutron-api\"] services = { \"mysql\" : \"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\"", ": \"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" } #", "\"openstack-dashboard\" : \"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\" : \"nagios\", 
\"mongodb\" : \"mongodb\", #", "\"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" } # Figure out Juju ID", "on def get_machine(status, service): if service == \"mongodb\": service = \"ceilometer\" for key,", "\"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" } # Figure out Juju ID of machine we", "neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\" : \"nagios\", \"mongodb\"", "same hostname as service that runs inside machines = [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\",", ": \"ceilometer-agent\" } # Figure out Juju ID of machine we should install", "aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if hostname == service: return key return None def", "if get_machine(status, machine) == None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr,", "machines = [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services", "should install on def get_machine(status, service): if service == \"mongodb\": service = \"ceilometer\"", "\"mysql\" : \"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\" % jujuconfig, \"glance\"", "hostname == service: return key return None def deploy(status, service, cmd): if service", "cmd), shell=True) else: subprocess.check_call(\"juju deploy %s\" % cmd, shell=True) def get_juju_status(): output =", "in services.iteritems(): try: deploy(status, service, cmd) except: pass def addmachines(): status = get_juju_status()", "\"nrpe\" : \"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" } # Figure out", ": \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\" : \"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\"", "hostname as service that runs 
inside machines = [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\",", "VMs have same hostname as service that runs inside machines = [\"mysql\", \"rabbitmq-server\",", "inside machines = [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"]", "# \"neutron-gateway\" : \"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\" % jujuconfig,", "\"glance\" : \"--config=%s glance\" % jujuconfig, # \"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig,", "status = get_juju_status() for machine in machines: if get_machine(status, machine) == None: ipaddr", "output = subprocess.check_output(\"juju status --format=json\", shell=True) status = json.loads(output) return status def addservices():", "service, cmd in services.iteritems(): try: deploy(status, service, cmd) except: pass def addmachines(): status", "None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr, shell=True) def main(): addmachines()", "cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\"", "for service, cmd in services.iteritems(): try: deploy(status, service, cmd) except: pass def addmachines():", "\"nagios\", \"neutron-api\"] services = { \"mysql\" : \"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\" :", "status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if hostname == service: return key return", "status['services']: return print \"Installing %s\" % service machine = get_machine(status, service) if machine:", "\"neutron-api\"] services = { \"mysql\" : \"mysql\", \"rabbitmq-server\" : 
\"rabbitmq-server\", \"keystone\" : \"--config=%s", ": \"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\"", "Juju ID of machine we should install on def get_machine(status, service): if service", "# Figure out Juju ID of machine we should install on def get_machine(status,", "jujuconfig, \"glance\" : \"--config=%s glance\" % jujuconfig, # \"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" %", "if service == \"mongodb\": service = \"ceilometer\" for key, value in status['machines'].iteritems(): (hostname,", "deploy %s\" % cmd, shell=True) def get_juju_status(): output = subprocess.check_output(\"juju status --format=json\", shell=True)", "% jujuconfig, # \"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\"", ": \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" :", "get_juju_status() for machine in machines: if get_machine(status, machine) == None: ipaddr = socket.gethostbyname(machine)", "get_juju_status() for service, cmd in services.iteritems(): try: deploy(status, service, cmd) except: pass def", "get_machine(status, machine) == None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr, shell=True)", "\"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" : \"--config=%s", "% jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\" %", "pass def addmachines(): status = get_juju_status() for machine in machines: if 
get_machine(status, machine)", "glance\" % jujuconfig, # \"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" : \"--config=%s", "% jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" %", "\"ntp\" : \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" } # Figure out Juju ID of", "subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr, shell=True) def main(): addmachines() addservices() if __name__ =='__main__':", ": \"mongodb\", # deploy to ceilometer machine \"ceilometer\" : \"ceilometer\", \"nrpe\" : \"nrpe\",", "ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if hostname == service: return key return None def deploy(status,", "import socket jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have same hostname as service that runs", "in status['services']: return print \"Installing %s\" % service machine = get_machine(status, service) if", "machines: if get_machine(status, machine) == None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" %", "socket jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have same hostname as service that runs inside", "def deploy(status, service, cmd): if service in status['services']: return print \"Installing %s\" %", "if service in status['services']: return print \"Installing %s\" % service machine = get_machine(status,", "return print \"Installing %s\" % service machine = get_machine(status, service) if machine: subprocess.check_call(\"juju", "% (machine, cmd), shell=True) else: subprocess.check_call(\"juju deploy %s\" % cmd, shell=True) def get_juju_status():", "\"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services = { \"mysql\" : \"mysql\", \"rabbitmq-server\"", "ceilometer machine \"ceilometer\" 
: \"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\" :", "service = \"ceilometer\" for key, value in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name'])", "neutron-gateway\" % jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\"", "\"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\" % jujuconfig, \"glance\" : \"--config=%s glance\"", "runs inside machines = [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\",", "\"nagios\", \"mongodb\" : \"mongodb\", # deploy to ceilometer machine \"ceilometer\" : \"ceilometer\", \"nrpe\"", "status --format=json\", shell=True) status = json.loads(output) return status def addservices(): status = get_juju_status()", "out Juju ID of machine we should install on def get_machine(status, service): if", "jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig,", "service, cmd): if service in status['services']: return print \"Installing %s\" % service machine", "deploy to ceilometer machine \"ceilometer\" : \"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\" : \"ntp\",", "service that runs inside machines = [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\",", "get_machine(status, service): if service == \"mongodb\": service = \"ceilometer\" for key, value in", "== service: return key return None def deploy(status, service, cmd): if service in", "# deploy to ceilometer machine \"ceilometer\" : \"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\" :", "== None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr, 
shell=True) def main():", "machine) == None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr, shell=True) def", "return key return None def deploy(status, service, cmd): if service in status['services']: return", "deploy(status, service, cmd) except: pass def addmachines(): status = get_juju_status() for machine in", "\"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\" : \"nagios\", \"mongodb\" : \"mongodb\", # deploy to", "except: pass def addmachines(): status = get_juju_status() for machine in machines: if get_machine(status,", "\"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services = { \"mysql\" : \"mysql\",", "jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have same hostname as service that runs inside machines", "install on def get_machine(status, service): if service == \"mongodb\": service = \"ceilometer\" for", "cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\" : \"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\" : \"--config=%s", "we should install on def get_machine(status, service): if service == \"mongodb\": service =", "{ \"mysql\" : \"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\" % jujuconfig,", "jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\" : \"--config=%s neutron-gateway\" %", "in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if hostname == service: return key", "if hostname == service: return key return None def deploy(status, service, cmd): if", "services.iteritems(): try: deploy(status, service, cmd) except: pass def addmachines(): status = get_juju_status() for", "\"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services = { 
\"mysql\" : \"mysql\", \"rabbitmq-server\" :", "\"ceilometer-agent\" : \"ceilometer-agent\" } # Figure out Juju ID of machine we should", "\"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" : \"--config=%s", "% cmd, shell=True) def get_juju_status(): output = subprocess.check_output(\"juju status --format=json\", shell=True) status =", "\"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\" :", "cmd): if service in status['services']: return print \"Installing %s\" % service machine =", "json.loads(output) return status def addservices(): status = get_juju_status() for service, cmd in services.iteritems():", "to ceilometer machine \"ceilometer\" : \"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\"", ": \"nagios\", \"mongodb\" : \"mongodb\", # deploy to ceilometer machine \"ceilometer\" : \"ceilometer\",", "def addmachines(): status = get_juju_status() for machine in machines: if get_machine(status, machine) ==", ": \"--config=%s keystone\" % jujuconfig, \"glance\" : \"--config=%s glance\" % jujuconfig, # \"nova-cloud-controller\"", "service == \"mongodb\": service = \"ceilometer\" for key, value in status['machines'].iteritems(): (hostname, aliaslist,", "\"mongodb\": service = \"ceilometer\" for key, value in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) =", "get_juju_status(): output = subprocess.check_output(\"juju status --format=json\", shell=True) status = json.loads(output) return status def", "subprocess.check_call(\"juju deploy --to=%s %s\" % (machine, cmd), shell=True) else: subprocess.check_call(\"juju deploy %s\" %", "% jujuconfig, \"nagios\" : \"nagios\", \"mongodb\" : \"mongodb\", # deploy to ceilometer machine", "%s\" % cmd, shell=True) def get_juju_status(): output = subprocess.check_output(\"juju status 
--format=json\", shell=True) status", "\"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" } # Figure", "shell=True) status = json.loads(output) return status def addservices(): status = get_juju_status() for service,", "= { \"mysql\" : \"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\" %", "json import socket jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have same hostname as service that", ": \"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\" :", "jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" % jujuconfig,", "key return None def deploy(status, service, cmd): if service in status['services']: return print", "for machine in machines: if get_machine(status, machine) == None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju", "ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr, shell=True) def main(): addmachines() addservices()", "service): if service == \"mongodb\": service = \"ceilometer\" for key, value in status['machines'].iteritems():", "subprocess.check_call(\"juju deploy %s\" % cmd, shell=True) def get_juju_status(): output = subprocess.check_output(\"juju status --format=json\",", "nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\" : \"--config=%s", "cmd, shell=True) def get_juju_status(): output = subprocess.check_output(\"juju status --format=json\", shell=True) status = json.loads(output)", "\"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\" % jujuconfig, \"glance\" : \"--config=%s glance\" % jujuconfig,", "% jujuconfig, \"glance\" : \"--config=%s glance\" % jujuconfig, # \"nova-cloud-controller\" : \"--config=%s 
cs:~andybavier/trusty/nova-cloud-controller\"", "\"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services = { \"mysql\"", "[\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services = {", "\"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, #", "# \"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" % jujuconfig,", "\"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services = { \"mysql\" : \"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\",", "cmd in services.iteritems(): try: deploy(status, service, cmd) except: pass def addmachines(): status =", "try: deploy(status, service, cmd) except: pass def addmachines(): status = get_juju_status() for machine", "% service machine = get_machine(status, service) if machine: subprocess.check_call(\"juju deploy --to=%s %s\" %", "socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr, shell=True) def main(): addmachines() addservices() if __name__", "service in status['services']: return print \"Installing %s\" % service machine = get_machine(status, service)", ": \"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\" % jujuconfig, \"glance\" : \"--config=%s glance\" %", "services = { \"mysql\" : \"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\"", "have same hostname as service that runs inside machines = [\"mysql\", \"rabbitmq-server\", \"keystone\",", "\"nagios\" : \"nagios\", \"mongodb\" : \"mongodb\", # deploy to ceilometer machine \"ceilometer\" :", 
"addmachines(): status = get_juju_status() for machine in machines: if get_machine(status, machine) == None:", ": \"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" :", "else: subprocess.check_call(\"juju deploy %s\" % cmd, shell=True) def get_juju_status(): output = subprocess.check_output(\"juju status", "return status def addservices(): status = get_juju_status() for service, cmd in services.iteritems(): try:", "cmd) except: pass def addmachines(): status = get_juju_status() for machine in machines: if", "== \"mongodb\": service = \"ceilometer\" for key, value in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist)", "machine in machines: if get_machine(status, machine) == None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine", "\"neutron-gateway\" : \"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\"", "status = json.loads(output) return status def addservices(): status = get_juju_status() for service, cmd", "\"--config=%s keystone\" % jujuconfig, \"glance\" : \"--config=%s glance\" % jujuconfig, # \"nova-cloud-controller\" :", "subprocess import json import socket jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have same hostname as", "= \"ceilometer\" for key, value in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if", "get_machine(status, service) if machine: subprocess.check_call(\"juju deploy --to=%s %s\" % (machine, cmd), shell=True) else:", "\"ceilometer-agent\" } # Figure out Juju ID of machine we should install on", "service machine = get_machine(status, service) if machine: subprocess.check_call(\"juju deploy --to=%s %s\" % (machine,", "(hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if hostname == service: return key return None", 
"\"keystone\" : \"--config=%s keystone\" % jujuconfig, \"glance\" : \"--config=%s glance\" % jujuconfig, #", "\"neutron-api\" : \"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\"", "jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\" % jujuconfig,", "jujuconfig, \"nagios\" : \"nagios\", \"mongodb\" : \"mongodb\", # deploy to ceilometer machine \"ceilometer\"", "print \"Installing %s\" % service machine = get_machine(status, service) if machine: subprocess.check_call(\"juju deploy", "import json import socket jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have same hostname as service", "jujuconfig, # \"neutron-gateway\" : \"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\" %", "= get_machine(status, service) if machine: subprocess.check_call(\"juju deploy --to=%s %s\" % (machine, cmd), shell=True)", "= get_juju_status() for service, cmd in services.iteritems(): try: deploy(status, service, cmd) except: pass", "machine: subprocess.check_call(\"juju deploy --to=%s %s\" % (machine, cmd), shell=True) else: subprocess.check_call(\"juju deploy %s\"", "addservices(): status = get_juju_status() for service, cmd in services.iteritems(): try: deploy(status, service, cmd)", "Figure out Juju ID of machine we should install on def get_machine(status, service):", "machine \"ceilometer\" : \"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\"", "socket.gethostbyaddr(value['dns-name']) if hostname == service: return key return None def deploy(status, service, cmd):", "machine = get_machine(status, service) if machine: subprocess.check_call(\"juju deploy --to=%s %s\" % (machine, cmd),", ": \"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\" % jujuconfig, 
\"glance\" :", "<filename>scripts/juju-setup.py #!/usr/bin/python import subprocess import json import socket jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have", "% jujuconfig, \"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\" : \"--config=%s neutron-gateway\"", "% jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\" : \"nagios\", \"mongodb\" :", "subprocess.check_output(\"juju status --format=json\", shell=True) status = json.loads(output) return status def addservices(): status =", "in machines: if get_machine(status, machine) == None: ipaddr = socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\"", "None def deploy(status, service, cmd): if service in status['services']: return print \"Installing %s\"", "jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\" : \"nagios\", \"mongodb\" : \"mongodb\",", "shell=True) def get_juju_status(): output = subprocess.check_output(\"juju status --format=json\", shell=True) status = json.loads(output) return", "machine we should install on def get_machine(status, service): if service == \"mongodb\": service", "= socket.gethostbyname(machine) subprocess.check_call(\"juju add-machine ssh:%s\" % ipaddr, shell=True) def main(): addmachines() addservices() if", "} # Figure out Juju ID of machine we should install on def", "openstack-dashboard\" % jujuconfig, \"nagios\" : \"nagios\", \"mongodb\" : \"mongodb\", # deploy to ceilometer", "deploy(status, service, cmd): if service in status['services']: return print \"Installing %s\" % service", "\"Installing %s\" % service machine = get_machine(status, service) if machine: subprocess.check_call(\"juju deploy --to=%s", "ID of machine we should install on def get_machine(status, service): if service ==", "\"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\" : 
\"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\" :", "% jujuconfig, # \"neutron-gateway\" : \"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\"", "Assumption: VMs have same hostname as service that runs inside machines = [\"mysql\",", "= subprocess.check_output(\"juju status --format=json\", shell=True) status = json.loads(output) return status def addservices(): status", "--to=%s %s\" % (machine, cmd), shell=True) else: subprocess.check_call(\"juju deploy %s\" % cmd, shell=True)", "status = get_juju_status() for service, cmd in services.iteritems(): try: deploy(status, service, cmd) except:", "return None def deploy(status, service, cmd): if service in status['services']: return print \"Installing", "\"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" % jujuconfig, \"neutron-gateway\"", "\"--config=%s neutron-gateway\" % jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\" : \"--config=%s", "\"mongodb\", # deploy to ceilometer machine \"ceilometer\" : \"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\"", "\"neutron-gateway\" : \"--config=%s cs:~andybavier/trusty/neutron-gateway\" % jujuconfig, # \"neutron-gateway\" : \"--config=%s neutron-gateway\" % jujuconfig,", "%s\" % service machine = get_machine(status, service) if machine: subprocess.check_call(\"juju deploy --to=%s %s\"", "if machine: subprocess.check_call(\"juju deploy --to=%s %s\" % (machine, cmd), shell=True) else: subprocess.check_call(\"juju deploy", "status def addservices(): status = get_juju_status() for service, cmd in services.iteritems(): try: deploy(status,", "\"ceilometer\" for key, value in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if hostname", "key, value in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) = 
socket.gethostbyaddr(value['dns-name']) if hostname == service:", "= socket.gethostbyaddr(value['dns-name']) if hostname == service: return key return None def deploy(status, service,", "def addservices(): status = get_juju_status() for service, cmd in services.iteritems(): try: deploy(status, service,", "shell=True) else: subprocess.check_call(\"juju deploy %s\" % cmd, shell=True) def get_juju_status(): output = subprocess.check_output(\"juju", "service) if machine: subprocess.check_call(\"juju deploy --to=%s %s\" % (machine, cmd), shell=True) else: subprocess.check_call(\"juju", "\"mysql\", \"rabbitmq-server\" : \"rabbitmq-server\", \"keystone\" : \"--config=%s keystone\" % jujuconfig, \"glance\" : \"--config=%s", ": \"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\" : \"nagios\", \"mongodb\" : \"mongodb\", # deploy", "keystone\" % jujuconfig, \"glance\" : \"--config=%s glance\" % jujuconfig, # \"nova-cloud-controller\" : \"--config=%s", "add-machine ssh:%s\" % ipaddr, shell=True) def main(): addmachines() addservices() if __name__ =='__main__': main()", "% jujuconfig, \"neutron-api\" : \"--config=%s neutron-api\" % jujuconfig, \"neutron-openvswitch\" : \"--config=%s neutron-openvswitch\" %", ": \"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\" :", ": \"--config=%s glance\" % jujuconfig, # \"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\"", "for key, value in status['machines'].iteritems(): (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name']) if hostname ==", "\"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services = { \"mysql\" :", "# Assumption: VMs have same hostname as service that runs inside machines =", "--format=json\", shell=True) status = json.loads(output) return 
status def addservices(): status = get_juju_status() for", "= [\"mysql\", \"rabbitmq-server\", \"keystone\", \"glance\", \"nova-cloud-controller\", \"neutron-gateway\", \"openstack-dashboard\", \"ceilometer\", \"nagios\", \"neutron-api\"] services =", "= get_juju_status() for machine in machines: if get_machine(status, machine) == None: ipaddr =", "def get_juju_status(): output = subprocess.check_output(\"juju status --format=json\", shell=True) status = json.loads(output) return status", "#!/usr/bin/python import subprocess import json import socket jujuconfig=\"/usr/local/src/openstack.cfg\" # Assumption: VMs have same", "\"mongodb\" : \"mongodb\", # deploy to ceilometer machine \"ceilometer\" : \"ceilometer\", \"nrpe\" :", "\"--config=%s neutron-openvswitch\" % jujuconfig, \"openstack-dashboard\" : \"--config=%s openstack-dashboard\" % jujuconfig, \"nagios\" : \"nagios\",", "%s\" % (machine, cmd), shell=True) else: subprocess.check_call(\"juju deploy %s\" % cmd, shell=True) def", "jujuconfig, # \"nova-cloud-controller\" : \"--config=%s cs:~andybavier/trusty/nova-cloud-controller\" % jujuconfig, \"nova-cloud-controller\" : \"--config=%s nova-cloud-controller\" %", "\"ceilometer\" : \"ceilometer\", \"nrpe\" : \"nrpe\", \"ntp\" : \"ntp\", \"ceilometer-agent\" : \"ceilometer-agent\" }", "def get_machine(status, service): if service == \"mongodb\": service = \"ceilometer\" for key, value", "of machine we should install on def get_machine(status, service): if service == \"mongodb\":", "service, cmd) except: pass def addmachines(): status = get_juju_status() for machine in machines:", "= json.loads(output) return status def addservices(): status = get_juju_status() for service, cmd in" ]
[ "10, 2020 # Main Contact: Donghae Jang # # This software is free", "5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x,", "= x.unsqueeze(1) x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x =", "(5, 1)) self.conv4 = nn.Conv2d(64, 64, (5, 1)) # self.lstm1 = nn.LSTM(7232, 128,", "myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2", "nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) def forward(self, x, hidden=None): self.device =", "super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64, 64, (5,", "# # Author: <NAME> (<EMAIL>), Seoul National University # U Kang (<EMAIL>), Seoul", "self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) #", "# self.lstm2 = nn.LSTM(128, 128, batch_first = True) # self.lstm = myLSTM(7232, hidden_layer_sizes=[128,", "x.device x = x.unsqueeze(1) x = self.conv1(x) x = self.conv2(x) x = self.conv3(x)", "torch.nn as nn import torch.nn.functional as F import math from compressed_lstm import myLSTM", "research purposes. # For commercial purposes, please contact the authors. 
# ################################################################################ import", "128], batch_first=True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) self.fc1", "2, 1, 3) x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) x, h =", "__init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM, self).__init__() self.conv1", "Version : 1.0 # Date : Nov 10, 2020 # Main Contact: Donghae", "myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128,", "c0)) #print(output.shape) h1 = torch.zeros(1, output.size(0), 128).to(self.device) c1 = torch.zeros(1, output.size(0), 128).to(self.device) output,", "# self.lstm2 = nn.LSTM(128, 128, batch_first = True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128],", "from torch.nn import Parameter, ParameterList import torch.nn as nn import torch.nn.functional as F", "c1 = torch.zeros(1, output.size(0), 128).to(self.device) output, (h, c) = self.lstm2(output, (h1, c1)) #output", "= myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1", "self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru1 = nn.LSTM(7232, 128)", "for implementing DeepConvLSTM # This is implementation of DeepcConvolutional part, and LSTM part", "2020 # Main Contact: Donghae Jang # # This software is free of", "Author: <NAME> (<EMAIL>), Seoul National University # U Kang (<EMAIL>), Seoul National University", "= nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) def forward(self, x, hidden=None): self.device", "University # U Kang (<EMAIL>), Seoul National University # # Version : 1.0", "uRank=None, **kwargs): super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64,", "def __init__(self, 
input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM, self).__init__()", "h = self.gru(x) \"\"\" h0 = torch.zeros(1, x.size(0), 128).to(self.device) c0 = torch.zeros(1, x.size(0),", "128, batch_first = True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru = myGRU(7232,", "torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output, (h, c) = self.lstm1(x, (h0, c0)) #print(output.shape) h1", "= nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 =", "class DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5,", "output.size(0), 128).to(self.device) output, (h, c) = self.lstm2(output, (h1, c1)) #output = output.permute(1,0,2) #output", "Jang # # This software is free of charge under research purposes. #", "F import math from compressed_lstm import myLSTM from compressed_gru import myGRU # Code", "self.conv3 = nn.Conv2d(64, 64, (5, 1)) self.conv4 = nn.Conv2d(64, 64, (5, 1)) #", "# self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru = myGRU(7232,", "x, hidden=None): self.device = x.device x = x.unsqueeze(1) x = self.conv1(x) x =", "128, batch_first = True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True) #", "# Author: <NAME> (<EMAIL>), Seoul National University # U Kang (<EMAIL>), Seoul National", "# self.gru2 = nn.LSTM(128, 128) self.fc1 = nn.Linear(16 * 5 * 5, 120)", "x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) x, h = self.gru(x) \"\"\" h0", "x = x.unsqueeze(1) x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x", "x.size(0), 128).to(self.device) c0 = torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output, (h, c) = self.lstm1(x,", "torch from torch.nn import Parameter, ParameterList import torch.nn as nn import torch.nn.functional as", "nn.Conv2d(64, 64, 
(5, 1)) # self.lstm1 = nn.LSTM(7232, 128, batch_first = True) #", "compressed_gru import myGRU # Code for implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self, input_size,", "import torch.nn.functional as F import math from compressed_lstm import myLSTM from compressed_gru import", "# Code for implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True,", "filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64,", "64, (5, 1)) # self.lstm1 = nn.LSTM(7232, 128, batch_first = True) # self.lstm2", "84) self.fc3 = nn.Linear(84, 10) def forward(self, x, hidden=None): self.device = x.device x", "= nn.Linear(84, 10) def forward(self, x, hidden=None): self.device = x.device x = x.unsqueeze(1)", "import math from compressed_lstm import myLSTM from compressed_gru import myGRU # Code for", "# Date : Nov 10, 2020 # Main Contact: Donghae Jang # #", "64, (5, 1)) self.conv2 = nn.Conv2d(64, 64, (5, 1)) self.conv3 = nn.Conv2d(64, 64,", "self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = x.permute(0,", "= nn.LSTM(128, 128) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 =", "from compressed_lstm import myLSTM from compressed_gru import myGRU # Code for implementing DeepConvLSTM", "nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84,", "= myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 =", "nn.LSTM(128, 128, batch_first = True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru =", "import myGRU # Code for implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self, input_size, hidden_layer_sizes=[32,", "128).to(self.device) output, (h, c) = self.lstm2(output, (h1, c1)) #output = output.permute(1,0,2) #output =", "wRank=None, uRank=None, **kwargs): 
super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 =", "x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = x.permute(0, 2,", "= torch.zeros(1, x.size(0), 128).to(self.device) c0 = torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output, (h, c)", "128, batch_first = True) # self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True)", "self.conv4 = nn.Conv2d(64, 64, (5, 1)) # self.lstm1 = nn.LSTM(7232, 128, batch_first =", "(5, 1)) # self.lstm1 = nn.LSTM(7232, 128, batch_first = True) # self.lstm2 =", "= nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64, 64, (5, 1)) self.conv3 =", "Nov 10, 2020 # Main Contact: Donghae Jang # # This software is", "purposes, please contact the authors. # ################################################################################ import torch from torch.nn import Parameter,", "implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None,", "= nn.Conv2d(64, 64, (5, 1)) self.conv3 = nn.Conv2d(64, 64, (5, 1)) self.conv4 =", "myLSTM from compressed_gru import myGRU # Code for implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def", "128).to(self.device) c0 = torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output, (h, c) = self.lstm1(x, (h0,", "self.lstm1(x, (h0, c0)) #print(output.shape) h1 = torch.zeros(1, output.size(0), 128).to(self.device) c1 = torch.zeros(1, output.size(0),", "c) = self.lstm1(x, (h0, c0)) #print(output.shape) h1 = torch.zeros(1, output.size(0), 128).to(self.device) c1 =", "Code for implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None,", "# self.lstm1 = nn.LSTM(7232, 128, batch_first = True) # self.lstm2 = nn.LSTM(128, 128,", "# For commercial purposes, please contact the authors. 
# ################################################################################ import torch from", "= self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = x.permute(0, 2, 1,", "128) def forward(self, x, hidden=None): self.device = x.device x = x.unsqueeze(1) x =", "#print(x.shape) output, (h, c) = self.lstm1(x, (h0, c0)) #print(output.shape) h1 = torch.zeros(1, output.size(0),", "is implementation of DeepcConvolutional part, and LSTM part will be added class DeepConv(nn.Module):", "#print(output.shape) h1 = torch.zeros(1, output.size(0), 128).to(self.device) c1 = torch.zeros(1, output.size(0), 128).to(self.device) output, (h,", "= x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) x, h = self.gru(x) \"\"\" h0 =", "True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) def forward(self,", "\"\"\" h0 = torch.zeros(1, x.size(0), 128).to(self.device) c0 = torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output,", "with factorization method : Lowrank and group-lowrank rnn # # Author: <NAME> (<EMAIL>),", "self.lstm2 = nn.LSTM(128, 128, batch_first = True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True)", "5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def", "(5, 1)) self.conv2 = nn.Conv2d(64, 64, (5, 1)) self.conv3 = nn.Conv2d(64, 64, (5,", "# Version : 1.0 # Date : Nov 10, 2020 # Main Contact:", "= nn.LSTM(128, 128) def forward(self, x, hidden=None): self.device = x.device x = x.unsqueeze(1)", "group-lowrank rnn # # Author: <NAME> (<EMAIL>), Seoul National University # U Kang", "################################################################################ import torch from torch.nn import Parameter, ParameterList import torch.nn as nn import", "h # Code for implementing DeepConvLSTM # This is implementation of DeepcConvolutional part,", "nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64, 64, (5, 1)) self.conv3 = nn.Conv2d(64,", "x = self.conv4(x) x = x.permute(0, 2, 1, 3) x = 
x.reshape(x.size(0), x.size(1),", "Lowrank and group-lowrank rnn # # Author: <NAME> (<EMAIL>), Seoul National University #", "= myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru = myGRU(7232, hidden_layer_sizes=[128, 128],", "__init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 =", "Seoul National University # U Kang (<EMAIL>), Seoul National University # # Version", "# This software is free of charge under research purposes. # For commercial", "myGRU # Code for implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self, input_size, hidden_layer_sizes=[32, 32],", "= output.permute(1,0,2) #output = output[0,:,:] \"\"\" ######################################### return x, h # Code for", "ParameterList import torch.nn as nn import torch.nn.functional as F import math from compressed_lstm", "(h, c) = self.lstm2(output, (h1, c1)) #output = output.permute(1,0,2) #output = output[0,:,:] \"\"\"", "= nn.Conv2d(64, 64, (5, 1)) # self.lstm1 = nn.LSTM(7232, 128, batch_first = True)", "= x.permute(0, 2, 1, 3) x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) return", "# # Version : 1.0 # Date : Nov 10, 2020 # Main", "nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x, hidden=None): self.device = x.device", "= True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True) # self.lstm =", "This is implementation of DeepcConvolutional part, and LSTM part will be added class", "charge under research purposes. # For commercial purposes, please contact the authors. 
#", "1)) self.conv2 = nn.Conv2d(64, 64, (5, 1)) self.conv3 = nn.Conv2d(64, 64, (5, 1))", "= output[0,:,:] \"\"\" ######################################### return x, h # Code for implementing DeepConvLSTM #", "**kwargs): super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64, 64,", "x.permute(0, 2, 1, 3) x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) x, h", "super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64, 64, (5,", "# self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru1 = nn.LSTM(7232,", "x.size(0), 128).to(self.device) #print(x.shape) output, (h, c) = self.lstm1(x, (h0, c0)) #print(output.shape) h1 =", "128], batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1 = nn.LSTM(7232, 128)", "This software is free of charge under research purposes. # For commercial purposes,", "factorization method : Lowrank and group-lowrank rnn # # Author: <NAME> (<EMAIL>), Seoul", "x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) x, h = self.gru(x) \"\"\" h0 = torch.zeros(1,", "nn import torch.nn.functional as F import math from compressed_lstm import myLSTM from compressed_gru", "128).to(self.device) #print(x.shape) output, (h, c) = self.lstm1(x, (h0, c0)) #print(output.shape) h1 = torch.zeros(1,", "For commercial purposes, please contact the authors. 
# ################################################################################ import torch from torch.nn", "def __init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2", "nn.LSTM(128, 128, batch_first = True) # self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first =", "* 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10)", "batch_first = True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True) self.lstm =", "= nn.LSTM(7232, 128, batch_first = True) # self.lstm2 = nn.LSTM(128, 128, batch_first =", "nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) self.fc1 = nn.Linear(16 * 5 *", "from compressed_gru import myGRU # Code for implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self,", "part, and LSTM part will be added class DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64):", "LSTM part will be added class DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__()", "DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1))", "(5, 1)) self.conv3 = nn.Conv2d(64, 64, (5, 1)) self.conv4 = nn.Conv2d(64, 64, (5,", "(h, c) = self.lstm1(x, (h0, c0)) #print(output.shape) h1 = torch.zeros(1, output.size(0), 128).to(self.device) c1", "DeepcConvolutional part, and LSTM part will be added class DeepConv(nn.Module): def __init__(self, filter_size=5,", "= self.lstm2(output, (h1, c1)) #output = output.permute(1,0,2) #output = output[0,:,:] \"\"\" ######################################### return", "1)) self.conv4 = nn.Conv2d(64, 64, (5, 1)) # self.lstm1 = nn.LSTM(7232, 128, batch_first", "output, (h, c) = self.lstm1(x, (h0, c0)) #print(output.shape) h1 = torch.zeros(1, output.size(0), 128).to(self.device)", "return x, h # Code for implementing DeepConvLSTM # This is implementation of", "x = 
self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x", "input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM, self).__init__() self.conv1 =", "#output = output[0,:,:] \"\"\" ######################################### return x, h # Code for implementing DeepConvLSTM", "Main Contact: Donghae Jang # # This software is free of charge under", "myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1 =", "self.lstm2(output, (h1, c1)) #output = output.permute(1,0,2) #output = output[0,:,:] \"\"\" ######################################### return x,", "self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2", "torch.nn import Parameter, ParameterList import torch.nn as nn import torch.nn.functional as F import", "* 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self,", "as nn import torch.nn.functional as F import math from compressed_lstm import myLSTM from", "contact the authors. 
# ################################################################################ import torch from torch.nn import Parameter, ParameterList import", "torch.zeros(1, output.size(0), 128).to(self.device) c1 = torch.zeros(1, output.size(0), 128).to(self.device) output, (h, c) = self.lstm2(output,", "torch.zeros(1, output.size(0), 128).to(self.device) output, (h, c) = self.lstm2(output, (h1, c1)) #output = output.permute(1,0,2)", "National University # # Version : 1.0 # Date : Nov 10, 2020", "= torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output, (h, c) = self.lstm1(x, (h0, c0)) #print(output.shape)", "DeepConvLSTM # This is implementation of DeepcConvolutional part, and LSTM part will be", "self.conv3(x) x = self.conv4(x) x = x.permute(0, 2, 1, 3) x = x.reshape(x.size(0),", "= torch.zeros(1, output.size(0), 128).to(self.device) c1 = torch.zeros(1, output.size(0), 128).to(self.device) output, (h, c) =", "nn.Conv2d(64, 64, (5, 1)) self.conv3 = nn.Conv2d(64, 64, (5, 1)) self.conv4 = nn.Conv2d(64,", "64, (5, 1)) self.conv3 = nn.Conv2d(64, 64, (5, 1)) self.conv4 = nn.Conv2d(64, 64,", "= nn.LSTM(128, 128, batch_first = True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru", "batch_first = True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128)", "= self.conv4(x) x = x.permute(0, 2, 1, 3) x = x.reshape(x.size(0), x.size(1), x.size(2)", "hidden_layer_sizes=[128, 128], batch_first = True) # self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first =", "(<EMAIL>), Seoul National University # # Version : 1.0 # Date : Nov", ": 1.0 # Date : Nov 10, 2020 # Main Contact: Donghae Jang", "x.size(3)) x, h = self.gru(x) \"\"\" h0 = torch.zeros(1, x.size(0), 128).to(self.device) c0 =", "for implementing DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None,", "32], batch_first=True, recurrent_inits=None, 
hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1, 64,", "DeepConvLSTM(nn.Module): def __init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM,", "self.conv2 = nn.Conv2d(64, 64, (5, 1)) self.conv3 = nn.Conv2d(64, 64, (5, 1)) self.conv4", "self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64, 64, (5, 1))", "True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True) # self.lstm = myLSTM(7232,", "True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128,", "batch_first = True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True) # self.lstm", "self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = x.permute(0, 2, 1, 3)", "def forward(self, x, hidden=None): self.device = x.device x = x.unsqueeze(1) x = self.conv1(x)", "output[0,:,:] \"\"\" ######################################### return x, h # Code for implementing DeepConvLSTM # This", "myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first", "= torch.zeros(1, output.size(0), 128).to(self.device) output, (h, c) = self.lstm2(output, (h1, c1)) #output =", "# U Kang (<EMAIL>), Seoul National University # # Version : 1.0 #", "1)) self.conv3 = nn.Conv2d(64, 64, (5, 1)) self.conv4 = nn.Conv2d(64, 64, (5, 1))", "implementation of DeepcConvolutional part, and LSTM part will be added class DeepConv(nn.Module): def", "Kang (<EMAIL>), Seoul National University # # Version : 1.0 # Date :", "please contact the authors. 
# ################################################################################ import torch from torch.nn import Parameter, ParameterList", "torch.nn.functional as F import math from compressed_lstm import myLSTM from compressed_gru import myGRU", "= True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) def", "<NAME> (<EMAIL>), Seoul National University # U Kang (<EMAIL>), Seoul National University #", "self.fc3 = nn.Linear(84, 10) def forward(self, x, hidden=None): self.device = x.device x =", "National University # U Kang (<EMAIL>), Seoul National University # # Version :", "= True) # self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru1", "nn.LSTM(128, 128) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120,", "U Kang (<EMAIL>), Seoul National University # # Version : 1.0 # Date", "import Parameter, ParameterList import torch.nn as nn import torch.nn.functional as F import math", "batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5,", "hidden=None): self.device = x.device x = x.unsqueeze(1) x = self.conv1(x) x = self.conv2(x)", "1.0 # Date : Nov 10, 2020 # Main Contact: Donghae Jang #", ": Lowrank and group-lowrank rnn # # Author: <NAME> (<EMAIL>), Seoul National University", "the authors. 
# ################################################################################ import torch from torch.nn import Parameter, ParameterList import torch.nn", "= self.gru(x) \"\"\" h0 = torch.zeros(1, x.size(0), 128).to(self.device) c0 = torch.zeros(1, x.size(0), 128).to(self.device)", "output.permute(1,0,2) #output = output[0,:,:] \"\"\" ######################################### return x, h # Code for implementing", "self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x, hidden=None): self.device", "True) # self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru =", "rnn # # Author: <NAME> (<EMAIL>), Seoul National University # U Kang (<EMAIL>),", "# self.gru2 = nn.LSTM(128, 128) def forward(self, x, hidden=None): self.device = x.device x", "Starlab RNN-compression with factorization method : Lowrank and group-lowrank rnn # # Author:", "added class DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1, 64,", "= x.device x = x.unsqueeze(1) x = self.conv1(x) x = self.conv2(x) x =", "= nn.Conv2d(64, 64, (5, 1)) self.conv4 = nn.Conv2d(64, 64, (5, 1)) # self.lstm1", "= True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True) self.lstm = myLSTM(7232,", "1, 3) x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) x, h = self.gru(x)", "# Main Contact: Donghae Jang # # This software is free of charge", "x.permute(0, 2, 1, 3) x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) return x", "Seoul National University # # Version : 1.0 # Date : Nov 10,", "nn.LSTM(128, 128) def forward(self, x, hidden=None): self.device = x.device x = x.unsqueeze(1) x", "batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1 = nn.LSTM(7232, 128) #", "self.conv4(x) x = x.permute(0, 2, 1, 3) x = x.reshape(x.size(0), x.size(1), x.size(2) *", "= x.permute(0, 2, 1, 3) x = x.reshape(x.size(0), x.size(1), 
x.size(2) * x.size(3)) x,", "64, (5, 1)) self.conv4 = nn.Conv2d(64, 64, (5, 1)) # self.lstm1 = nn.LSTM(7232,", "= self.conv3(x) x = self.conv4(x) x = x.permute(0, 2, 1, 3) x =", "nn.LSTM(7232, 128, batch_first = True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True)", "#output = output.permute(1,0,2) #output = output[0,:,:] \"\"\" ######################################### return x, h # Code", "self.device = x.device x = x.unsqueeze(1) x = self.conv1(x) x = self.conv2(x) x", "Parameter, ParameterList import torch.nn as nn import torch.nn.functional as F import math from", "self.gru2 = nn.LSTM(128, 128) def forward(self, x, hidden=None): self.device = x.device x =", "and group-lowrank rnn # # Author: <NAME> (<EMAIL>), Seoul National University # U", "self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru = myGRU(7232, hidden_layer_sizes=[128,", "= nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x, hidden=None): self.device =", "torch.zeros(1, x.size(0), 128).to(self.device) c0 = torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output, (h, c) =", "################################################################################ # Starlab RNN-compression with factorization method : Lowrank and group-lowrank rnn #", "recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1))", "University # # Version : 1.0 # Date : Nov 10, 2020 #", "batch_first = True) # self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True) #", "h0 = torch.zeros(1, x.size(0), 128).to(self.device) c0 = torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output, (h,", "128], batch_first = True) # self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True)", "128) # self.gru2 = nn.LSTM(128, 128) def forward(self, x, hidden=None): self.device = x.device", "output, (h, c) = self.lstm2(output, (h1, c1)) 
#output = output.permute(1,0,2) #output = output[0,:,:]", "will be added class DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1 =", "part will be added class DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1", "x.size(2) * x.size(3)) x, h = self.gru(x) \"\"\" h0 = torch.zeros(1, x.size(0), 128).to(self.device)", "x.unsqueeze(1) x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x)", "Contact: Donghae Jang # # This software is free of charge under research", "(<EMAIL>), Seoul National University # U Kang (<EMAIL>), Seoul National University # #", "128).to(self.device) c1 = torch.zeros(1, output.size(0), 128).to(self.device) output, (h, c) = self.lstm2(output, (h1, c1))", "RNN-compression with factorization method : Lowrank and group-lowrank rnn # # Author: <NAME>", "self.gru(x) \"\"\" h0 = torch.zeros(1, x.size(0), 128).to(self.device) c0 = torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape)", "128], batch_first = True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128,", "authors. 
# ################################################################################ import torch from torch.nn import Parameter, ParameterList import torch.nn as", "hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2", "(h0, c0)) #print(output.shape) h1 = torch.zeros(1, output.size(0), 128).to(self.device) c1 = torch.zeros(1, output.size(0), 128).to(self.device)", "hidden_layer_sizes=[128, 128], batch_first = True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 =", "# self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) def forward(self, x,", "of DeepcConvolutional part, and LSTM part will be added class DeepConv(nn.Module): def __init__(self,", "self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) def forward(self, x, hidden=None):", "# This is implementation of DeepcConvolutional part, and LSTM part will be added", "* x.size(3)) x, h = self.gru(x) \"\"\" h0 = torch.zeros(1, x.size(0), 128).to(self.device) c0", "= nn.LSTM(128, 128, batch_first = True) # self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first", "= nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) self.fc1 = nn.Linear(16 * 5", "# Starlab RNN-compression with factorization method : Lowrank and group-lowrank rnn # #", "# # This software is free of charge under research purposes. 
# For", "and LSTM part will be added class DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64): super(DeepConv,", "x = self.conv3(x) x = self.conv4(x) x = x.permute(0, 2, 1, 3) x", "batch_first=True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) self.fc1 =", "hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None, **kwargs): super(DeepConvLSTM, self).__init__() self.conv1 = nn.Conv2d(1,", "Donghae Jang # # This software is free of charge under research purposes.", "Code for implementing DeepConvLSTM # This is implementation of DeepcConvolutional part, and LSTM", "output.size(0), 128).to(self.device) c1 = torch.zeros(1, output.size(0), 128).to(self.device) output, (h, c) = self.lstm2(output, (h1,", "import myLSTM from compressed_gru import myGRU # Code for implementing DeepConvLSTM class DeepConvLSTM(nn.Module):", "method : Lowrank and group-lowrank rnn # # Author: <NAME> (<EMAIL>), Seoul National", "hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128)", "c1)) #output = output.permute(1,0,2) #output = output[0,:,:] \"\"\" ######################################### return x, h #", "implementing DeepConvLSTM # This is implementation of DeepcConvolutional part, and LSTM part will", "# Code for implementing DeepConvLSTM # This is implementation of DeepcConvolutional part, and", "compressed_lstm import myLSTM from compressed_gru import myGRU # Code for implementing DeepConvLSTM class", "(h1, c1)) #output = output.permute(1,0,2) #output = output[0,:,:] \"\"\" ######################################### return x, h", "as F import math from compressed_lstm import myLSTM from compressed_gru import myGRU #", ": Nov 10, 2020 # Main Contact: Donghae Jang # # This software", "self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64, 64, (5, 1)) self.conv3", "nn.Conv2d(64, 64, (5, 1)) self.conv4 = nn.Conv2d(64, 64, (5, 1)) 
# self.lstm1 =", "math from compressed_lstm import myLSTM from compressed_gru import myGRU # Code for implementing", "3) x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3)) x, h = self.gru(x) \"\"\"", "= True) # self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru", "self.lstm2 = nn.LSTM(128, 128, batch_first = True) # self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128],", "128) # self.gru2 = nn.LSTM(128, 128) self.fc1 = nn.Linear(16 * 5 * 5,", "# self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) self.fc1 = nn.Linear(16", "filter_count=64): super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1, 64, (5, 1)) self.conv2 = nn.Conv2d(64, 64,", "x, h # Code for implementing DeepConvLSTM # This is implementation of DeepcConvolutional", "120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x, hidden=None):", "batch_first = True) # self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True) #", "######################################### return x, h # Code for implementing DeepConvLSTM # This is implementation", "c) = self.lstm2(output, (h1, c1)) #output = output.permute(1,0,2) #output = output[0,:,:] \"\"\" #########################################", "128, batch_first = True) # self.lstm2 = nn.LSTM(128, 128, batch_first = True) self.lstm", "c0 = torch.zeros(1, x.size(0), 128).to(self.device) #print(x.shape) output, (h, c) = self.lstm1(x, (h0, c0))", "hidden_layer_sizes=[128, 128], batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True) # self.gru1 = nn.LSTM(7232,", "\"\"\" ######################################### return x, h # Code for implementing DeepConvLSTM # This is", "self.gru1 = nn.LSTM(7232, 128) # self.gru2 = nn.LSTM(128, 128) self.fc1 = nn.Linear(16 *", "= self.lstm1(x, (h0, c0)) #print(output.shape) h1 = torch.zeros(1, output.size(0), 128).to(self.device) c1 = torch.zeros(1,", "class DeepConvLSTM(nn.Module): def 
__init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None, **kwargs):", "purposes. # For commercial purposes, please contact the authors. # ################################################################################ import torch", "h1 = torch.zeros(1, output.size(0), 128).to(self.device) c1 = torch.zeros(1, output.size(0), 128).to(self.device) output, (h, c)", "1)) # self.lstm1 = nn.LSTM(7232, 128, batch_first = True) # self.lstm2 = nn.LSTM(128,", "x = x.permute(0, 2, 1, 3) x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3))", "free of charge under research purposes. # For commercial purposes, please contact the", "import torch.nn as nn import torch.nn.functional as F import math from compressed_lstm import", "= myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru1 = nn.LSTM(7232, 128) #", "software is free of charge under research purposes. # For commercial purposes, please", "be added class DeepConv(nn.Module): def __init__(self, filter_size=5, filter_count=64): super(DeepConv, self).__init__() self.conv1 = nn.Conv2d(1,", "True) # self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True) # self.gru1 =", "x.size(1), x.size(2) * x.size(3)) x, h = self.gru(x) \"\"\" h0 = torch.zeros(1, x.size(0),", "self.gru2 = nn.LSTM(128, 128) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2", "batch_first = True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128,", "Date : Nov 10, 2020 # Main Contact: Donghae Jang # # This", "import torch from torch.nn import Parameter, ParameterList import torch.nn as nn import torch.nn.functional", "is free of charge under research purposes. # For commercial purposes, please contact", "of charge under research purposes. 
# For commercial purposes, please contact the authors.", "forward(self, x, hidden=None): self.device = x.device x = x.unsqueeze(1) x = self.conv1(x) x", "x, h = self.gru(x) \"\"\" h0 = torch.zeros(1, x.size(0), 128).to(self.device) c0 = torch.zeros(1,", "10) def forward(self, x, hidden=None): self.device = x.device x = x.unsqueeze(1) x =", "True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True)", "# ################################################################################ import torch from torch.nn import Parameter, ParameterList import torch.nn as nn", "DeepConvLSTM class DeepConvLSTM(nn.Module): def __init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None, hidden_inits=None, wRank=None, uRank=None,", "= self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x =", "commercial purposes, please contact the authors. # ################################################################################ import torch from torch.nn import", "self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3", "self.lstm1 = nn.LSTM(7232, 128, batch_first = True) # self.lstm2 = nn.LSTM(128, 128, batch_first", "nn.Linear(84, 10) def forward(self, x, hidden=None): self.device = x.device x = x.unsqueeze(1) x", "= True) self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True) self.gru = myGRU(7232, hidden_layer_sizes=[128, 128],", "128) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84)", "under research purposes. # For commercial purposes, please contact the authors. # ################################################################################" ]
[ "k in indexes] if index % self.batches_per_file == 0 or self.tmp_data == None:", "self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data = None # Get metadata from", "None, output_folder = None, warm_start = False, allow_abs_path_folder_generation = False, ): self.train_config =", "+ \"_model_final.h5\") def _get_model(self): return self.model.model # def __try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/')", "# else: # tmp_dir_str += '/' + output_folder_list[i] # if not os.path.exists(tmp_dir_str): #", "def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.file_IDs)) if self.shuffle ==", "import uuid import os import pandas as pd import psutil import pickle #import", "index) #print('new file loaded:', index // self.batches_per_file) self.__load_file(file_index = self.indexes[index // self.batches_per_file]) #", "if folder string supplied defines a relative or absolute path # if not", "is not None, 'You did not supply a folder for saving the model'", "# print('Some problem occured when creating the directory ', tmp_dir_str) # else: #", "return X, y def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.file_IDs))", "# rel_folder = False # i = 0 # # # while i", "= self.file_shape_dict['labels'][1] else: self.label_dim = 1 return #return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape", "+ '/' + self.file_IDs[0]).shape class KerasModel: def __init__(self, network_config = None, input_shape =", "# output_folder_list = self.output_folder.split('/') # # Check if folder string supplied defines a", "* self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size, 1) X, y =", "model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim = self.input_shape, activation = self.network_config['activations'][i])) else: if 
self.network_config['layer_types'][i -", "# rel_folder = True # i = 1 # else: # rel_folder =", "verbose = 1, min_delta = 0.0001, min_lr = 0.00000001)) else: print('Provided a string", "self.on_epoch_end() def __len__(self): 'Denotes the number of batches per epoch' return int(np.floor((len(self.file_IDs) *", "Check if folder string supplied defines a relative or absolute path # if", "pandas as pd import psutil import pickle #import kde_info #from lanfactory.config import import", "False. No folders will be generated.') # return # else: # rel_folder =", "factor = 0.1, patience = 5, verbose = 1, min_delta = 0.0001, min_lr", "# Check if folder string supplied defines a relative or absolute path #", "None: y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X, y def __load_file(self, file_index): self.tmp_data", "while i < len(output_folder_list): # if i == 0: # tmp_dir_str += output_folder_list[i]", "# os.makedirs(tmp_dir_str) # except: # print('Some problem occured when creating the directory ',", "arbitrary input file sizes ? 
# Initialization self.batch_size = batch_size #self.labels = labels", "== None: #self.tmp_file = #print('index') #print('debugging') #print('loading new datafile') #print('batch: ', index) #print('new", "1): if i == 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim = self.input_shape, activation =", "= False # i = 0 # # # while i < len(output_folder_list):", "self.network_config['layer_sizes'][i - 1], activation = self.network_config['activations'][i - 1])) else: raise ValueError(\"Only Dense Layers", "/ self.batch_size)) def __getitem__(self, index): 'Generate one batch of data' # Generate indexes", "self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder", "= 1 # else: # rel_folder = False # i = 0 #", "+ output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str = '' # i = 0 #", "import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, file_IDs, batch_size=32, shuffle=True,", "__try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/') # # Check if folder string supplied defines", "self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq: def __init__(self, train_config = None, data_generator_train =", "return # else: # rel_folder = True # i = 1 # else:", "input_shape self.network_config = network_config self.model = self.__build_model() def __build_model(self): model = keras.Sequential() for", "while i < len(output_folder_list): # if not output_folder_list[i]: # output_folder_list.pop(i) # else: #", "cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, verbose = 1, patience", "self.batches_per_file) + 1) * self.batch_size, 1) X, y = self.__data_generation(batch_ids) return X, y", 
"self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index])) def __init_file_shape(self): init_file =", "else: print('Provided a string for a callback function that is none of: checkpoint,", "if not 'checkpoint' in self.train_config['callbacks']: # Save Model print('Saving final state of the", "import pandas as pd import psutil import pickle #import kde_info #from lanfactory.config import", "# output_folder_list.pop(0) # tmp_dir_str = '' # i = 0 # while i", "% self.batches_per_file == 0 or self.tmp_data == None: #self.tmp_file = #print('index') #print('debugging') #print('loading", "= verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id + \"_training_history.csv\") if", "= batch_size #self.labels = labels self.file_IDs = file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low =", "labels self.file_IDs = file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high", "\"_training_history.csv\") if not 'checkpoint' in self.train_config['callbacks']: # Save Model print('Saving final state of", "print('Some problem occured when creating the directory ', tmp_dir_str) # else: # print('Found", "if self.label_prelog_cutoff_low is not None: y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is", "self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim = 1 return #return np.load(self.training_data_folder + '/' +", "data_generator_train = None, data_generator_val = None, model = None, output_folder = None, warm_start", "'mse': self.loss_fun = 'mse' return def __get_optimizer(self): # Adam example here needs optimizer", "for a callback function that is none of: checkpoint, earlystopping, reducelr') def __compile_model(self):", "it...') # try: # 
os.makedirs(tmp_dir_str) # except: # print('Some problem occured when creating", "for cb_tmp in self.train_config['callbacks']: if cb_tmp == 'checkpoint': ckpt_file_name = self.output_folder + '/'", "= self.cb_list, verbose = verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id", "#from lanfactory.config import import tensorflow as tf from tensorflow import keras from tensorflow.keras.models", "IN # self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes the number of batches", "keras.Sequential() for i in range(len(self.network_config['layer_sizes']) + 1): if i == 0: model.add(keras.layers.Dense(units =", "# List physical devices #print(tf.config.list_physical_devices()) # Do I allow for arbitrary input file", "-1] if self.label_prelog_cutoff_low is not None: y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high", "self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes the number of batches per epoch'", "index // self.batches_per_file) self.__load_file(file_index = self.indexes[index // self.batches_per_file]) # Generate data batch_ids =", "): # List physical devices #print(tf.config.list_physical_devices()) # Do I allow for arbitrary input", "y def __load_file(self, file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size =", "when creating the directory ', tmp_dir_str) # else: # print('Found folder: ', tmp_dir_str)", "'adam' return def __get_metrics(self): self.metrics = self.train_config['metrics'] return def __get_callbacks(self): self.cb_list = []", "min_lr = 0.00000001)) else: print('Provided a string for a callback function that is", "self.input_dim), dtype = np.float32) y = np.empty((self.batch_size, self.label_dim), dtype = np.float32) X =", "epochs = 
self.train_config['n_epochs'], callbacks = self.cb_list, verbose = verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder", "'Generates data for Keras' def __init__(self, file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7, #", "= allow_abs_path_folder_generation) # AF-TODO import folder def __get_loss(self): if self.train_config['loss'] == 'huber': self.loss_fun", "folder path provided, but setting allow_abs_path_folder_generation = False. No folders will be generated.')", "= 1): history = self.model.model.fit(x = self.data_generator_train, validation_data = self.data_generator_val, epochs = self.train_config['n_epochs'],", "rel_folder = False # i = 0 # # # while i <", "else: # tmp_dir_str += '/' + output_folder_list[i] # if not os.path.exists(tmp_dir_str): # print('Did", "not None, 'You did not supply a folder for saving the model' try_gen_folder(folder", "number of batches per epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self,", "self.batch_size, 1) X, y = self.__data_generation(batch_ids) return X, y def on_epoch_end(self): 'Updates indexes", "else: # print('Found folder: ', tmp_dir_str) # print('Moving on...') # i += 1", "Generate indexes of the batch # Find list of IDs #file_IDs_temp = [self.file_IDs[k]", "self.shuffle = shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data", "'Updates indexes after each epoch' self.indexes = np.arange(len(self.file_IDs)) if self.shuffle == True: np.random.shuffle(self.indexes)", "init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1:", "uuid.uuid1().hex + '_' + generative_model_id self.save_folder = save_folder self.input_shape = 
input_shape self.network_config =", "save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id + \"_training_history.csv\") if not 'checkpoint' in self.train_config['callbacks']:", "i < len(output_folder_list): # if not output_folder_list[i]: # output_folder_list.pop(i) # else: # i", "import device_lib import warnings from lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates data for", "indexes after each epoch' self.indexes = np.arange(len(self.file_IDs)) if self.shuffle == True: np.random.shuffle(self.indexes) def", "self.__build_model() def __build_model(self): model = keras.Sequential() for i in range(len(self.network_config['layer_sizes']) + 1): if", "tf from tensorflow import keras from tensorflow.keras.models import load_model from tensorflow.python.client import device_lib", "in range(len(self.network_config['layer_sizes']) + 1): if i == 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim =", "return def __get_metrics(self): self.metrics = self.train_config['metrics'] return def __get_callbacks(self): self.cb_list = [] for", "batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1) *", "Adam example here needs optimizer only as a string # We can have", "self.model.model # def __try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/') # # Check if folder", "data_generator_train self.data_generator_val = data_generator_val self.warm_start = warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model() self.__load_weights()", "IDs #file_IDs_temp = [self.file_IDs[k] for k in indexes] if index % self.batches_per_file ==", "None: #self.tmp_file = #print('index') #print('debugging') #print('loading new datafile') #print('batch: ', index) #print('new file", "save_history = True , verbose = 1): history = 
self.model.model.fit(x = self.data_generator_train, validation_data", "checkpoint creation') self.model.model.save(self.output_folder + \"/\" + self.model.model_id + \"_model_final.h5\") def _get_model(self): return self.model.model", "len(self.file_shape_dict['labels']) > 1: self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim = 1 return #return np.load(self.training_data_folder", "batches per epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self, index): 'Generate", "folder string supplied defines a relative or absolute path # if not output_folder_list[0]:", "reducelr') def __compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer = self.optimizer, metrics = self.metrics) def", "data for Keras' def __init__(self, file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7, # label", "'You did not supply a folder for saving the model' try_gen_folder(folder = self.save_folder,", "% self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size, 1) X,", "def __load_weights(self): # If warmstart == True, we load model weights and start", "#print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file", "model, since callbacks did not include checkpoint creation') self.model.model.save(self.output_folder + \"/\" + self.model.model_id", "\"/\" + self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq: def __init__(self, train_config = None,", "i in range(len(self.network_config['layer_sizes']) + 1): if i == 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim", "data_generator_val self.warm_start = warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model() 
self.__load_weights() try_gen_folder(folder = self.output_folder,", "= int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1: self.label_dim =", "warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation = False. No folders will be", "# try: # os.makedirs(tmp_dir_str) # except: # print('Some problem occured when creating the", "need to supply a network config dict' self.model_id = uuid.uuid1().hex + '_' +", "self.__load_file(file_index = self.indexes[index // self.batches_per_file]) # Generate data batch_ids = np.arange(((index % self.batches_per_file)", "for arbitrary input file sizes ? # Initialization self.batch_size = batch_size #self.labels =", "= None, output_folder = None, warm_start = False, allow_abs_path_folder_generation = False, ): self.train_config", "self.batch_size)) def __getitem__(self, index): 'Generate one batch of data' # Generate indexes of", "load model weights and start training from there ! return def train_model(self, save_history", "but setting allow_abs_path_folder_generation = False. 
No folders will be generated.') # return #", "allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder def __get_loss(self): if self.train_config['loss'] == 'huber':", "0: # tmp_dir_str += output_folder_list[i] # else: # tmp_dir_str += '/' + output_folder_list[i]", "batch_size samples' # X : (n_samples, *dim, n_channels) # Initialization X = np.empty((self.batch_size,", "self.model.to_yaml() assert self.save_folder is not None, 'You did not supply a folder for", "= 0 # while i < len(output_folder_list): # if i == 0: #", "try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder def __get_loss(self): if", "= 'val_loss', min_delta = 0, verbose = 1, patience = 10)) elif cb_tmp", "assert network_config is not None, 'You need to supply a network config dict'", "def __compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer = self.optimizer, metrics = self.metrics) def __load_weights(self):", "as pd import psutil import pickle #import kde_info #from lanfactory.config import import tensorflow", "self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim =", "self.__get_callbacks() self.__compile_model() self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder", "from there ! 
return def train_model(self, save_history = True , verbose = 1):", "= 0.0001, min_lr = 0.00000001)) else: print('Provided a string for a callback function", "# # # while i < len(output_folder_list): # if not output_folder_list[i]: # output_folder_list.pop(i)", "= warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model() self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation =", "config dict' self.model_id = uuid.uuid1().hex + '_' + generative_model_id self.save_folder = save_folder self.input_shape", "not output_folder_list[i]: # output_folder_list.pop(i) # else: # i += 1 # if rel_folder:", "the number of batches per epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size)) def", "to supply a network config dict' self.model_id = uuid.uuid1().hex + '_' + generative_model_id", "__load_weights(self): # If warmstart == True, we load model weights and start training", "# if i == 0: # tmp_dir_str += output_folder_list[i] # else: # tmp_dir_str", "X, y = self.__data_generation(batch_ids) return X, y def on_epoch_end(self): 'Updates indexes after each", "List physical devices #print(tf.config.list_physical_devices()) # Do I allow for arbitrary input file sizes", "#print('loading new datafile') #print('batch: ', index) #print('new file loaded:', index // self.batches_per_file) self.__load_file(file_index", "verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id + \"_training_history.csv\") if not", "self.optimizer = 'adam' return def __get_metrics(self): self.metrics = self.train_config['metrics'] return def __get_callbacks(self): self.cb_list", "# def __try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/') # # Check if folder string", "i == 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim = 
self.input_shape, activation = self.network_config['activations'][i])) else:", "== 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1], activation = self.network_config['activations'][i - 1])) else:", "activation = self.network_config['activations'][i])) else: if self.network_config['layer_types'][i - 1] == 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i", "True, we load model weights and start training from there ! return def", "+= 1 # if rel_folder: # output_folder_list[1] = '/' + output_folder_list[1] # output_folder_list.pop(0)", "= label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data = None # Get", "shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :]", "% self.batches_per_file) + 1) * self.batch_size, 1) X, y = self.__data_generation(batch_ids) return X,", "# while i < len(output_folder_list): # if i == 0: # tmp_dir_str +=", "# tmp_dir_str = '' # i = 0 # while i < len(output_folder_list):", "= 1 return #return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape class KerasModel: def __init__(self,", "import pickle #import kde_info #from lanfactory.config import import tensorflow as tf from tensorflow", "self.metrics) def __load_weights(self): # If warmstart == True, we load model weights and", "output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val = data_generator_val self.warm_start = warm_start", "not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation = False. 
No", "True: np.random.shuffle(self.indexes) def __data_generation(self, batch_ids = None): 'Generates data containing batch_size samples' #", "np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] =", "'mse' return def __get_optimizer(self): # Adam example here needs optimizer only as a", "uuid import os import pandas as pd import psutil import pickle #import kde_info", "model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1], activation = self.network_config['activations'][i - 1])) else: raise ValueError(\"Only", "'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1, patience = 5, verbose = 1,", "network config\") return model def _save_model_yaml(self, allow_abs_path_folder_generation = False): spec = self.model.to_yaml() assert", "init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs':", "= input_shape self.network_config = network_config self.model = self.__build_model() def __build_model(self): model = keras.Sequential()", "# print('Found folder: ', tmp_dir_str) # print('Moving on...') # i += 1 #", "tensorflow.python.client import device_lib import warnings from lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates data", "__load_file(self, file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace", "# Do I allow for arbitrary input file sizes ? 
# Initialization self.batch_size", "of: checkpoint, earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer = self.optimizer, metrics", "verbose = 1): history = self.model.model.fit(x = self.data_generator_train, validation_data = self.data_generator_val, epochs =", "= tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse': self.loss_fun = 'mse' return def __get_optimizer(self): #", "folder for saving the model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder +", "train_model(self, save_history = True , verbose = 1): history = self.model.model.fit(x = self.data_generator_train,", "self.model = model self.output_folder = output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val", "0.1, patience = 5, verbose = 1, min_delta = 0.0001, min_lr = 0.00000001))", "--> check your network config\") return model def _save_model_yaml(self, allow_abs_path_folder_generation = False): spec", "'val_loss', verbose = 1, save_best_only = False)) elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor =", "prelog cutoff --> label_preprocessor ? 
label_prelog_cutoff_high = None, ): # List physical devices", "# if rel_folder: # output_folder_list[1] = '/' + output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str", "None, ): # List physical devices #print(tf.config.list_physical_devices()) # Do I allow for arbitrary", "def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict", "a callback function that is none of: checkpoint, earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss", "= None, model = None, output_folder = None, warm_start = False, allow_abs_path_folder_generation =", "allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder + \"/\" + self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq:", "metadata from loading a test file.... # FILL IN # self.file_shape_dict = self.__init_file_shape()", "< np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not None: y[y > np.log(self.label_prelog_cutoff_high)] =", "self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks = self.cb_list, verbose = verbose, ) if save_history:", "try: # os.makedirs(tmp_dir_str) # except: # print('Some problem occured when creating the directory", "< len(output_folder_list): # if i == 0: # tmp_dir_str += output_folder_list[i] # else:", "# label prelog cutoff --> label_preprocessor ? 
label_prelog_cutoff_high = None, ): # List", "= [] for cb_tmp in self.train_config['callbacks']: if cb_tmp == 'checkpoint': ckpt_file_name = self.output_folder", "self.data_generator_train = data_generator_train self.data_generator_val = data_generator_val self.warm_start = warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks()", "not find folder: ', tmp_dir_str) # print('Creating it...') # try: # os.makedirs(tmp_dir_str) #", "not output_folder_list[0]: # if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path provided, but setting", "__compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer = self.optimizer, metrics = self.metrics) def __load_weights(self): #", "1, patience = 10)) elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor =", "== True: np.random.shuffle(self.indexes) def __data_generation(self, batch_ids = None): 'Generates data containing batch_size samples'", "if self.network_config['layer_types'][i - 1] == 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1], activation =", "self.batches_per_file]) # Generate data batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index %", "* self.batch_size, 1) X, y = self.__data_generation(batch_ids) return X, y def on_epoch_end(self): 'Updates", "input_shape = 10, save_folder = None, generative_model_id = 'ddm'): assert network_config is not", "as a functions or class too if self.train_config['optimizer'] == 'adam': self.optimizer = 'adam'", "self.shuffle == True: np.random.shuffle(self.indexes) def __data_generation(self, batch_ids = None): 'Generates data containing batch_size", "if i == 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim = self.input_shape, activation = self.network_config['activations'][i]))", "self.__get_loss() self.__get_optimizer() 
self.__get_metrics() self.__get_callbacks() self.__compile_model() self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) #", "= self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index])) def", "# Generate indexes of the batch # Find list of IDs #file_IDs_temp =", "+= output_folder_list[i] # else: # tmp_dir_str += '/' + output_folder_list[i] # if not", "== 'huber': self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse': self.loss_fun = 'mse' return", "= shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data =", "for i in range(len(self.network_config['layer_sizes']) + 1): if i == 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i],", "psutil import pickle #import kde_info #from lanfactory.config import import tensorflow as tf from", "+ self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq: def __init__(self, train_config = None, data_generator_train", "self.__data_generation(batch_ids) return X, y def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes =", "# while i < len(output_folder_list): # if not output_folder_list[i]: # output_folder_list.pop(i) # else:", "model = None, output_folder = None, warm_start = False, allow_abs_path_folder_generation = False, ):", "did not include checkpoint creation') self.model.model.save(self.output_folder + \"/\" + self.model.model_id + \"_model_final.h5\") def", "((index % self.batches_per_file) + 1) * self.batch_size, 1) X, y = self.__data_generation(batch_ids) return", "> np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X, y def __load_file(self, 
file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index],", "'/' + self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose = 1, save_best_only", "label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data = None # Get metadata", "10)) elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1, patience =", "n_channels) # Initialization X = np.empty((self.batch_size, self.input_dim), dtype = np.float32) y = np.empty((self.batch_size,", "allow for arbitrary input file sizes ? # Initialization self.batch_size = batch_size #self.labels", "file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace =", "callbacks did not include checkpoint creation') self.model.model.save(self.output_folder + \"/\" + self.model.model_id + \"_model_final.h5\")", "cb_tmp == 'checkpoint': ckpt_file_name = self.output_folder + '/' + self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name,", "= None, data_generator_val = None, model = None, output_folder = None, warm_start =", "data batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1)", "if self.label_prelog_cutoff_high is not None: y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X, y", "self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder + \"/\" + self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class", "'ddm'): assert network_config is not None, 'You need to supply a network config", "self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model() 
self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO", "#self.tmp_file = #print('index') #print('debugging') #print('loading new datafile') #print('batch: ', index) #print('new file loaded:',", "* self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self, index): 'Generate one batch of data' #", "warnings from lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self,", "dtype = np.float32) X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids,", "= self.network_config['activations'][i - 1])) else: raise ValueError(\"Only Dense Layers for now --> check", "self.optimizer as a functions or class too if self.train_config['optimizer'] == 'adam': self.optimizer =", "be generated.') # return # else: # rel_folder = True # i =", "#import kde_info #from lanfactory.config import import tensorflow as tf from tensorflow import keras", "= np.arange(((index % self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size,", "= 0 # # # while i < len(output_folder_list): # if not output_folder_list[i]:", "output_folder_list.pop(i) # else: # i += 1 # if rel_folder: # output_folder_list[1] =", "__len__(self): 'Denotes the number of batches per epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) /", "None, 'You need to supply a network config dict' self.model_id = uuid.uuid1().hex +", "creating the directory ', tmp_dir_str) # else: # print('Found folder: ', tmp_dir_str) #", "the model, since callbacks did not include checkpoint creation') self.model.model.save(self.output_folder + \"/\" +", "not include checkpoint creation') self.model.model.save(self.output_folder + \"/\" + self.model.model_id + \"_model_final.h5\") def _get_model(self):", "'/' + 
self.file_IDs[file_index])) def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape: ',", "batch # Find list of IDs #file_IDs_temp = [self.file_IDs[k] for k in indexes]", "False # i = 0 # # # while i < len(output_folder_list): #", "output_folder_list[0]: # if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation", "= pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data']", "10, save_folder = None, generative_model_id = 'ddm'): assert network_config is not None, 'You", "'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) >", "if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation = False.", "= False): spec = self.model.to_yaml() assert self.save_folder is not None, 'You did not", "cutoff --> label_preprocessor ? label_prelog_cutoff_high = None, ): # List physical devices #print(tf.config.list_physical_devices())", "return X, y def __load_file(self, file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0],", "#print(tf.config.list_physical_devices()) # Do I allow for arbitrary input file sizes ? 
# Initialization", "np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not None: y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X,", "X, y def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.file_IDs)) if", "= {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1]", "for saving the model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder + \"/\"", "def __get_metrics(self): self.metrics = self.train_config['metrics'] return def __get_callbacks(self): self.cb_list = [] for cb_tmp", "self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose = 1, save_best_only = False)) elif cb_tmp ==", "# tmp_dir_str += '/' + output_folder_list[i] # if not os.path.exists(tmp_dir_str): # print('Did not", "and start training from there ! return def train_model(self, save_history = True ,", "self.train_config['loss'] == 'mse': self.loss_fun = 'mse' return def __get_optimizer(self): # Adam example here", "# else: # rel_folder = False # i = 0 # # #", "_save_model_yaml(self, allow_abs_path_folder_generation = False): spec = self.model.to_yaml() assert self.save_folder is not None, 'You", "os.path.exists(tmp_dir_str): # print('Did not find folder: ', tmp_dir_str) # print('Creating it...') # try:", "training_data_folder self.tmp_data = None # Get metadata from loading a test file.... #", "class ModelTrainerKerasSeq: def __init__(self, train_config = None, data_generator_train = None, data_generator_val = None,", "or self.tmp_data == None: #self.tmp_file = #print('index') #print('debugging') #print('loading new datafile') #print('batch: ',", "! 
return def train_model(self, save_history = True , verbose = 1): history =", "that is none of: checkpoint, earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer", "as tf from tensorflow import keras from tensorflow.keras.models import load_model from tensorflow.python.client import", "= 1, save_best_only = False)) elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta", "a string for a callback function that is none of: checkpoint, earlystopping, reducelr')", "#print('new file loaded:', index // self.batches_per_file) self.__load_file(file_index = self.indexes[index // self.batches_per_file]) # Generate", "self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size, 1) X, y = self.__data_generation(batch_ids)", "we load model weights and start training from there ! return def train_model(self,", ", verbose = 1): history = self.model.model.fit(x = self.data_generator_train, validation_data = self.data_generator_val, epochs", "# if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation =", "self.input_shape, activation = self.network_config['activations'][i])) else: if self.network_config['layer_types'][i - 1] == 'dense': model.add(keras.layers.Dense(units =", "if index % self.batches_per_file == 0 or self.tmp_data == None: #self.tmp_file = #print('index')", "True # i = 1 # else: # rel_folder = False # i", "1): history = self.model.model.fit(x = self.data_generator_train, validation_data = self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks", "in self.train_config['callbacks']: if cb_tmp == 'checkpoint': ckpt_file_name = self.output_folder + '/' + self.model.model_id", "__get_metrics(self): self.metrics = self.train_config['metrics'] return def __get_callbacks(self): self.cb_list = [] for cb_tmp in", "= 5, verbose = 1, min_delta = 0.0001, 
min_lr = 0.00000001)) else: print('Provided", "here needs optimizer only as a string # We can have self.optimizer as", "indexes] if index % self.batches_per_file == 0 or self.tmp_data == None: #self.tmp_file =", "train_config self.model = model self.output_folder = output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train = data_generator_train", "lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, file_IDs, batch_size=32,", "Dense Layers for now --> check your network config\") return model def _save_model_yaml(self,", "allow_abs_path_folder_generation = False. No folders will be generated.') # return # else: #", "np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not None: y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high)", "final state of the model, since callbacks did not include checkpoint creation') self.model.model.save(self.output_folder", "= self.network_config['activations'][i])) else: if self.network_config['layer_types'][i - 1] == 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i -", "+ '_' + generative_model_id self.save_folder = save_folder self.input_shape = input_shape self.network_config = network_config", "did not supply a folder for saving the model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation", "else: self.label_dim = 1 return #return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape class KerasModel:", "the model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder + \"/\" + self.model_id", "= np.float32) y = np.empty((self.batch_size, self.label_dim), dtype = np.float32) X = self.tmp_data['data'][batch_ids, :]", "+ \"/\" + self.model.model_id + 
\"_model_final.h5\") def _get_model(self): return self.model.model # def __try_gen_output_folder(self):", "= 'adam' return def __get_metrics(self): self.metrics = self.train_config['metrics'] return def __get_callbacks(self): self.cb_list =", "np import uuid import os import pandas as pd import psutil import pickle", "or class too if self.train_config['optimizer'] == 'adam': self.optimizer = 'adam' return def __get_metrics(self):", "i < len(output_folder_list): # if i == 0: # tmp_dir_str += output_folder_list[i] #", "output_folder_list[i] # else: # tmp_dir_str += '/' + output_folder_list[i] # if not os.path.exists(tmp_dir_str):", "def __get_optimizer(self): # Adam example here needs optimizer only as a string #", "1], activation = self.network_config['activations'][i - 1])) else: raise ValueError(\"Only Dense Layers for now", "the directory ', tmp_dir_str) # else: # print('Found folder: ', tmp_dir_str) # print('Moving", "def __init__(self, network_config = None, input_shape = 10, save_folder = None, generative_model_id =", "tmp_dir_str) # else: # print('Found folder: ', tmp_dir_str) # print('Moving on...') # i", "If warmstart == True, we load model weights and start training from there", "# If warmstart == True, we load model weights and start training from", "= self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes the number of batches per epoch' return", "batch_ids = None): 'Generates data containing batch_size samples' # X : (n_samples, *dim,", "): self.train_config = train_config self.model = model self.output_folder = output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation", "only as a string # We can have self.optimizer as a functions or", "i += 1 # if rel_folder: # output_folder_list[1] = '/' + output_folder_list[1] #", "ckpt_file_name = self.output_folder + '/' + self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss',", "__init__(self, 
train_config = None, data_generator_train = None, data_generator_val = None, model = None,", "__build_model(self): model = keras.Sequential() for i in range(len(self.network_config['layer_sizes']) + 1): if i ==", "self.output_folder + '/' + self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose =", "each epoch' self.indexes = np.arange(len(self.file_IDs)) if self.shuffle == True: np.random.shuffle(self.indexes) def __data_generation(self, batch_ids", "= save_folder self.input_shape = input_shape self.network_config = network_config self.model = self.__build_model() def __build_model(self):", "0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim = self.input_shape, activation = self.network_config['activations'][i])) else: if self.network_config['layer_types'][i", "self.batches_per_file) self.__load_file(file_index = self.indexes[index // self.batches_per_file]) # Generate data batch_ids = np.arange(((index %", "== 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim = self.input_shape, activation = self.network_config['activations'][i])) else: if", "pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels':", "label prelog cutoff --> label_preprocessor ? 
label_prelog_cutoff_high = None, ): # List physical", "#self.training_data_folder = training_data_folder self.tmp_data = None # Get metadata from loading a test", "= '/' + output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str = '' # i =", "# Find list of IDs #file_IDs_temp = [self.file_IDs[k] for k in indexes] if", "def __data_generation(self, batch_ids = None): 'Generates data containing batch_size samples' # X :", "patience = 10)) elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1,", "if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id + \"_training_history.csv\") if not 'checkpoint' in", "= self.__data_generation(batch_ids) return X, y def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes", "= self.__build_model() def __build_model(self): model = keras.Sequential() for i in range(len(self.network_config['layer_sizes']) + 1):", "= training_data_folder self.tmp_data = None # Get metadata from loading a test file....", "'rb')) #print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape}", "= allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val = data_generator_val self.warm_start = warm_start self.__get_loss() self.__get_optimizer()", ": (n_samples, *dim, n_channels) # Initialization X = np.empty((self.batch_size, self.input_dim), dtype = np.float32)", "= np.log(self.label_prelog_cutoff_high) return X, y def __load_file(self, file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx", "= labels self.file_IDs = file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high =", "= data_generator_train self.data_generator_val = data_generator_val self.warm_start = warm_start 
self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model()", "weights and start training from there ! return def train_model(self, save_history = True", "since callbacks did not include checkpoint creation') self.model.model.save(self.output_folder + \"/\" + self.model.model_id +", "import load_model from tensorflow.python.client import device_lib import warnings from lanfactory.utils import try_gen_folder class", "index % self.batches_per_file == 0 or self.tmp_data == None: #self.tmp_file = #print('index') #print('debugging')", "self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1: self.label_dim", "self.file_IDs[file_index])) def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape)", "if self.train_config['optimizer'] == 'adam': self.optimizer = 'adam' return def __get_metrics(self): self.metrics = self.train_config['metrics']", "= None, input_shape = 10, save_folder = None, generative_model_id = 'ddm'): assert network_config", "def train_model(self, save_history = True , verbose = 1): history = self.model.model.fit(x =", "of the model, since callbacks did not include checkpoint creation') self.model.model.save(self.output_folder + \"/\"", "return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self, index): 'Generate one batch of", "True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/' +", "self.model.model.save(self.output_folder + \"/\" + self.model.model_id + \"_model_final.h5\") def _get_model(self): return self.model.model # def", "list of IDs #file_IDs_temp = [self.file_IDs[k] for k in 
indexes] if index %", "tmp_dir_str += '/' + output_folder_list[i] # if not os.path.exists(tmp_dir_str): # print('Did not find", "import keras from tensorflow.keras.models import load_model from tensorflow.python.client import device_lib import warnings from", "__get_callbacks(self): self.cb_list = [] for cb_tmp in self.train_config['callbacks']: if cb_tmp == 'checkpoint': ckpt_file_name", "+ output_folder_list[i] # if not os.path.exists(tmp_dir_str): # print('Did not find folder: ', tmp_dir_str)", "kde_info #from lanfactory.config import import tensorflow as tf from tensorflow import keras from", "False): spec = self.model.to_yaml() assert self.save_folder is not None, 'You did not supply", "'checkpoint' in self.train_config['callbacks']: # Save Model print('Saving final state of the model, since", "training from there ! return def train_model(self, save_history = True , verbose =", "+ '/' + self.file_IDs[file_index])) def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape:", "np.random.shuffle(self.indexes) def __data_generation(self, batch_ids = None): 'Generates data containing batch_size samples' # X", "class too if self.train_config['optimizer'] == 'adam': self.optimizer = 'adam' return def __get_metrics(self): self.metrics", "? label_prelog_cutoff_high = None, ): # List physical devices #print(tf.config.list_physical_devices()) # Do I", "self.save_folder is not None, 'You did not supply a folder for saving the", "= self.output_folder.split('/') # # Check if folder string supplied defines a relative or", "dict' self.model_id = uuid.uuid1().hex + '_' + generative_model_id self.save_folder = save_folder self.input_shape =", "Do I allow for arbitrary input file sizes ? 
# Initialization self.batch_size =", "a folder for saving the model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder", "== 'checkpoint': ckpt_file_name = self.output_folder + '/' + self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor", "np.empty((self.batch_size, self.input_dim), dtype = np.float32) y = np.empty((self.batch_size, self.label_dim), dtype = np.float32) X", "np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape class KerasModel: def __init__(self, network_config = None, input_shape", "= 0.00000001)) else: print('Provided a string for a callback function that is none", "AF-TODO import folder def __get_loss(self): if self.train_config['loss'] == 'huber': self.loss_fun = tf.keras.losses.Huber() elif", "--> label_preprocessor ? label_prelog_cutoff_high = None, ): # List physical devices #print(tf.config.list_physical_devices()) #", "batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7, # label prelog cutoff --> label_preprocessor ? 
label_prelog_cutoff_high", "== 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, verbose = 1, patience =", "warmstart == True, we load model weights and start training from there !", "output_folder_list[1] = '/' + output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str = '' # i", "self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self, index): 'Generate one batch of data' # Generate", "= np.arange(len(self.file_IDs)) if self.shuffle == True: np.random.shuffle(self.indexes) def __data_generation(self, batch_ids = None): 'Generates", "a string # We can have self.optimizer as a functions or class too", "int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self, index): 'Generate one batch of data'", "self.file_IDs[0]).shape class KerasModel: def __init__(self, network_config = None, input_shape = 10, save_folder =", "\"_model_final.h5\") def _get_model(self): return self.model.model # def __try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/') #", "verbose = verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id + \"_training_history.csv\")", "except: # print('Some problem occured when creating the directory ', tmp_dir_str) # else:", "one batch of data' # Generate indexes of the batch # Find list", "self.file_shape_dict['labels'][1] else: self.label_dim = 1 return #return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape class", "# Save Model print('Saving final state of the model, since callbacks did not", "No folders will be generated.') # return # else: # rel_folder = True", "self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is not None: y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low)", "+ \"_model_spec.yaml\", \"w\").write(spec) class 
ModelTrainerKerasSeq: def __init__(self, train_config = None, data_generator_train = None,", "self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val = data_generator_val self.warm_start = warm_start self.__get_loss()", "= self.optimizer, metrics = self.metrics) def __load_weights(self): # If warmstart == True, we", "ModelTrainerKerasSeq: def __init__(self, train_config = None, data_generator_train = None, data_generator_val = None, model", "FILL IN # self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes the number of", "config\") return model def _save_model_yaml(self, allow_abs_path_folder_generation = False): spec = self.model.to_yaml() assert self.save_folder", "directory ', tmp_dir_str) # else: # print('Found folder: ', tmp_dir_str) # print('Moving on...')", "= 10)) elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1, patience", "patience = 5, verbose = 1, min_delta = 0.0001, min_lr = 0.00000001)) else:", "needs optimizer only as a string # We can have self.optimizer as a", "= self.input_shape, activation = self.network_config['activations'][i])) else: if self.network_config['layer_types'][i - 1] == 'dense': model.add(keras.layers.Dense(units", "self.cb_list = [] for cb_tmp in self.train_config['callbacks']: if cb_tmp == 'checkpoint': ckpt_file_name =", "samples' # X : (n_samples, *dim, n_channels) # Initialization X = np.empty((self.batch_size, self.input_dim),", "+ self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose = 1, save_best_only =", "self.loss_fun, optimizer = self.optimizer, metrics = self.metrics) def __load_weights(self): # If warmstart ==", "numpy as np import uuid import os import pandas as pd import psutil", "Initialization self.batch_size = batch_size #self.labels = labels 
self.file_IDs = file_IDs self.shuffle = shuffle", "= self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is not None: y[y < np.log(self.label_prelog_cutoff_low)] =", "self.network_config['layer_sizes'][i], input_dim = self.input_shape, activation = self.network_config['activations'][i])) else: if self.network_config['layer_types'][i - 1] ==", "= self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder + \"/\" + self.model_id + \"_model_spec.yaml\", \"w\").write(spec)", "i = 1 # else: # rel_folder = False # i = 0", "False, allow_abs_path_folder_generation = False, ): self.train_config = train_config self.model = model self.output_folder =", "rel_folder: # output_folder_list[1] = '/' + output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str = ''", "test file.... # FILL IN # self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes", "generative_model_id self.save_folder = save_folder self.input_shape = input_shape self.network_config = network_config self.model = self.__build_model()", "self.network_config['activations'][i])) else: if self.network_config['layer_types'][i - 1] == 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1],", "def __init__(self, train_config = None, data_generator_train = None, data_generator_val = None, model =", "== 'mse': self.loss_fun = 'mse' return def __get_optimizer(self): # Adam example here needs", "monitor = 'val_loss', verbose = 1, save_best_only = False)) elif cb_tmp == 'earlystopping':", "np.log(self.label_prelog_cutoff_high) return X, y def __load_file(self, file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx =", "Model print('Saving final state of the model, since callbacks did not include checkpoint", "+ self.model.model_id + \"_model_final.h5\") def _get_model(self): return self.model.model # def 
__try_gen_output_folder(self): # output_folder_list", "= np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not None: y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return", "os.makedirs(tmp_dir_str) # except: # print('Some problem occured when creating the directory ', tmp_dir_str)", "None: y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not None: y[y >", "string # We can have self.optimizer as a functions or class too if", "containing batch_size samples' # X : (n_samples, *dim, n_channels) # Initialization X =", "string for a callback function that is none of: checkpoint, earlystopping, reducelr') def", "os import pandas as pd import psutil import pickle #import kde_info #from lanfactory.config", "label_preprocessor ? label_prelog_cutoff_high = None, ): # List physical devices #print(tf.config.list_physical_devices()) # Do", "save_folder = None, generative_model_id = 'ddm'): assert network_config is not None, 'You need", "train_config = None, data_generator_train = None, data_generator_val = None, model = None, output_folder", "self.train_config['metrics'] return def __get_callbacks(self): self.cb_list = [] for cb_tmp in self.train_config['callbacks']: if cb_tmp", "= 'val_loss', factor = 0.1, patience = 5, verbose = 1, min_delta =", "y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is not None: y[y < np.log(self.label_prelog_cutoff_low)]", "= self.network_config['layer_sizes'][i - 1], activation = self.network_config['activations'][i - 1])) else: raise ValueError(\"Only Dense", "+ '/' + self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose = 1,", "self.file_IDs = file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high 
#self.training_data_folder", "#tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is not None: y[y", "def __load_file(self, file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0],", "else: # rel_folder = True # i = 1 # else: # rel_folder", "save_best_only = False)) elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0,", "None, input_shape = 10, save_folder = None, generative_model_id = 'ddm'): assert network_config is", "not None: y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X, y def __load_file(self, file_index):", "self.data_generator_val = data_generator_val self.warm_start = warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model() self.__load_weights() try_gen_folder(folder", "= self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index])) def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0],", "pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data'] =", "None # Get metadata from loading a test file.... 
# FILL IN #", "import warnings from lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def", "print('Saving final state of the model, since callbacks did not include checkpoint creation')", "network_config = None, input_shape = 10, save_folder = None, generative_model_id = 'ddm'): assert", "class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low =", "i == 0: # tmp_dir_str += output_folder_list[i] # else: # tmp_dir_str += '/'", "'val_loss', min_delta = 0, verbose = 1, patience = 10)) elif cb_tmp ==", "- 1] == 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1], activation = self.network_config['activations'][i -", "shuffle=True, label_prelog_cutoff_low = 1e-7, # label prelog cutoff --> label_preprocessor ? label_prelog_cutoff_high =", "file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder = training_data_folder", "indexes of the batch # Find list of IDs #file_IDs_temp = [self.file_IDs[k] for", "None, data_generator_train = None, data_generator_val = None, model = None, output_folder = None,", "None, model = None, output_folder = None, warm_start = False, allow_abs_path_folder_generation = False,", "y = np.empty((self.batch_size, self.label_dim), dtype = np.float32) X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1]", "y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not None: y[y > np.log(self.label_prelog_cutoff_high)]", "model def _save_model_yaml(self, allow_abs_path_folder_generation = False): spec = self.model.to_yaml() assert self.save_folder is not", ":-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is not None: y[y <", 
"def __try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/') # # Check if folder string supplied", "+ 1) * self.batch_size, 1) X, y = self.__data_generation(batch_ids) return X, y def", "def _save_model_yaml(self, allow_abs_path_folder_generation = False): spec = self.model.to_yaml() assert self.save_folder is not None,", "allow_abs_path_folder_generation = False): spec = self.model.to_yaml() assert self.save_folder is not None, 'You did", "== 0: # tmp_dir_str += output_folder_list[i] # else: # tmp_dir_str += '/' +", "tmp_dir_str += output_folder_list[i] # else: # tmp_dir_str += '/' + output_folder_list[i] # if", "= 0.1, patience = 5, verbose = 1, min_delta = 0.0001, min_lr =", "keras from tensorflow.keras.models import load_model from tensorflow.python.client import device_lib import warnings from lanfactory.utils", "class KerasModel: def __init__(self, network_config = None, input_shape = 10, save_folder = None,", "X : (n_samples, *dim, n_channels) # Initialization X = np.empty((self.batch_size, self.input_dim), dtype =", "loading a test file.... 
# FILL IN # self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def", "folder: ', tmp_dir_str) # print('Creating it...') # try: # os.makedirs(tmp_dir_str) # except: #", "self.network_config['layer_types'][i - 1] == 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1], activation = self.network_config['activations'][i", "print('Creating it...') # try: # os.makedirs(tmp_dir_str) # except: # print('Some problem occured when", "tmp_dir_str) # print('Creating it...') # try: # os.makedirs(tmp_dir_str) # except: # print('Some problem", "self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data = None #", "= True # i = 1 # else: # rel_folder = False #", "= None, ): # List physical devices #print(tf.config.list_physical_devices()) # Do I allow for", "tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse': self.loss_fun = 'mse' return def __get_optimizer(self): # Adam", "/ self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1: self.label_dim = self.file_shape_dict['labels'][1] else:", "self.train_config['loss'] == 'huber': self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse': self.loss_fun = 'mse'", "self.output_folder.split('/') # # Check if folder string supplied defines a relative or absolute", "defines a relative or absolute path # if not output_folder_list[0]: # if not", "None, warm_start = False, allow_abs_path_folder_generation = False, ): self.train_config = train_config self.model =", "raise ValueError(\"Only Dense Layers for now --> check your network config\") return model", "def __get_loss(self): if self.train_config['loss'] == 'huber': self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse':", "0 # while i < len(output_folder_list): # if i == 0: # tmp_dir_str", "output_folder = 
None, warm_start = False, allow_abs_path_folder_generation = False, ): self.train_config = train_config", "Layers for now --> check your network config\") return model def _save_model_yaml(self, allow_abs_path_folder_generation", "creation') self.model.model.save(self.output_folder + \"/\" + self.model.model_id + \"_model_final.h5\") def _get_model(self): return self.model.model #", "self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True)", "example here needs optimizer only as a string # We can have self.optimizer", "on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.file_IDs)) if self.shuffle == True:", "# FILL IN # self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes the number", "import psutil import pickle #import kde_info #from lanfactory.config import import tensorflow as tf", "state of the model, since callbacks did not include checkpoint creation') self.model.model.save(self.output_folder +", "self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size, 1) X, y", "elif self.train_config['loss'] == 'mse': self.loss_fun = 'mse' return def __get_optimizer(self): # Adam example", "output_folder_list.pop(0) # tmp_dir_str = '' # i = 0 # while i <", "= np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels']", "self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index]))", "try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder + \"/\" + self.model_id + 
\"_model_spec.yaml\",", "self.model = self.__build_model() def __build_model(self): model = keras.Sequential() for i in range(len(self.network_config['layer_sizes']) +", "network_config self.model = self.__build_model() def __build_model(self): model = keras.Sequential() for i in range(len(self.network_config['layer_sizes'])", "self.train_config['callbacks']: # Save Model print('Saving final state of the model, since callbacks did", "# else: # print('Found folder: ', tmp_dir_str) # print('Moving on...') # i +=", "ValueError(\"Only Dense Layers for now --> check your network config\") return model def", "# Initialization X = np.empty((self.batch_size, self.input_dim), dtype = np.float32) y = np.empty((self.batch_size, self.label_dim),", "not None, 'You need to supply a network config dict' self.model_id = uuid.uuid1().hex", "_get_model(self): return self.model.model # def __try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/') # # Check", "if len(self.file_shape_dict['labels']) > 1: self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim = 1 return #return", "rel_folder = True # i = 1 # else: # rel_folder = False", "allow_abs_path_folder_generation) open(self.save_folder + \"/\" + self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq: def __init__(self,", "now --> check your network config\") return model def _save_model_yaml(self, allow_abs_path_folder_generation = False):", "data_generator_val = None, model = None, output_folder = None, warm_start = False, allow_abs_path_folder_generation", "self.model.model.compile(loss = self.loss_fun, optimizer = self.optimizer, metrics = self.metrics) def __load_weights(self): # If", "self.__get_metrics() self.__get_callbacks() self.__compile_model() self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import", "'val_loss', factor = 0.1, patience = 5, verbose = 1, 
min_delta = 0.0001,", "min_delta = 0.0001, min_lr = 0.00000001)) else: print('Provided a string for a callback", "self.model.model_id + \"_model_final.h5\") def _get_model(self): return self.model.model # def __try_gen_output_folder(self): # output_folder_list =", "network config dict' self.model_id = uuid.uuid1().hex + '_' + generative_model_id self.save_folder = save_folder", "#return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape class KerasModel: def __init__(self, network_config = None,", "batch of data' # Generate indexes of the batch # Find list of", "none of: checkpoint, earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer = self.optimizer,", "not None: y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not None: y[y", "load_model from tensorflow.python.client import device_lib import warnings from lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence):", "\"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq: def __init__(self, train_config = None, data_generator_train = None, data_generator_val", "callbacks = self.cb_list, verbose = verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" +", "if not os.path.exists(tmp_dir_str): # print('Did not find folder: ', tmp_dir_str) # print('Creating it...')", "# print('Did not find folder: ', tmp_dir_str) # print('Creating it...') # try: #", "elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1, patience = 5,", "self.cb_list, verbose = verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id +", "from tensorflow.python.client import device_lib import warnings from lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates", "self.save_folder = 
save_folder self.input_shape = input_shape self.network_config = network_config self.model = self.__build_model() def", "= self.data_generator_train, validation_data = self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks = self.cb_list, verbose =", "cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1, patience = 5, verbose", "0 or self.tmp_data == None: #self.tmp_file = #print('index') #print('debugging') #print('loading new datafile') #print('batch:", "'' # i = 0 # while i < len(output_folder_list): # if i", "# print('Creating it...') # try: # os.makedirs(tmp_dir_str) # except: # print('Some problem occured", "Get metadata from loading a test file.... # FILL IN # self.file_shape_dict =", "+ self.file_IDs[0]).shape class KerasModel: def __init__(self, network_config = None, input_shape = 10, save_folder", "validation_data = self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks = self.cb_list, verbose = verbose, )", "output_folder_list[i]: # output_folder_list.pop(i) # else: # i += 1 # if rel_folder: #", "'adam': self.optimizer = 'adam' return def __get_metrics(self): self.metrics = self.train_config['metrics'] return def __get_callbacks(self):", "init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim", "= None): 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)", "model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder + \"/\" + self.model_id +", "5, verbose = 1, min_delta = 0.0001, min_lr = 0.00000001)) else: print('Provided a", "= self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = 
self.tmp_data['labels'][shuffle_idx] #return", "= file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder =", "range(len(self.network_config['layer_sizes']) + 1): if i == 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim = self.input_shape,", "= self.model.model.fit(x = self.data_generator_train, validation_data = self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks = self.cb_list,", "from lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, file_IDs,", "network_config is not None, 'You need to supply a network config dict' self.model_id", "= 'mse' return def __get_optimizer(self): # Adam example here needs optimizer only as", "return self.model.model # def __try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/') # # Check if", "KerasModel: def __init__(self, network_config = None, input_shape = 10, save_folder = None, generative_model_id", "folders will be generated.') # return # else: # rel_folder = True #", "find folder: ', tmp_dir_str) # print('Creating it...') # try: # os.makedirs(tmp_dir_str) # except:", "optimizer only as a string # We can have self.optimizer as a functions", "tensorflow import keras from tensorflow.keras.models import load_model from tensorflow.python.client import device_lib import warnings", "'huber': self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse': self.loss_fun = 'mse' return def", "self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse': self.loss_fun = 'mse' return def __get_optimizer(self):", "as np import uuid import os import pandas as pd import psutil import", "= 1, min_delta = 0.0001, min_lr = 0.00000001)) else: print('Provided a string for", "1e-7, # label prelog cutoff --> label_preprocessor ? 
label_prelog_cutoff_high = None, ): #", "= None, generative_model_id = 'ddm'): assert network_config is not None, 'You need to", "self.train_config = train_config self.model = model self.output_folder = output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train", "= model self.output_folder = output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val =", "not supply a folder for saving the model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation =", "if cb_tmp == 'checkpoint': ckpt_file_name = self.output_folder + '/' + self.model.model_id + '_ckpt.h5'", "self.label_dim), dtype = np.float32) X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids]", "#self.labels = labels self.file_IDs = file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high", "# self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes the number of batches per", "= self.indexes[index // self.batches_per_file]) # Generate data batch_ids = np.arange(((index % self.batches_per_file) *", "label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data = None # Get metadata from loading a", "X = np.empty((self.batch_size, self.input_dim), dtype = np.float32) y = np.empty((self.batch_size, self.label_dim), dtype =", "{'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if", "__init__(self, file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7, # label prelog cutoff --> label_preprocessor", "Generate data batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index % 
self.batches_per_file) +", "return def __get_callbacks(self): self.cb_list = [] for cb_tmp in self.train_config['callbacks']: if cb_tmp ==", "// self.batches_per_file) self.__load_file(file_index = self.indexes[index // self.batches_per_file]) # Generate data batch_ids = np.arange(((index", "np.float32) y = np.empty((self.batch_size, self.label_dim), dtype = np.float32) X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids,", "self.warm_start = warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model() self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation", "in self.train_config['callbacks']: # Save Model print('Saving final state of the model, since callbacks", "warm_start = False, allow_abs_path_folder_generation = False, ): self.train_config = train_config self.model = model", "= self.train_config['n_epochs'], callbacks = self.cb_list, verbose = verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder +", "'_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose = 1, save_best_only = False)) elif cb_tmp", "None, 'You did not supply a folder for saving the model' try_gen_folder(folder =", "self.output_folder = output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val = data_generator_val self.warm_start", "y = self.__data_generation(batch_ids) return X, y def on_epoch_end(self): 'Updates indexes after each epoch'", "0, verbose = 1, patience = 10)) elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor =", "verbose = 1, patience = 10)) elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss',", "if not output_folder_list[i]: # output_folder_list.pop(i) # else: # i += 1 # if", "try_gen_folder class 
DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low", "0 # # # while i < len(output_folder_list): # if not output_folder_list[i]: #", "= keras.Sequential() for i in range(len(self.network_config['layer_sizes']) + 1): if i == 0: model.add(keras.layers.Dense(units", "of batches per epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self, index):", "'Generate one batch of data' # Generate indexes of the batch # Find", "too if self.train_config['optimizer'] == 'adam': self.optimizer = 'adam' return def __get_metrics(self): self.metrics =", "import os import pandas as pd import psutil import pickle #import kde_info #from", "self.batch_size = batch_size #self.labels = labels self.file_IDs = file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low", "= np.float32) X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1]", "self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1: self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim", "model weights and start training from there ! return def train_model(self, save_history =", "self.data_generator_train, validation_data = self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks = self.cb_list, verbose = verbose,", "== True, we load model weights and start training from there ! 
return", "min_delta = 0, verbose = 1, patience = 10)) elif cb_tmp == 'reducelr':", "+ self.model.model_id + \"_training_history.csv\") if not 'checkpoint' in self.train_config['callbacks']: # Save Model print('Saving", "= True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/'", "setting allow_abs_path_folder_generation = False. No folders will be generated.') # return # else:", "'checkpoint': ckpt_file_name = self.output_folder + '/' + self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor =", ") if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id + \"_training_history.csv\") if not 'checkpoint'", "# Adam example here needs optimizer only as a string # We can", "self.tmp_data = None # Get metadata from loading a test file.... # FILL", "self.indexes[index // self.batches_per_file]) # Generate data batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size),", ":] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index])) def __init_file_shape(self): init_file", "= self.metrics) def __load_weights(self): # If warmstart == True, we load model weights", "output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str = '' # i = 0 # while", "shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0]", "# We can have self.optimizer as a functions or class too if self.train_config['optimizer']", "+ generative_model_id self.save_folder = save_folder self.input_shape = input_shape self.network_config = network_config self.model =", "# output_folder_list.pop(i) # 
else: # i += 1 # if rel_folder: # output_folder_list[1]", "from tensorflow import keras from tensorflow.keras.models import load_model from tensorflow.python.client import device_lib import", "generative_model_id = 'ddm'): assert network_config is not None, 'You need to supply a", "self.model.model.fit(x = self.data_generator_train, validation_data = self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks = self.cb_list, verbose", "+ \"_training_history.csv\") if not 'checkpoint' in self.train_config['callbacks']: # Save Model print('Saving final state", "# # Check if folder string supplied defines a relative or absolute path", "of data' # Generate indexes of the batch # Find list of IDs", "the batch # Find list of IDs #file_IDs_temp = [self.file_IDs[k] for k in", "self.model_id = uuid.uuid1().hex + '_' + generative_model_id self.save_folder = save_folder self.input_shape = input_shape", "+ \"/\" + self.model.model_id + \"_training_history.csv\") if not 'checkpoint' in self.train_config['callbacks']: # Save", "supplied defines a relative or absolute path # if not output_folder_list[0]: # if", "a test file.... 
# FILL IN # self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def __len__(self):", "__getitem__(self, index): 'Generate one batch of data' # Generate indexes of the batch", "Initialization X = np.empty((self.batch_size, self.input_dim), dtype = np.float32) y = np.empty((self.batch_size, self.label_dim), dtype", "np.float32) X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if", "1 return #return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape class KerasModel: def __init__(self, network_config", "None, data_generator_val = None, model = None, output_folder = None, warm_start = False,", "optimizer = self.optimizer, metrics = self.metrics) def __load_weights(self): # If warmstart == True,", "def __get_callbacks(self): self.cb_list = [] for cb_tmp in self.train_config['callbacks']: if cb_tmp == 'checkpoint':", "= network_config self.model = self.__build_model() def __build_model(self): model = keras.Sequential() for i in", "y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X, y def __load_file(self, file_index): self.tmp_data =", "i = 0 # # # while i < len(output_folder_list): # if not", "= np.empty((self.batch_size, self.input_dim), dtype = np.float32) y = np.empty((self.batch_size, self.label_dim), dtype = np.float32)", "physical devices #print(tf.config.list_physical_devices()) # Do I allow for arbitrary input file sizes ?", "# i = 1 # else: # rel_folder = False # i =", "= False, ): self.train_config = train_config self.model = model self.output_folder = output_folder self.allow_abs_path_folder_generation", "*dim, n_channels) # Initialization X = np.empty((self.batch_size, self.input_dim), dtype = np.float32) y =", "1, min_delta = 0.0001, min_lr = 0.00000001)) else: print('Provided a string for a", "epoch' self.indexes = np.arange(len(self.file_IDs)) if self.shuffle == True: np.random.shuffle(self.indexes) def 
__data_generation(self, batch_ids =", "= data_generator_val self.warm_start = warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model() self.__load_weights() try_gen_folder(folder =", "# i = 0 # while i < len(output_folder_list): # if i ==", "#print('debugging') #print('loading new datafile') #print('batch: ', index) #print('new file loaded:', index // self.batches_per_file)", "include checkpoint creation') self.model.model.save(self.output_folder + \"/\" + self.model.model_id + \"_model_final.h5\") def _get_model(self): return", "= #print('index') #print('debugging') #print('loading new datafile') #print('batch: ', index) #print('new file loaded:', index", "is not None: y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not None:", "1])) else: raise ValueError(\"Only Dense Layers for now --> check your network config\")", "# Get metadata from loading a test file.... 
# FILL IN # self.file_shape_dict", "= 'ddm'): assert network_config is not None, 'You need to supply a network", "self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder def __get_loss(self):", "assert self.save_folder is not None, 'You did not supply a folder for saving", "if i == 0: # tmp_dir_str += output_folder_list[i] # else: # tmp_dir_str +=", "1) X, y = self.__data_generation(batch_ids) return X, y def on_epoch_end(self): 'Updates indexes after", "if self.train_config['loss'] == 'huber': self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse': self.loss_fun =", "== 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1, patience = 5, verbose =", "is none of: checkpoint, earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer =", "+ self.file_IDs[file_index])) def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape: ', init_file['data'].shape,", "init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size)", "= 10, save_folder = None, generative_model_id = 'ddm'): assert network_config is not None,", "__data_generation(self, batch_ids = None): 'Generates data containing batch_size samples' # X : (n_samples,", "', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] /", "absolute path # if not output_folder_list[0]: # if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder", "np.arange(len(self.file_IDs)) if self.shuffle == True: 
np.random.shuffle(self.indexes) def __data_generation(self, batch_ids = None): 'Generates data", "__get_optimizer(self): # Adam example here needs optimizer only as a string # We", "= np.empty((self.batch_size, self.label_dim), dtype = np.float32) X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y", "path provided, but setting allow_abs_path_folder_generation = False. No folders will be generated.') #", "'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, verbose = 1, patience = 10))", "output_folder_list = self.output_folder.split('/') # # Check if folder string supplied defines a relative", "loaded:', index // self.batches_per_file) self.__load_file(file_index = self.indexes[index // self.batches_per_file]) # Generate data batch_ids", "0.0001, min_lr = 0.00000001)) else: print('Provided a string for a callback function that", "\"/\" + self.model.model_id + \"_model_final.h5\") def _get_model(self): return self.model.model # def __try_gen_output_folder(self): #", "import tensorflow as tf from tensorflow import keras from tensorflow.keras.models import load_model from", "= self.model.to_yaml() assert self.save_folder is not None, 'You did not supply a folder", "# return # else: # rel_folder = True # i = 1 #", "print('Found folder: ', tmp_dir_str) # print('Moving on...') # i += 1 # return", "start training from there ! 
return def train_model(self, save_history = True , verbose", "= [self.file_IDs[k] for k in indexes] if index % self.batches_per_file == 0 or", "model self.output_folder = output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val = data_generator_val", "init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels'])", "self.label_dim = 1 return #return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape class KerasModel: def", "self.loss_fun = 'mse' return def __get_optimizer(self): # Adam example here needs optimizer only", "self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index])) def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb'))", "# if not output_folder_list[0]: # if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path provided,", "# output_folder_list[1] = '/' + output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str = '' #", "np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index])) def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file", "1: self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim = 1 return #return np.load(self.training_data_folder + '/'", "= False, allow_abs_path_folder_generation = False, ): self.train_config = train_config self.model = model self.output_folder", "True , verbose = 1): history = self.model.model.fit(x = self.data_generator_train, validation_data = self.data_generator_val,", "return #return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape class KerasModel: def __init__(self, network_config =", 
"print('Provided a string for a callback function that is none of: checkpoint, earlystopping,", "i = 0 # while i < len(output_folder_list): # if i == 0:", "Find list of IDs #file_IDs_temp = [self.file_IDs[k] for k in indexes] if index", "self.batches_per_file == 0 or self.tmp_data == None: #self.tmp_file = #print('index') #print('debugging') #print('loading new", "'/' + output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str = '' # i = 0", "string supplied defines a relative or absolute path # if not output_folder_list[0]: #", "# Generate data batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index % self.batches_per_file)", "dtype = np.float32) y = np.empty((self.batch_size, self.label_dim), dtype = np.float32) X = self.tmp_data['data'][batch_ids,", "self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1: self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim = 1 return", "self.network_config = network_config self.model = self.__build_model() def __build_model(self): model = keras.Sequential() for i", "self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose = 1, save_best_only = False))", "'_' + generative_model_id self.save_folder = save_folder self.input_shape = input_shape self.network_config = network_config self.model", "problem occured when creating the directory ', tmp_dir_str) # else: # print('Found folder:", "', tmp_dir_str) # print('Creating it...') # try: # os.makedirs(tmp_dir_str) # except: # print('Some", "= True , verbose = 1): history = self.model.model.fit(x = self.data_generator_train, validation_data =", "(n_samples, *dim, n_channels) # Initialization X = np.empty((self.batch_size, self.input_dim), dtype = np.float32) y", "for k in indexes] if index % self.batches_per_file == 0 or self.tmp_data ==", "[self.file_IDs[k] for k in indexes] if index % self.batches_per_file == 0 or self.tmp_data", "replace 
= True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder +", "> 1: self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim = 1 return #return np.load(self.training_data_folder +", "saving the model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) open(self.save_folder + \"/\" +", "return def __get_optimizer(self): # Adam example here needs optimizer only as a string", "Keras' def __init__(self, file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7, # label prelog cutoff", "index): 'Generate one batch of data' # Generate indexes of the batch #", "int(self.file_shape_dict['inputs'][0] / self.batch_size) self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1: self.label_dim = self.file_shape_dict['labels'][1]", "earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer = self.optimizer, metrics = self.metrics)", "= self.network_config['layer_sizes'][i], input_dim = self.input_shape, activation = self.network_config['activations'][i])) else: if self.network_config['layer_types'][i - 1]", "= False. 
No folders will be generated.') # return # else: # rel_folder", "= pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape,", "not 'checkpoint' in self.train_config['callbacks']: # Save Model print('Saving final state of the model,", "tmp_dir_str = '' # i = 0 # while i < len(output_folder_list): #", "self.tmp_data == None: #self.tmp_file = #print('index') #print('debugging') #print('loading new datafile') #print('batch: ', index)", "lanfactory.config import import tensorflow as tf from tensorflow import keras from tensorflow.keras.models import", "open(self.save_folder + \"/\" + self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq: def __init__(self, train_config", "__get_loss(self): if self.train_config['loss'] == 'huber': self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss'] == 'mse': self.loss_fun", "a network config dict' self.model_id = uuid.uuid1().hex + '_' + generative_model_id self.save_folder =", "for Keras' def __init__(self, file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7, # label prelog", "label_prelog_cutoff_high = None, ): # List physical devices #print(tf.config.list_physical_devices()) # Do I allow", "self.input_dim = self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1: self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim =", "path # if not output_folder_list[0]: # if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path", "is not None: y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X, y def __load_file(self,", "'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx,", "None, generative_model_id = 'ddm'): assert 
network_config is not None, 'You need to supply", "= None, data_generator_train = None, data_generator_val = None, model = None, output_folder =", "False, ): self.train_config = train_config self.model = model self.output_folder = output_folder self.allow_abs_path_folder_generation =", "self.input_shape = input_shape self.network_config = network_config self.model = self.__build_model() def __build_model(self): model =", "supply a network config dict' self.model_id = uuid.uuid1().hex + '_' + generative_model_id self.save_folder", "# AF-TODO import folder def __get_loss(self): if self.train_config['loss'] == 'huber': self.loss_fun = tf.keras.losses.Huber()", "verbose = 1, save_best_only = False)) elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss',", "= self.loss_fun, optimizer = self.optimizer, metrics = self.metrics) def __load_weights(self): # If warmstart", "= 1, patience = 10)) elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor", "// self.batches_per_file]) # Generate data batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index", "file loaded:', index // self.batches_per_file) self.__load_file(file_index = self.indexes[index // self.batches_per_file]) # Generate data", "else: raise ValueError(\"Only Dense Layers for now --> check your network config\") return", "datafile') #print('batch: ', index) #print('new file loaded:', index // self.batches_per_file) self.__load_file(file_index = self.indexes[index", "file sizes ? 
# Initialization self.batch_size = batch_size #self.labels = labels self.file_IDs =", "#return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index])) def __init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init", "__init_file_shape(self): init_file = pickle.load(open(self.file_IDs[0], 'rb')) #print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict =", "= train_config self.model = model self.output_folder = output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train =", "self.optimizer, metrics = self.metrics) def __load_weights(self): # If warmstart == True, we load", "pd import psutil import pickle #import kde_info #from lanfactory.config import import tensorflow as", "= 'val_loss', verbose = 1, save_best_only = False)) elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor", "= 1e-7, # label prelog cutoff --> label_preprocessor ? 
label_prelog_cutoff_high = None, ):", "not os.path.exists(tmp_dir_str): # print('Did not find folder: ', tmp_dir_str) # print('Creating it...') #", "== 0 or self.tmp_data == None: #self.tmp_file = #print('index') #print('debugging') #print('loading new datafile')", "= self.train_config['metrics'] return def __get_callbacks(self): self.cb_list = [] for cb_tmp in self.train_config['callbacks']: if", "self.train_config['callbacks']: if cb_tmp == 'checkpoint': ckpt_file_name = self.output_folder + '/' + self.model.model_id +", "supply a folder for saving the model' try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation)", "self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder def __get_loss(self): if self.train_config['loss'] ==", "# if not os.path.exists(tmp_dir_str): # print('Did not find folder: ', tmp_dir_str) # print('Creating", "pd.DataFrame(history.history).to_csv(self.output_folder + \"/\" + self.model.model_id + \"_training_history.csv\") if not 'checkpoint' in self.train_config['callbacks']: #", "new datafile') #print('batch: ', index) #print('new file loaded:', index // self.batches_per_file) self.__load_file(file_index =", "\"w\").write(spec) class ModelTrainerKerasSeq: def __init__(self, train_config = None, data_generator_train = None, data_generator_val =", "occured when creating the directory ', tmp_dir_str) # else: # print('Found folder: ',", "self.train_config['optimizer'] == 'adam': self.optimizer = 'adam' return def __get_metrics(self): self.metrics = self.train_config['metrics'] return", "= uuid.uuid1().hex + '_' + generative_model_id self.save_folder = save_folder self.input_shape = input_shape self.network_config", "= None, warm_start = False, allow_abs_path_folder_generation = False, ): self.train_config = train_config self.model", "DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, file_IDs, 
batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7,", "'/' + self.file_IDs[0]).shape class KerasModel: def __init__(self, network_config = None, input_shape = 10,", "file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7, # label prelog cutoff --> label_preprocessor ?", "# X : (n_samples, *dim, n_channels) # Initialization X = np.empty((self.batch_size, self.input_dim), dtype", "1] == 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1], activation = self.network_config['activations'][i - 1]))", "print('Did not find folder: ', tmp_dir_str) # print('Creating it...') # try: # os.makedirs(tmp_dir_str)", "np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X, y def __load_file(self, file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb'))", "a functions or class too if self.train_config['optimizer'] == 'adam': self.optimizer = 'adam' return", "= output_folder self.allow_abs_path_folder_generation = allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val = data_generator_val self.warm_start =", "self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, verbose = 1, patience = 10)) elif", "def __getitem__(self, index): 'Generate one batch of data' # Generate indexes of the", "None): 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels) #", "can have self.optimizer as a functions or class too if self.train_config['optimizer'] == 'adam':", "self.model.model_id + \"_training_history.csv\") if not 'checkpoint' in self.train_config['callbacks']: # Save Model print('Saving final", "'Denotes the number of batches per epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size))", "[] for cb_tmp in self.train_config['callbacks']: if cb_tmp == 'checkpoint': ckpt_file_name = self.output_folder +", "'Generates data 
containing batch_size samples' # X : (n_samples, *dim, n_channels) # Initialization", "import numpy as np import uuid import os import pandas as pd import", "of IDs #file_IDs_temp = [self.file_IDs[k] for k in indexes] if index % self.batches_per_file", "model = keras.Sequential() for i in range(len(self.network_config['layer_sizes']) + 1): if i == 0:", "+ 1): if i == 0: model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i], input_dim = self.input_shape, activation", "- 1])) else: raise ValueError(\"Only Dense Layers for now --> check your network", "or absolute path # if not output_folder_list[0]: # if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute", "# warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation = False. No folders will", "#tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is not None: y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if", "from tensorflow.keras.models import load_model from tensorflow.python.client import device_lib import warnings from lanfactory.utils import", "if self.shuffle == True: np.random.shuffle(self.indexes) def __data_generation(self, batch_ids = None): 'Generates data containing", "- 1], activation = self.network_config['activations'][i - 1])) else: raise ValueError(\"Only Dense Layers for", "\"/\" + self.model.model_id + \"_training_history.csv\") if not 'checkpoint' in self.train_config['callbacks']: # Save Model", "1 # else: # rel_folder = False # i = 0 # #", "history = self.model.model.fit(x = self.data_generator_train, validation_data = self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks =", "cb_tmp in self.train_config['callbacks']: if cb_tmp == 'checkpoint': ckpt_file_name = self.output_folder + '/' +", "of the batch # Find list of IDs #file_IDs_temp = [self.file_IDs[k] for k", "functions or class too if self.train_config['optimizer'] == 'adam': self.optimizer = 'adam' return 
def", "Save Model print('Saving final state of the model, since callbacks did not include", "self.metrics = self.train_config['metrics'] return def __get_callbacks(self): self.cb_list = [] for cb_tmp in self.train_config['callbacks']:", "= self.output_folder + '/' + self.model.model_id + '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose", "= False)) elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, verbose", "y def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.file_IDs)) if self.shuffle", "0.00000001)) else: print('Provided a string for a callback function that is none of:", "self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation = False. No folders", "elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, verbose = 1,", "# except: # print('Some problem occured when creating the directory ', tmp_dir_str) #", "shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low self.label_prelog_cutoff_high = label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data = None", "#file_IDs_temp = [self.file_IDs[k] for k in indexes] if index % self.batches_per_file == 0", "We can have self.optimizer as a functions or class too if self.train_config['optimizer'] ==", "= self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is", "tensorflow.keras.models import load_model from tensorflow.python.client import device_lib import warnings from lanfactory.utils import try_gen_folder", "is not None, 'You need to supply a network config dict' self.model_id =", "'You need to supply a network config dict' self.model_id = uuid.uuid1().hex + '_'", 
"len(output_folder_list): # if i == 0: # tmp_dir_str += output_folder_list[i] # else: #", "= self.file_shape_dict['inputs'][1] if len(self.file_shape_dict['labels']) > 1: self.label_dim = self.file_shape_dict['labels'][1] else: self.label_dim = 1", "input file sizes ? # Initialization self.batch_size = batch_size #self.labels = labels self.file_IDs", "data containing batch_size samples' # X : (n_samples, *dim, n_channels) # Initialization X", "size = self.tmp_data['data'].shape[0], replace = True) self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx]", "input_dim = self.input_shape, activation = self.network_config['activations'][i])) else: if self.network_config['layer_types'][i - 1] == 'dense':", "'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1], activation = self.network_config['activations'][i - 1])) else: raise", "else: if self.network_config['layer_types'][i - 1] == 'dense': model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1], activation", "self.train_config['n_epochs'], callbacks = self.cb_list, verbose = verbose, ) if save_history: pd.DataFrame(history.history).to_csv(self.output_folder + \"/\"", "= '' # i = 0 # while i < len(output_folder_list): # if", "if rel_folder: # output_folder_list[1] = '/' + output_folder_list[1] # output_folder_list.pop(0) # tmp_dir_str =", "? 
# Initialization self.batch_size = batch_size #self.labels = labels self.file_IDs = file_IDs self.shuffle", "will be generated.') # return # else: # rel_folder = True # i", "# else: # i += 1 # if rel_folder: # output_folder_list[1] = '/'", "', tmp_dir_str) # else: # print('Found folder: ', tmp_dir_str) # print('Moving on...') #", "len(output_folder_list): # if not output_folder_list[i]: # output_folder_list.pop(i) # else: # i += 1", "return model def _save_model_yaml(self, allow_abs_path_folder_generation = False): spec = self.model.to_yaml() assert self.save_folder is", "', index) #print('new file loaded:', index // self.batches_per_file) self.__load_file(file_index = self.indexes[index // self.batches_per_file])", "metrics = self.metrics) def __load_weights(self): # If warmstart == True, we load model", "1, save_best_only = False)) elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta =", "return def train_model(self, save_history = True , verbose = 1): history = self.model.model.fit(x", "def __len__(self): 'Denotes the number of batches per epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0])", "spec = self.model.to_yaml() assert self.save_folder is not None, 'You did not supply a", "X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low", "# Initialization self.batch_size = batch_size #self.labels = labels self.file_IDs = file_IDs self.shuffle =", "device_lib import warnings from lanfactory.utils import try_gen_folder class DataGenerator(keras.utils.Sequence): 'Generates data for Keras'", ":] #tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is not None:", "# tmp_dir_str += output_folder_list[i] # else: # tmp_dir_str += '/' + output_folder_list[i] #", "'/' + output_folder_list[i] # if not 
os.path.exists(tmp_dir_str): # print('Did not find folder: ',", "self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1] if self.label_prelog_cutoff_low is not", "= allow_abs_path_folder_generation) open(self.save_folder + \"/\" + self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq: def", "#print('index') #print('debugging') #print('loading new datafile') #print('batch: ', index) #print('new file loaded:', index //", "else: # i += 1 # if rel_folder: # output_folder_list[1] = '/' +", "save_folder self.input_shape = input_shape self.network_config = network_config self.model = self.__build_model() def __build_model(self): model", "pickle #import kde_info #from lanfactory.config import import tensorflow as tf from tensorflow import", "np.empty((self.batch_size, self.label_dim), dtype = np.float32) X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1] y =", "warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics() self.__get_callbacks() self.__compile_model() self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation)", "checkpoint, earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss = self.loss_fun, optimizer = self.optimizer, metrics =", "self.indexes = np.arange(len(self.file_IDs)) if self.shuffle == True: np.random.shuffle(self.indexes) def __data_generation(self, batch_ids = None):", "import import tensorflow as tf from tensorflow import keras from tensorflow.keras.models import load_model", "batch_size #self.labels = labels self.file_IDs = file_IDs self.shuffle = shuffle self.label_prelog_cutoff_low = label_prelog_cutoff_low", "= None # Get metadata from loading a test file.... # FILL IN", "file.... 
# FILL IN # self.file_shape_dict = self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes the", "self.tmp_data['data'][shuffle_idx, :] self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx] #return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index])) def __init_file_shape(self):", "label_prelog_cutoff_low = 1e-7, # label prelog cutoff --> label_preprocessor ? label_prelog_cutoff_high = None,", "X, y def __load_file(self, file_index): self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb')) shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size", "devices #print(tf.config.list_physical_devices()) # Do I allow for arbitrary input file sizes ? #", "self.network_config['activations'][i - 1])) else: raise ValueError(\"Only Dense Layers for now --> check your", "def _get_model(self): return self.model.model # def __try_gen_output_folder(self): # output_folder_list = self.output_folder.split('/') # #", "from loading a test file.... 
# FILL IN # self.file_shape_dict = self.__init_file_shape() self.on_epoch_end()", "activation = self.network_config['activations'][i - 1])) else: raise ValueError(\"Only Dense Layers for now -->", "for now --> check your network config\") return model def _save_model_yaml(self, allow_abs_path_folder_generation =", "as a string # We can have self.optimizer as a functions or class", "self.__compile_model() self.__load_weights() try_gen_folder(folder = self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder def", "#print('batch: ', index) #print('new file loaded:', index // self.batches_per_file) self.__load_file(file_index = self.indexes[index //", "False)) elif cb_tmp == 'earlystopping': self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, verbose =", "function that is none of: checkpoint, earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss = self.loss_fun,", "# i = 0 # # # while i < len(output_folder_list): # if", "= self.output_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder def __get_loss(self): if self.train_config['loss']", "# # while i < len(output_folder_list): # if not output_folder_list[i]: # output_folder_list.pop(i) #", "sizes ? # Initialization self.batch_size = batch_size #self.labels = labels self.file_IDs = file_IDs", "there ! 
return def train_model(self, save_history = True , verbose = 1): history", "else: # rel_folder = False # i = 0 # # # while", "# i += 1 # if rel_folder: # output_folder_list[1] = '/' + output_folder_list[1]", "check your network config\") return model def _save_model_yaml(self, allow_abs_path_folder_generation = False): spec =", "+ '_ckpt.h5' self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name, monitor = 'val_loss', verbose = 1, save_best_only = False)) elif", "in indexes] if index % self.batches_per_file == 0 or self.tmp_data == None: #self.tmp_file", "tensorflow as tf from tensorflow import keras from tensorflow.keras.models import load_model from tensorflow.python.client", "__init__(self, network_config = None, input_shape = 10, save_folder = None, generative_model_id = 'ddm'):", "== 'adam': self.optimizer = 'adam' return def __get_metrics(self): self.metrics = self.train_config['metrics'] return def", "callback function that is none of: checkpoint, earlystopping, reducelr') def __compile_model(self): self.model.model.compile(loss =", "self.label_prelog_cutoff_low is not None: y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low) if self.label_prelog_cutoff_high is not", "+= '/' + output_folder_list[i] # if not os.path.exists(tmp_dir_str): # print('Did not find folder:", "I allow for arbitrary input file sizes ? 
# Initialization self.batch_size = batch_size", "relative or absolute path # if not output_folder_list[0]: # if not self.allow_abs_path_folder_generation: #", "self.label_prelog_cutoff_high is not None: y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high) return X, y def", "if not output_folder_list[0]: # if not self.allow_abs_path_folder_generation: # warnings.warn('Absolute folder path provided, but", "allow_abs_path_folder_generation) # AF-TODO import folder def __get_loss(self): if self.train_config['loss'] == 'huber': self.loss_fun =", "self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1, patience = 5, verbose = 1, min_delta", "= self.data_generator_val, epochs = self.train_config['n_epochs'], callbacks = self.cb_list, verbose = verbose, ) if", "data' # Generate indexes of the batch # Find list of IDs #file_IDs_temp", "folder def __get_loss(self): if self.train_config['loss'] == 'huber': self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss'] ==", "epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self, index): 'Generate one batch", "+ \"/\" + self.model_id + \"_model_spec.yaml\", \"w\").write(spec) class ModelTrainerKerasSeq: def __init__(self, train_config =", "# else: # rel_folder = True # i = 1 # else: #", "= 0, verbose = 1, patience = 10)) elif cb_tmp == 'reducelr': self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor", "def __init__(self, file_IDs, batch_size=32, shuffle=True, label_prelog_cutoff_low = 1e-7, # label prelog cutoff -->", "per epoch' return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size)) def __getitem__(self, index): 'Generate one", "1) * self.batch_size, 1) X, y = self.__data_generation(batch_ids) return X, y def on_epoch_end(self):", "have self.optimizer as a functions or class too if self.train_config['optimizer'] == 'adam': 
self.optimizer", "allow_abs_path_folder_generation = False, ): self.train_config = train_config self.model = model self.output_folder = output_folder", "provided, but setting allow_abs_path_folder_generation = False. No folders will be generated.') # return", "< len(output_folder_list): # if not output_folder_list[i]: # output_folder_list.pop(i) # else: # i +=", "after each epoch' self.indexes = np.arange(len(self.file_IDs)) if self.shuffle == True: np.random.shuffle(self.indexes) def __data_generation(self,", "np.arange(((index % self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size, 1)", "a relative or absolute path # if not output_folder_list[0]: # if not self.allow_abs_path_folder_generation:", "# if not output_folder_list[i]: # output_folder_list.pop(i) # else: # i += 1 #", "import folder def __get_loss(self): if self.train_config['loss'] == 'huber': self.loss_fun = tf.keras.losses.Huber() elif self.train_config['loss']", "file shape: ', init_file['data'].shape, init_file['labels'].shape) self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape} self.batches_per_file =", "1 # if rel_folder: # output_folder_list[1] = '/' + output_folder_list[1] # output_folder_list.pop(0) #", "def __build_model(self): model = keras.Sequential() for i in range(len(self.network_config['layer_sizes']) + 1): if i", "output_folder_list[i] # if not os.path.exists(tmp_dir_str): # print('Did not find folder: ', tmp_dir_str) #", "allow_abs_path_folder_generation self.data_generator_train = data_generator_train self.data_generator_val = data_generator_val self.warm_start = warm_start self.__get_loss() self.__get_optimizer() self.__get_metrics()", "= label_prelog_cutoff_high #self.training_data_folder = training_data_folder self.tmp_data = None # Get metadata from loading", "self.__init_file_shape() self.on_epoch_end() def __len__(self): 'Denotes the number of batches per epoch' return 
int(np.floor((len(self.file_IDs)", "generated.') # return # else: # rel_folder = True # i = 1", "your network config\") return model def _save_model_yaml(self, allow_abs_path_folder_generation = False): spec = self.model.to_yaml()" ]
[ "= \"\"\" CREATE TABLE people ( id int primary key, name text, dob", "?, ?, ?) \"\"\" engine = create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1,", "date, number_of_pets int) \"\"\" INSERT_TABLE_SQL = \"\"\" INSERT INTO people (id, name, dob,", "import pytest from sqlalchemy import create_engine, MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app =", "CREATE TABLE people ( id int primary key, name text, dob date, number_of_pets", "TABLE people ( id int primary key, name text, dob date, number_of_pets int)", "@pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app = make_app() yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL", "?, ?) \"\"\" engine = create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom',", "from sqlalchemy import create_engine, MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app = make_app() yield", "import create_engine, MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app = make_app() yield autoapi_app drop_db(config.SQLA_URI)", "int primary key, name text, dob date, number_of_pets int) \"\"\" INSERT_TABLE_SQL = \"\"\"", "connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri): engine", "3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri): engine = create_engine(config.SQLA_URI) meta =", "create_db(config.SQLA_URI) autoapi_app = make_app() yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE", "app import make_app import pytest from sqlalchemy import create_engine, MetaData @pytest.yield_fixture def app():", "make_app import pytest 
from sqlalchemy import create_engine, MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app", "create_engine, MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app = make_app() yield autoapi_app drop_db(config.SQLA_URI) def", "'1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri): engine = create_engine(config.SQLA_URI) meta", "import make_app import pytest from sqlalchemy import create_engine, MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI)", "primary key, name text, dob date, number_of_pets int) \"\"\" INSERT_TABLE_SQL = \"\"\" INSERT", "int) \"\"\" INSERT_TABLE_SQL = \"\"\" INSERT INTO people (id, name, dob, number_of_pets) VALUES", "'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2)", "1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24',", "\"\"\" CREATE TABLE people ( id int primary key, name text, dob date,", "text, dob date, number_of_pets int) \"\"\" INSERT_TABLE_SQL = \"\"\" INSERT INTO people (id,", "drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE TABLE people ( id int primary", "from app import make_app import pytest from sqlalchemy import create_engine, MetaData @pytest.yield_fixture def", "connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry',", "people ( id int primary key, name text, dob date, number_of_pets int) \"\"\"", "pytest from sqlalchemy import create_engine, MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app = make_app()", "people (id, name, dob, number_of_pets) VALUES (?, ?, ?, ?) 
\"\"\" engine =", "import config from app import make_app import pytest from sqlalchemy import create_engine, MetaData", "sqlalchemy import create_engine, MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app = make_app() yield autoapi_app", "MetaData @pytest.yield_fixture def app(): create_db(config.SQLA_URI) autoapi_app = make_app() yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri):", "create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick',", "2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri): engine =", "key, name text, dob date, number_of_pets int) \"\"\" INSERT_TABLE_SQL = \"\"\" INSERT INTO", "name, dob, number_of_pets) VALUES (?, ?, ?, ?) \"\"\" engine = create_engine(config.SQLA_URI) connection", "= engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3)", "number_of_pets) VALUES (?, ?, ?, ?) 
\"\"\" engine = create_engine(config.SQLA_URI) connection = engine.connect()", "engine = create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL,", "3, 'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri): engine = create_engine(config.SQLA_URI) meta = MetaData(bind=engine) meta.reflect()", "= create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2,", "'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri): engine = create_engine(config.SQLA_URI) meta = MetaData(bind=engine) meta.reflect() meta.drop_all()", "= \"\"\" INSERT INTO people (id, name, dob, number_of_pets) VALUES (?, ?, ?,", "INSERT INTO people (id, name, dob, number_of_pets) VALUES (?, ?, ?, ?) \"\"\"", "\"\"\" INSERT_TABLE_SQL = \"\"\" INSERT INTO people (id, name, dob, number_of_pets) VALUES (?,", "dob, number_of_pets) VALUES (?, ?, ?, ?) \"\"\" engine = create_engine(config.SQLA_URI) connection =", "VALUES (?, ?, ?, ?) 
\"\"\" engine = create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL)", "app(): create_db(config.SQLA_URI) autoapi_app = make_app() yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\"", "id int primary key, name text, dob date, number_of_pets int) \"\"\" INSERT_TABLE_SQL =", "connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3,", "0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri):", "( id int primary key, name text, dob date, number_of_pets int) \"\"\" INSERT_TABLE_SQL", "connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14',", "def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE TABLE people ( id int primary key,", "engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0) connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL,", "create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE TABLE people ( id int primary key, name", "autoapi_app = make_app() yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE TABLE", "number_of_pets int) \"\"\" INSERT_TABLE_SQL = \"\"\" INSERT INTO people (id, name, dob, number_of_pets)", "autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE TABLE people ( id int", "make_app() yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE TABLE people (", "'1980-02-26', 0) 
connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2) def", "connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri): engine = create_engine(config.SQLA_URI) meta = MetaData(bind=engine)", "def app(): create_db(config.SQLA_URI) autoapi_app = make_app() yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL =", "?) \"\"\" engine = create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26',", "\"\"\" INSERT INTO people (id, name, dob, number_of_pets) VALUES (?, ?, ?, ?)", "(id, name, dob, number_of_pets) VALUES (?, ?, ?, ?) \"\"\" engine = create_engine(config.SQLA_URI)", "CREATE_TABLE_SQL = \"\"\" CREATE TABLE people ( id int primary key, name text,", "'Dick', '1982-03-14', 3) connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2) def drop_db(sqlalchemy_uri): engine = create_engine(config.SQLA_URI)", "yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE TABLE people ( id", "\"\"\" engine = create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0)", "config from app import make_app import pytest from sqlalchemy import create_engine, MetaData @pytest.yield_fixture", "INSERT_TABLE_SQL = \"\"\" INSERT INTO people (id, name, dob, number_of_pets) VALUES (?, ?,", "name text, dob date, number_of_pets int) \"\"\" INSERT_TABLE_SQL = \"\"\" INSERT INTO people", "INTO people (id, name, dob, number_of_pets) VALUES (?, ?, ?, ?) 
\"\"\" engine", "<reponame>zacharycohn/pants<filename>conftest.py<gh_stars>10-100 import config from app import make_app import pytest from sqlalchemy import create_engine,", "= make_app() yield autoapi_app drop_db(config.SQLA_URI) def create_db(sqlalchemy_uri): CREATE_TABLE_SQL = \"\"\" CREATE TABLE people", "(?, ?, ?, ?) \"\"\" engine = create_engine(config.SQLA_URI) connection = engine.connect() connection.execute(CREATE_TABLE_SQL) connection.execute(INSERT_TABLE_SQL,", "dob date, number_of_pets int) \"\"\" INSERT_TABLE_SQL = \"\"\" INSERT INTO people (id, name," ]
[ "NoMachine. \"\"\" cmd = \"xinstall nomachine\" sp.run(cmd, shell=True, check=True) def test_version(): \"\"\"Test the", "<reponame>dclong/config \"\"\"Test the misc module. \"\"\" import subprocess as sp def test_nomachine(): \"\"\"Test", "check=True) def test_version(): \"\"\"Test the version command. \"\"\" cmd = \"xinstall version\" sp.run(cmd,", "module. \"\"\" import subprocess as sp def test_nomachine(): \"\"\"Test installing and configuring NoMachine.", "sp def test_nomachine(): \"\"\"Test installing and configuring NoMachine. \"\"\" cmd = \"xinstall nomachine\"", "sp.run(cmd, shell=True, check=True) def test_version(): \"\"\"Test the version command. \"\"\" cmd = \"xinstall", "test_version(): \"\"\"Test the version command. \"\"\" cmd = \"xinstall version\" sp.run(cmd, shell=True, check=True)", "\"\"\" cmd = \"xinstall nomachine\" sp.run(cmd, shell=True, check=True) def test_version(): \"\"\"Test the version", "\"xinstall nomachine\" sp.run(cmd, shell=True, check=True) def test_version(): \"\"\"Test the version command. \"\"\" cmd", "def test_version(): \"\"\"Test the version command. \"\"\" cmd = \"xinstall version\" sp.run(cmd, shell=True,", "shell=True, check=True) def test_version(): \"\"\"Test the version command. \"\"\" cmd = \"xinstall version\"", "nomachine\" sp.run(cmd, shell=True, check=True) def test_version(): \"\"\"Test the version command. \"\"\" cmd =", "subprocess as sp def test_nomachine(): \"\"\"Test installing and configuring NoMachine. \"\"\" cmd =", "\"\"\"Test the misc module. \"\"\" import subprocess as sp def test_nomachine(): \"\"\"Test installing", "misc module. \"\"\" import subprocess as sp def test_nomachine(): \"\"\"Test installing and configuring", "\"\"\" import subprocess as sp def test_nomachine(): \"\"\"Test installing and configuring NoMachine. \"\"\"", "\"\"\"Test installing and configuring NoMachine. \"\"\" cmd = \"xinstall nomachine\" sp.run(cmd, shell=True, check=True)", "configuring NoMachine. 
\"\"\" cmd = \"xinstall nomachine\" sp.run(cmd, shell=True, check=True) def test_version(): \"\"\"Test", "= \"xinstall nomachine\" sp.run(cmd, shell=True, check=True) def test_version(): \"\"\"Test the version command. \"\"\"", "the misc module. \"\"\" import subprocess as sp def test_nomachine(): \"\"\"Test installing and", "and configuring NoMachine. \"\"\" cmd = \"xinstall nomachine\" sp.run(cmd, shell=True, check=True) def test_version():", "as sp def test_nomachine(): \"\"\"Test installing and configuring NoMachine. \"\"\" cmd = \"xinstall", "installing and configuring NoMachine. \"\"\" cmd = \"xinstall nomachine\" sp.run(cmd, shell=True, check=True) def", "cmd = \"xinstall nomachine\" sp.run(cmd, shell=True, check=True) def test_version(): \"\"\"Test the version command.", "import subprocess as sp def test_nomachine(): \"\"\"Test installing and configuring NoMachine. \"\"\" cmd", "def test_nomachine(): \"\"\"Test installing and configuring NoMachine. \"\"\" cmd = \"xinstall nomachine\" sp.run(cmd,", "test_nomachine(): \"\"\"Test installing and configuring NoMachine. \"\"\" cmd = \"xinstall nomachine\" sp.run(cmd, shell=True," ]
[ "direction in directions: fly_from = direction[0] fly_to = direction[1] date_from = datetime.datetime.now() date_to", "if r.status_code == 200: print('success!!!') results = r.json().get('data') print(len(results)) for r in results:", "изменена') if result.get('flights_invalid') == True: messages.append('Данный перелет не валиден') response = {'messages': messages,", "валиден') response = {'messages': messages, 'status': 'ERR'} if not len(messages): response['status'] = 'OK'", "200: result = r.json() messages = [] if result.get('price_change') == True: messages.append('Цена изменена')", "datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction } flight = Flight.objects.create(**data)", "Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER = 'picky' def create_flight(result):", "= datetime.datetime.now() date_to = date_from + datetime.timedelta(days=30) data = { 'flyFrom': fly_from, 'to':", "datetime.timedelta(days=30) data = { 'flyFrom': fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner':", "direction } flight = Flight.objects.create(**data) print(flight.id) def get_flights(): for direction in directions: fly_from", "response = { 'empty': True } if booking_token is None: return response data", "'booking_token': booking_token, 'bnum': 3, 'pnum': 2 } r = requests.get(CHECK_API, params=data) if r.status_code", "requests import datetime from aviata.flights.consts import directions from aviata.flights.models import Direction, Flight SEARCH_API", "= 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' 
PARTNER = 'picky' def create_flight(result): flight_time_ts = result.get('dTime')", "from aviata.flights.models import Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER =", "'booking_token': result.get('booking_token'), 'direction': direction } flight = Flight.objects.create(**data) print(flight.id) def get_flights(): for direction", "date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r = requests.get(SEARCH_API, params=data) if r.status_code ==", "aviata.flights.consts import directions from aviata.flights.models import Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API =", "direction[1] date_from = datetime.datetime.now() date_to = date_from + datetime.timedelta(days=30) data = { 'flyFrom':", "3, 'pnum': 2 } r = requests.get(CHECK_API, params=data) if r.status_code == 200: result", "'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction } flight = Flight.objects.create(**data) print(flight.id) def get_flights():", "'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction }", ") data = { 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price':", "{ 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'),", "booking_token is None: return response data = { 'v': 2, 
'booking_token': booking_token, 'bnum':", "{ 'flyFrom': fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r", "aviata.flights.models import Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER = 'picky'", "r.json().get('data') print(len(results)) for r in results: create_flight(r) def check_flight(booking_token=None): response = { 'empty':", "messages.append('Данный перелет не валиден') response = {'messages': messages, 'status': 'ERR'} if not len(messages):", "messages = [] if result.get('price_change') == True: messages.append('Цена изменена') if result.get('flights_invalid') == True:", "directions from aviata.flights.models import Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER", "if result.get('price_change') == True: messages.append('Цена изменена') if result.get('flights_invalid') == True: messages.append('Данный перелет не", "if booking_token is None: return response data = { 'v': 2, 'booking_token': booking_token,", "= r.json().get('data') print(len(results)) for r in results: create_flight(r) def check_flight(booking_token=None): response = {", "results: create_flight(r) def check_flight(booking_token=None): response = { 'empty': True } if booking_token is", "result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction", "= r.json() messages = [] if result.get('price_change') == True: messages.append('Цена изменена') if result.get('flights_invalid')", "if result.get('flights_invalid') == True: messages.append('Данный перелет не валиден') response = 
{'messages': messages, 'status':", "== True: messages.append('Данный перелет не валиден') response = {'messages': messages, 'status': 'ERR'} if", "200: print('success!!!') results = r.json().get('data') print(len(results)) for r in results: create_flight(r) def check_flight(booking_token=None):", "True: messages.append('Данный перелет не валиден') response = {'messages': messages, 'status': 'ERR'} if not", "requests.get(CHECK_API, params=data) if r.status_code == 200: result = r.json() messages = [] if", "True } if booking_token is None: return response data = { 'v': 2,", "'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER = 'picky' def create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts", "import requests import datetime from aviata.flights.consts import directions from aviata.flights.models import Direction, Flight", "= requests.get(SEARCH_API, params=data) if r.status_code == 200: print('success!!!') results = r.json().get('data') print(len(results)) for", "2, 'booking_token': booking_token, 'bnum': 3, 'pnum': 2 } r = requests.get(CHECK_API, params=data) if", "'partner': PARTNER } r = requests.get(SEARCH_API, params=data) if r.status_code == 200: print('success!!!') results", "= { 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token':", "booking_token, 'bnum': 3, 'pnum': 2 } r = requests.get(CHECK_API, params=data) if r.status_code ==", "result.get('flights_invalid') == True: messages.append('Данный перелет не валиден') response = {'messages': messages, 'status': 'ERR'}", "in directions: fly_from = direction[0] fly_to = direction[1] date_from = datetime.datetime.now() date_to =", "direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), 
fly_to__code=result.get('cityCodeTo') ) data = { 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts),", "print('success!!!') results = r.json().get('data') print(len(results)) for r in results: create_flight(r) def check_flight(booking_token=None): response", "} r = requests.get(CHECK_API, params=data) if r.status_code == 200: result = r.json() messages", "data = { 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'),", "'flyFrom': fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r =", "} r = requests.get(SEARCH_API, params=data) if r.status_code == 200: print('success!!!') results = r.json().get('data')", "r in results: create_flight(r) def check_flight(booking_token=None): response = { 'empty': True } if", "PARTNER = 'picky' def create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts = result.get('aTime') direction =", "= 'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER = 'picky' def create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts = result.get('aTime')", "for r in results: create_flight(r) def check_flight(booking_token=None): response = { 'empty': True }", "response = {'messages': messages, 'status': 'ERR'} if not len(messages): response['status'] = 'OK' return", "} flight = Flight.objects.create(**data) print(flight.id) def get_flights(): for direction in directions: fly_from =", "create_flight(r) def check_flight(booking_token=None): response = { 'empty': True } if booking_token is None:", "SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' 
PARTNER = 'picky' def create_flight(result): flight_time_ts =", "= {'messages': messages, 'status': 'ERR'} if not len(messages): response['status'] = 'OK' return response", "'pnum': 2 } r = requests.get(CHECK_API, params=data) if r.status_code == 200: result =", "import directions from aviata.flights.models import Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?'", "= requests.get(CHECK_API, params=data) if r.status_code == 200: result = r.json() messages = []", "r = requests.get(SEARCH_API, params=data) if r.status_code == 200: print('success!!!') results = r.json().get('data') print(len(results))", "def check_flight(booking_token=None): response = { 'empty': True } if booking_token is None: return", "Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data = { 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts),", "= { 'empty': True } if booking_token is None: return response data =", "== 200: result = r.json() messages = [] if result.get('price_change') == True: messages.append('Цена", "direction[0] fly_to = direction[1] date_from = datetime.datetime.now() date_to = date_from + datetime.timedelta(days=30) data", "is None: return response data = { 'v': 2, 'booking_token': booking_token, 'bnum': 3,", "in results: create_flight(r) def check_flight(booking_token=None): response = { 'empty': True } if booking_token", "directions: fly_from = direction[0] fly_to = direction[1] date_from = datetime.datetime.now() date_to = date_from", "import datetime from aviata.flights.consts import directions from aviata.flights.models import Direction, Flight SEARCH_API =", "data = { 'flyFrom': fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER", "True: 
messages.append('Цена изменена') if result.get('flights_invalid') == True: messages.append('Данный перелет не валиден') response =", "Flight.objects.create(**data) print(flight.id) def get_flights(): for direction in directions: fly_from = direction[0] fly_to =", "'bnum': 3, 'pnum': 2 } r = requests.get(CHECK_API, params=data) if r.status_code == 200:", "перелет не валиден') response = {'messages': messages, 'status': 'ERR'} if not len(messages): response['status']", "import Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER = 'picky' def", "r.status_code == 200: print('success!!!') results = r.json().get('data') print(len(results)) for r in results: create_flight(r)", "def get_flights(): for direction in directions: fly_from = direction[0] fly_to = direction[1] date_from", "datetime from aviata.flights.consts import directions from aviata.flights.models import Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights'", "= { 'v': 2, 'booking_token': booking_token, 'bnum': 3, 'pnum': 2 } r =", "fly_to = direction[1] date_from = datetime.datetime.now() date_to = date_from + datetime.timedelta(days=30) data =", "flight_time_ts = result.get('dTime') arrival_time_ts = result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data", "result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction } flight = Flight.objects.create(**data) print(flight.id) def", "'picky' def create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts = result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'),", "r.status_code == 200: result = r.json() messages = [] if result.get('price_change') == True:", "params=data) if r.status_code == 200: result = r.json() messages = [] if 
result.get('price_change')", "data = { 'v': 2, 'booking_token': booking_token, 'bnum': 3, 'pnum': 2 } r", "requests.get(SEARCH_API, params=data) if r.status_code == 200: print('success!!!') results = r.json().get('data') print(len(results)) for r", "result.get('dTime') arrival_time_ts = result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data = {", "'direction': direction } flight = Flight.objects.create(**data) print(flight.id) def get_flights(): for direction in directions:", "= [] if result.get('price_change') == True: messages.append('Цена изменена') if result.get('flights_invalid') == True: messages.append('Данный", "result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction } flight = Flight.objects.create(**data) print(flight.id) def get_flights(): for", "== 200: print('success!!!') results = r.json().get('data') print(len(results)) for r in results: create_flight(r) def", "fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data = { 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration':", "check_flight(booking_token=None): response = { 'empty': True } if booking_token is None: return response", "datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction } flight", "date_from + datetime.timedelta(days=30) data = { 'flyFrom': fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo':", "get_flights(): for direction in directions: fly_from = direction[0] fly_to = direction[1] date_from =", "PARTNER } r = requests.get(SEARCH_API, params=data) if r.status_code == 200: 
print('success!!!') results =", "def create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts = result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo')", "if r.status_code == 200: result = r.json() messages = [] if result.get('price_change') ==", "fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r = requests.get(SEARCH_API,", "results = r.json().get('data') print(len(results)) for r in results: create_flight(r) def check_flight(booking_token=None): response =", "+ datetime.timedelta(days=30) data = { 'flyFrom': fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"),", "r = requests.get(CHECK_API, params=data) if r.status_code == 200: result = r.json() messages =", "'v': 2, 'booking_token': booking_token, 'bnum': 3, 'pnum': 2 } r = requests.get(CHECK_API, params=data)", "{ 'v': 2, 'booking_token': booking_token, 'bnum': 3, 'pnum': 2 } r = requests.get(CHECK_API,", "arrival_time_ts = result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data = { 'flight_id':", "= result.get('dTime') arrival_time_ts = result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data =", "fly_to__code=result.get('cityCodeTo') ) data = { 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'),", "{ 'empty': True } if booking_token is None: return response data = {", "'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r = requests.get(SEARCH_API, params=data) if r.status_code == 200:", "date_from = datetime.datetime.now() date_to = 
date_from + datetime.timedelta(days=30) data = { 'flyFrom': fly_from,", "response data = { 'v': 2, 'booking_token': booking_token, 'bnum': 3, 'pnum': 2 }", "for direction in directions: fly_from = direction[0] fly_to = direction[1] date_from = datetime.datetime.now()", "'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER = 'picky' def create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts = result.get('aTime') direction", "messages.append('Цена изменена') if result.get('flights_invalid') == True: messages.append('Данный перелет не валиден') response = {'messages':", "return response data = { 'v': 2, 'booking_token': booking_token, 'bnum': 3, 'pnum': 2", "create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts = result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') )", "= date_from + datetime.timedelta(days=30) data = { 'flyFrom': fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"),", "print(len(results)) for r in results: create_flight(r) def check_flight(booking_token=None): response = { 'empty': True", "= 'picky' def create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts = result.get('aTime') direction = Direction.objects.get(", "result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data = { 'flight_id': result.get('id'), 'flight_time':", "== True: messages.append('Цена изменена') if result.get('flights_invalid') == True: messages.append('Данный перелет не валиден') response", "'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r = requests.get(SEARCH_API, params=data)", "date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r = requests.get(SEARCH_API, params=data) if r.status_code == 200: print('success!!!')", "print(flight.id) def 
get_flights(): for direction in directions: fly_from = direction[0] fly_to = direction[1]", "params=data) if r.status_code == 200: print('success!!!') results = r.json().get('data') print(len(results)) for r in", "2 } r = requests.get(CHECK_API, params=data) if r.status_code == 200: result = r.json()", "= Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data = { 'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time':", "Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' PARTNER = 'picky' def create_flight(result): flight_time_ts", "fly_from = direction[0] fly_to = direction[1] date_from = datetime.datetime.now() date_to = date_from +", "result.get('price_change') == True: messages.append('Цена изменена') if result.get('flights_invalid') == True: messages.append('Данный перелет не валиден')", "не валиден') response = {'messages': messages, 'status': 'ERR'} if not len(messages): response['status'] =", "result = r.json() messages = [] if result.get('price_change') == True: messages.append('Цена изменена') if", "'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r = requests.get(SEARCH_API, params=data) if r.status_code", "None: return response data = { 'v': 2, 'booking_token': booking_token, 'bnum': 3, 'pnum':", "= { 'flyFrom': fly_from, 'to': fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER }", "result.get('booking_token'), 'direction': direction } flight = Flight.objects.create(**data) print(flight.id) def get_flights(): for direction in", "= Flight.objects.create(**data) print(flight.id) def get_flights(): for direction in directions: fly_from = direction[0] fly_to", "= direction[0] fly_to = direction[1] date_from = datetime.datetime.now() date_to = 
date_from + datetime.timedelta(days=30)", "= direction[1] date_from = datetime.datetime.now() date_to = date_from + datetime.timedelta(days=30) data = {", "'empty': True } if booking_token is None: return response data = { 'v':", "= result.get('aTime') direction = Direction.objects.get( fly_from__code=result.get('cityCodeFrom'), fly_to__code=result.get('cityCodeTo') ) data = { 'flight_id': result.get('id'),", "'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction } flight =", "fly_to, 'dateFrom': date_from.strftime(\"%d/%m/%Y\"), 'dateTo': date_to.strftime(\"%d/%m/%Y\"), 'partner': PARTNER } r = requests.get(SEARCH_API, params=data) if", "<gh_stars>0 import requests import datetime from aviata.flights.consts import directions from aviata.flights.models import Direction,", "'flight_id': result.get('id'), 'flight_time': datetime.datetime.fromtimestamp(flight_time_ts), 'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts), 'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction':", "date_to = date_from + datetime.timedelta(days=30) data = { 'flyFrom': fly_from, 'to': fly_to, 'dateFrom':", "CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?' 
PARTNER = 'picky' def create_flight(result): flight_time_ts = result.get('dTime') arrival_time_ts =", "} if booking_token is None: return response data = { 'v': 2, 'booking_token':", "flight = Flight.objects.create(**data) print(flight.id) def get_flights(): for direction in directions: fly_from = direction[0]", "r.json() messages = [] if result.get('price_change') == True: messages.append('Цена изменена') if result.get('flights_invalid') ==", "from aviata.flights.consts import directions from aviata.flights.models import Direction, Flight SEARCH_API = 'https://api.skypicker.com/flights' CHECK_API", "'fly_duration': result.get('fly_duration'), 'price': result.get('price'), 'booking_token': result.get('booking_token'), 'direction': direction } flight = Flight.objects.create(**data) print(flight.id)", "[] if result.get('price_change') == True: messages.append('Цена изменена') if result.get('flights_invalid') == True: messages.append('Данный перелет", "datetime.datetime.now() date_to = date_from + datetime.timedelta(days=30) data = { 'flyFrom': fly_from, 'to': fly_to," ]
[ "no credentials\" client = api_client() client.auth = None response = client.get(\"/get\") assert response.status_code", "application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get') assert response.status_code == 200 assert", "response.status_code == 200 assert response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if", "Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from threescale_api.resources import Service from testsuite.utils", "auth made has to pass as expected\"\"\" creds = application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'],", "\"Set auth mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set", "requires credentials (app_id, app_key) to be passed using the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb", "be passed using the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from threescale_api.resources", "== 200 assert response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if credentials", "def test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access with Basic HTTP Auth using app id", "Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from threescale_api.resources import Service from testsuite.utils import basic_auth_string", "to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def 
service_proxy_settings(service_proxy_settings): \"Set credentials location to", "app key Configure Api/Service to use App ID / App Key Authentication and", "Service from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth mode to app_id/app_key\"", "with Basic HTTP Auth using app id and app key Configure Api/Service to", "to pass as expected\"\"\" creds = application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response =", "test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if credentials passed wrong way\" client = api_client() client.auth", "as expected\"\"\" creds = application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get') assert", "import Service from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth mode to", "expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if credentials passed wrong way\" client =", "from threescale_api.resources import Service from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth", "@pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\")", "response = api_client().get('/get') assert response.status_code == 200 assert response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application,", "HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke def 
test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access", "Auth using app id and app key Configure Api/Service to use App ID", "api_client): \"Forbid access if credentials passed wrong way\" client = api_client() client.auth =", "Service requires credentials (app_id, app_key) to be passed using the Basic Auth Rewrite", "pass as expected\"\"\" creds = application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get')", "way\" client = api_client() client.auth = application.authobj(location=\"query\") response = client.get(\"/get\") assert response.status_code ==", "testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY})", "import pytest from threescale_api.resources import Service from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings):", "wrong way\" client = api_client() client.auth = application.authobj(location=\"query\") response = client.get(\"/get\") assert response.status_code", "app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set credentials location to 'authorization'", "service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access with Basic HTTP Auth using", "if credentials passed wrong way\" client = api_client() client.auth = application.authobj(location=\"query\") response =", "appropriate Basic auth made has to pass as expected\"\"\" creds = application.authobj().credentials expected_authorization", "access with Basic HTTP Auth using app id and app key Configure Api/Service", "creds['app_key']) response = 
api_client().get('/get') assert response.status_code == 200 assert response.request.headers[\"Authorization\"] == expected_authorization def", "== 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no credentials\" client = api_client() client.auth", "def service_proxy_settings(service_proxy_settings): \"Set credentials location to 'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return", "Api/Service to use App ID / App Key Authentication and Basic HTTP Auth", "app id and app key Configure Api/Service to use App ID / App", "expected\"\"\" creds = application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get') assert response.status_code", "Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set credentials location to 'authorization' (Basic HTTP", "= api_client().get('/get') assert response.status_code == 200 assert response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client):", "\"authorization\"}) return service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access with Basic HTTP", "expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get') assert response.status_code == 200 assert response.request.headers[\"Authorization\"]", "api_client): \"\"\"Test client access with Basic HTTP Auth using app id and app", "Basic HTTP Auth to pass the credentials. Then request made with appropriate Basic", "HTTP Auth to pass the credentials. 
Then request made with appropriate Basic auth", "service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access with Basic", "passed wrong way\" client = api_client() client.auth = application.authobj(location=\"query\") response = client.get(\"/get\") assert", "Basic auth made has to pass as expected\"\"\" creds = application.authobj().credentials expected_authorization =", "passed using the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from threescale_api.resources import", "@pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access with Basic HTTP Auth using app", "/ App Key Authentication and Basic HTTP Auth to pass the credentials. Then", "the credentials. Then request made with appropriate Basic auth made has to pass", "use App ID / App Key Authentication and Basic HTTP Auth to pass", "location to 'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application,", "to be passed using the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from", "request made with appropriate Basic auth made has to pass as expected\"\"\" creds", "the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from threescale_api.resources import Service from", "client.auth = application.authobj(location=\"query\") response = client.get(\"/get\") assert response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid", "basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get') assert response.status_code == 200 assert response.request.headers[\"Authorization\"] == 
expected_authorization", "key Configure Api/Service to use App ID / App Key Authentication and Basic", "\"\"\" import pytest from threescale_api.resources import Service from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def", "Authentication and Basic HTTP Auth to pass the credentials. Then request made with", "service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set credentials location to 'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\":", "api_client().get('/get') assert response.status_code == 200 assert response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid", "App ID / App Key Authentication and Basic HTTP Auth to pass the", "\"\"\"Test client access with Basic HTTP Auth using app id and app key", "App Key Authentication and Basic HTTP Auth to pass the credentials. Then request", "import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return", "response = client.get(\"/get\") assert response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no", "using app id and app key Configure Api/Service to use App ID /", "= basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get') assert response.status_code == 200 assert response.request.headers[\"Authorization\"] ==", "has to pass as expected\"\"\" creds = application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response", "\"Set credentials location to 'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke", "\"Forbid 
access if no credentials\" client = api_client() client.auth = None response =", "200 assert response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if credentials passed", "app_key) to be passed using the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest", "client.get(\"/get\") assert response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no credentials\" client", "(app_id, app_key) to be passed using the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import", "from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth mode to app_id/app_key\" service_settings.update({\"backend_version\":", "== expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if credentials passed wrong way\" client", "auth mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set credentials", "return service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access with Basic HTTP Auth", "ID / App Key Authentication and Basic HTTP Auth to pass the credentials.", "'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client): \"\"\"Test", "client = api_client() client.auth = None response = client.get(\"/get\") assert response.status_code == 403", "return service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set credentials location to 
'authorization' (Basic HTTP auth)\"", "service_settings(service_settings): \"Set auth mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings):", "mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set credentials location", "credentials\" client = api_client() client.auth = None response = client.get(\"/get\") assert response.status_code ==", "with appropriate Basic auth made has to pass as expected\"\"\" creds = application.authobj().credentials", "(Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client): \"\"\"Test client", "= client.get(\"/get\") assert response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no credentials\"", "Auth to pass the credentials. Then request made with appropriate Basic auth made", "def service_settings(service_settings): \"Set auth mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def", "test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access with Basic HTTP Auth using app id and", "Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from threescale_api.resources import Service from testsuite.utils import", "pass the credentials. 
Then request made with appropriate Basic auth made has to", "def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no credentials\" client = api_client() client.auth = None", "Then request made with appropriate Basic auth made has to pass as expected\"\"\"", "= api_client() client.auth = application.authobj(location=\"query\") response = client.get(\"/get\") assert response.status_code == 403 def", "application.authobj(location=\"query\") response = client.get(\"/get\") assert response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if", "assert response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if credentials passed wrong", "response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no credentials\" client = api_client()", "= application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get') assert response.status_code == 200", "pytest from threescale_api.resources import Service from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set", "to use App ID / App Key Authentication and Basic HTTP Auth to", "\"\"\" Service requires credentials (app_id, app_key) to be passed using the Basic Auth", "./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from threescale_api.resources import Service from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\")", "and Basic HTTP Auth to pass the credentials. 
Then request made with appropriate", "= application.authobj(location=\"query\") response = client.get(\"/get\") assert response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access", "response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if credentials passed wrong way\"", "id and app key Configure Api/Service to use App ID / App Key", "HTTP Auth using app id and app key Configure Api/Service to use App", "and app key Configure Api/Service to use App ID / App Key Authentication", "client = api_client() client.auth = application.authobj(location=\"query\") response = client.get(\"/get\") assert response.status_code == 403", "credentials passed wrong way\" client = api_client() client.auth = application.authobj(location=\"query\") response = client.get(\"/get\")", "credentials. Then request made with appropriate Basic auth made has to pass as", "made with appropriate Basic auth made has to pass as expected\"\"\" creds =", "test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no credentials\" client = api_client() client.auth = None response", "@pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set credentials location to 'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"})", "if no credentials\" client = api_client() client.auth = None response = client.get(\"/get\") assert", "assert response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no credentials\" client =", "auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client): \"\"\"Test client access with", "api_client() client.auth = application.authobj(location=\"query\") response = 
client.get(\"/get\") assert response.status_code == 403 def test_basic_auth_app_id_403_without_auth(api_client):", "assert response.status_code == 200 assert response.request.headers[\"Authorization\"] == expected_authorization def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access", "made has to pass as expected\"\"\" creds = application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key'])", "threescale_api.resources import Service from testsuite.utils import basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth mode", "\"Forbid access if credentials passed wrong way\" client = api_client() client.auth = application.authobj(location=\"query\")", "access if no credentials\" client = api_client() client.auth = None response = client.get(\"/get\")", "basic_auth_string @pytest.fixture(scope=\"module\") def service_settings(service_settings): \"Set auth mode to app_id/app_key\" service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings", "service_proxy_settings(service_proxy_settings): \"Set credentials location to 'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings", "to 'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke def test_basic_auth_app_id_key(application, api_client):", "403 def test_basic_auth_app_id_403_without_auth(api_client): \"Forbid access if no credentials\" client = api_client() client.auth =", "Configure Api/Service to use App ID / App Key Authentication and Basic HTTP", "creds = application.authobj().credentials expected_authorization = basic_auth_string(creds['app_id'], creds['app_key']) response = api_client().get('/get') assert response.status_code ==", "credentials (app_id, app_key) to 
be passed using the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\"", "Key Authentication and Basic HTTP Auth to pass the credentials. Then request made", "to pass the credentials. Then request made with appropriate Basic auth made has", "credentials location to 'authorization' (Basic HTTP auth)\" service_proxy_settings.update({\"credentials_location\": \"authorization\"}) return service_proxy_settings @pytest.mark.smoke def", "access if credentials passed wrong way\" client = api_client() client.auth = application.authobj(location=\"query\") response", "client access with Basic HTTP Auth using app id and app key Configure", "Basic HTTP Auth using app id and app key Configure Api/Service to use", "using the Basic Auth Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb \"\"\" import pytest from threescale_api.resources import Service", "def test_basic_auth_app_id_403_with_query(application, api_client): \"Forbid access if credentials passed wrong way\" client = api_client()", "service_settings.update({\"backend_version\": Service.AUTH_APP_ID_KEY}) return service_settings @pytest.fixture(scope=\"module\") def service_proxy_settings(service_proxy_settings): \"Set credentials location to 'authorization' (Basic" ]
[ "from __future__ import unicode_literals from markdown import markdown as markdown_ def dateformat(date): if", "def dateformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d') def datetimeformat(date): if not", "if not date: return \"\" return date.strftime('%Y-%m-%d') def datetimeformat(date): if not date: return", "dateformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d') def datetimeformat(date): if not date:", "return \"\" return date.strftime('%Y-%m-%d') def datetimeformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d", "date: return \"\" return date.strftime('%Y-%m-%d %I:%M %p') def markdown(text): if not text: return", "as markdown_ def dateformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d') def datetimeformat(date):", "__future__ import unicode_literals from markdown import markdown as markdown_ def dateformat(date): if not", "unicode_literals from markdown import markdown as markdown_ def dateformat(date): if not date: return", "if not date: return \"\" return date.strftime('%Y-%m-%d %I:%M %p') def markdown(text): if not", "date.strftime('%Y-%m-%d') def datetimeformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d %I:%M %p') def", "markdown import markdown as markdown_ def dateformat(date): if not date: return \"\" return", "not date: return \"\" return date.strftime('%Y-%m-%d %I:%M %p') def markdown(text): if not text:", "return date.strftime('%Y-%m-%d %I:%M %p') def markdown(text): if not text: return \"\" return markdown_(text)", "date: return \"\" return date.strftime('%Y-%m-%d') def datetimeformat(date): if not date: return \"\" return", "\"\" return date.strftime('%Y-%m-%d %I:%M %p') def markdown(text): if not text: return \"\" return", "from markdown import markdown as markdown_ def dateformat(date): if not date: return \"\"", "markdown_ def dateformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d') def datetimeformat(date): if", "return \"\" 
return date.strftime('%Y-%m-%d %I:%M %p') def markdown(text): if not text: return \"\"", "not date: return \"\" return date.strftime('%Y-%m-%d') def datetimeformat(date): if not date: return \"\"", "coding=utf-8 from __future__ import unicode_literals from markdown import markdown as markdown_ def dateformat(date):", "markdown as markdown_ def dateformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d') def", "def datetimeformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d %I:%M %p') def markdown(text):", "\"\" return date.strftime('%Y-%m-%d') def datetimeformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d %I:%M", "import markdown as markdown_ def dateformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d')", "datetimeformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d %I:%M %p') def markdown(text): if", "import unicode_literals from markdown import markdown as markdown_ def dateformat(date): if not date:", "# coding=utf-8 from __future__ import unicode_literals from markdown import markdown as markdown_ def", "return date.strftime('%Y-%m-%d') def datetimeformat(date): if not date: return \"\" return date.strftime('%Y-%m-%d %I:%M %p')" ]
[ "merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\") k_f = _k_f(sim.params) #", "eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak PiEKd", "0.1) / k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps", "f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak", "ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2,", "# transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak PiEKd =", "_index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax from paths import paths_sim, exit_if_figure_exists def fig2_seb(path,", "matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax = pl.subplots() fig2_seb(paths_sim[\"noise_c100nh3840Buinf\"], fig, ax)", "PiEK = PiEKr + PiEKd PiEA = PiEAr + PiEAd PiE = PiEK", "import os import h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv from base import _index_where, _k_f,", "exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax = pl.subplots() fig2_seb(paths_sim[\"noise_c100nh3840Buinf\"], fig, ax) # , t_start=20)", "= f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak PiEKd = cumsum_inv(transferEKd) *", "__name__ == \"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax = pl.subplots()", "PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak print(eps) ax.axhline(1.0,", "matplotlib_rc, epsetstmax from paths import paths_sim, 
exit_if_figure_exists def fig2_seb(path, fig=None, ax=None, t_start=None): sim", "f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr = cumsum_inv(transferEKr) *", "_k_f(sim.params) # eps = _eps(sim, t_start) eps, E, ts, tmax = epsetstmax(path) if", "set_figsize, matplotlib_rc, epsetstmax from paths import paths_sim, exit_if_figure_exists def fig2_seb(path, fig=None, ax=None, t_start=None):", "ax.legend() if __name__ == \"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax", "pl import fluidsim as fls import os import h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv", "paths_sim, exit_if_figure_exists def fig2_seb(path, fig=None, ax=None, t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file =", "eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr", "PiE = PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2,", "print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK = PiEKr + PiEKd PiEA = PiEAr +", "sim.oper.deltak # PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK =", "cumsum_inv from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax from paths import", "_k_f, _eps, set_figsize, matplotlib_rc, epsetstmax from paths import paths_sim, exit_if_figure_exists def fig2_seb(path, fig=None,", "(f[\"khE\"][...] 
+ 0.1) / k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0)", "PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak PiEAd = cumsum_inv(transferEAd)", "* sim.oper.deltak PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak PiEAd", "* sim.oper.deltak # PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK", "ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\")", "t_start) eps, E, ts, tmax = epsetstmax(path) if t_start is None: t_start =", "= fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\") k_f =", "PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd =", "= exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax = pl.subplots() fig2_seb(paths_sim[\"noise_c100nh3840Buinf\"], fig, ax) # ,", "transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0)", "epsetstmax from paths import paths_sim, exit_if_figure_exists def fig2_seb(path, fig=None, ax=None, t_start=None): sim =", "eps PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak PiEAr =", "PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if", "if __name__ == \"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax =", "sim.oper.deltak PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak PiEAd = 
cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd", "from fluidsim.base.output.spect_energy_budget import cumsum_inv from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax", "t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\")", "\"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\") k_f = _k_f(sim.params) # eps = _eps(sim, t_start)", "exit_if_figure_exists def fig2_seb(path, fig=None, ax=None, t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path,", "ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA,", "ax.axhline(1.0, color=\"k\", ls=\":\") PiEK = PiEKr + PiEKd PiEA = PiEAr + PiEAd", "set_figsize(5, 3) fig, ax = pl.subplots() fig2_seb(paths_sim[\"noise_c100nh3840Buinf\"], fig, ax) # , t_start=20) pl.savefig(path_fig)", "ls=\":\") PiEK = PiEKr + PiEKd PiEA = PiEAr + PiEAd PiE =", "t_start) khE = (f[\"khE\"][...] 
+ 0.1) / k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps", "* sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK = PiEKr + PiEKd PiEA =", "fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\") k_f = _k_f(sim.params)", "import paths_sim, exit_if_figure_exists def fig2_seb(path, fig=None, ax=None, t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file", "PiEKd PiEA = PiEAr + PiEAd PiE = PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\")", "+ PiEKd PiEA = PiEAr + PiEAd PiE = PiEK + PiEA ax.set_xlabel(\"$k/k_f$\")", "linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__ == \"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__)", "khE = (f[\"khE\"][...] + 0.1) / k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd", "= f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) /", "= cumsum_inv(transferEPd) * sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK = PiEKr + PiEKd", "fls import os import h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv from base import _index_where,", "import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax from paths import paths_sim, exit_if_figure_exists def", "= PiEKr + PiEKd PiEA = PiEAr + PiEAd PiE = PiEK +", "PiEAd PiE = PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\",", "label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1])", "sim.oper.deltak PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd = 
cumsum_inv(transferEPd) * sim.oper.deltak print(eps)", "\"r\") k_f = _k_f(sim.params) # eps = _eps(sim, t_start) eps, E, ts, tmax", "cumsum_inv(transferEKd) * sim.oper.deltak PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak", "transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd =", "* sim.oper.deltak PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak", "sim.oper.deltak PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak PiEAd =", "if t_start is None: t_start = ts imin_plot = _index_where(f[\"times\"][...], t_start) khE =", "+ PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK,", "label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__ == \"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5,", "= ts imin_plot = _index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...] 
+ 0.1) / k_f", "ax.plot(khE, PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__ == \"__main__\": matplotlib_rc()", "= epsetstmax(path) if t_start is None: t_start = ts imin_plot = _index_where(f[\"times\"][...], t_start)", "color=\"k\", ls=\":\") PiEK = PiEKr + PiEKd PiEA = PiEAr + PiEAd PiE", "# eps = _eps(sim, t_start) eps, E, ts, tmax = epsetstmax(path) if t_start", "PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\",", "/ eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps #", "PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2,", "PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK = PiEKr +", "eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd", "sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK = PiEKr + PiEKd PiEA = PiEAr", "fluidsim.base.output.spect_energy_budget import cumsum_inv from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax from", "PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak PiEAr = cumsum_inv(transferEAr)", "\"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\")", "linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1,", "/ k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd = 
f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr", "python import pylab as pl import fluidsim as fls import os import h5py", "def fig2_seb(path, fig=None, ax=None, t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\")", "ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE,", "_index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...] + 0.1) / k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) /", "1.1]) ax.legend() if __name__ == \"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3) fig,", "PiEKr + PiEKd PiEA = PiEAr + PiEAd PiE = PiEK + PiEA", "ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\",", "ax.plot(khE, PiEK, \"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend()", "= _index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...] + 0.1) / k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0)", "_eps(sim, t_start) eps, E, ts, tmax = epsetstmax(path) if t_start is None: t_start", "t_start is None: t_start = ts imin_plot = _index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...]", "= cumsum_inv(transferEKd) * sim.oper.deltak PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak PiEAd = cumsum_inv(transferEAd) *", "path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax = pl.subplots() fig2_seb(paths_sim[\"noise_c100nh3840Buinf\"], fig, ax) #", "is None: t_start = ts imin_plot = _index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...] 
+", "label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__ == \"__main__\":", "\"r:\", linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__", "ts imin_plot = _index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...] + 0.1) / k_f transferEKr", "from paths import paths_sim, exit_if_figure_exists def fig2_seb(path, fig=None, ax=None, t_start=None): sim = fls.load_sim_for_plot(path,", "ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__ == \"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3)", "as pl import fluidsim as fls import os import h5py from fluidsim.base.output.spect_energy_budget import", "= cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\",", "/ eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps", "cumsum_inv(transferEAr) * sim.oper.deltak PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd = cumsum_inv(transferEPd) *", "transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak PiEKd = cumsum_inv(transferEKd)", "\"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__ == \"__main__\": matplotlib_rc() path_fig =", "= f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) /", "* sim.oper.deltak PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak #", "os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\") k_f = _k_f(sim.params) # eps = _eps(sim,", "PiEA = PiEAr + PiEAd PiE = PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") 
ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\")", "transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr =", "== \"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax = pl.subplots() fig2_seb(paths_sim[\"noise_c100nh3840Buinf\"],", "sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\") k_f", "fluidsim as fls import os import h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv from base", "import pylab as pl import fluidsim as fls import os import h5py from", "f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) /", "\"__main__\": matplotlib_rc() path_fig = exit_if_figure_exists(__file__) set_figsize(5, 3) fig, ax = pl.subplots() fig2_seb(paths_sim[\"noise_c100nh3840Buinf\"], fig,", "tmax = epsetstmax(path) if t_start is None: t_start = ts imin_plot = _index_where(f[\"times\"][...],", "t_start = ts imin_plot = _index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...] 
+ 0.1) /", "import cumsum_inv from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax from paths", "_eps, set_figsize, matplotlib_rc, epsetstmax from paths import paths_sim, exit_if_figure_exists def fig2_seb(path, fig=None, ax=None,", "fig2_seb(path, fig=None, ax=None, t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\") f", "PiEA, \"b--\", linewidth=2, label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__ == \"__main__\": matplotlib_rc() path_fig", "PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\") ax.plot(khE,", "paths import paths_sim, exit_if_figure_exists def fig2_seb(path, fig=None, ax=None, t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True)", "/ eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak", "imin_plot = _index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...] 
+ 0.1) / k_f transferEKr =", "/ eps PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak PiEAr", "ax=None, t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file,", "= _k_f(sim.params) # eps = _eps(sim, t_start) eps, E, ts, tmax = epsetstmax(path)", "E, ts, tmax = epsetstmax(path) if t_start is None: t_start = ts imin_plot", "f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps", "as fls import os import h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv from base import", "cumsum_inv(transferEKr) * sim.oper.deltak PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak", "= PiEAr + PiEAd PiE = PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\")", "PiEAr + PiEAd PiE = PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE,", "= PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE, \"k\", linewidth=2, label=r\"$\\Pi$\")", "k_f = _k_f(sim.params) # eps = _eps(sim, t_start) eps, E, ts, tmax =", "f = h5py.File(path_file, \"r\") k_f = _k_f(sim.params) # eps = _eps(sim, t_start) eps,", "#!/usr/bin/env python import pylab as pl import fluidsim as fls import os import", "h5py.File(path_file, \"r\") k_f = _k_f(sim.params) # eps = _eps(sim, t_start) eps, E, ts,", "eps = _eps(sim, t_start) eps, E, ts, tmax = epsetstmax(path) if t_start is", "path_file = os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\") k_f = _k_f(sim.params) # eps", "cumsum_inv(transferEPd) * sim.oper.deltak 
print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK = PiEKr + PiEKd PiEA", "+ PiEAd PiE = PiEK + PiEA ax.set_xlabel(\"$k/k_f$\") ax.set_ylabel(r\"$\\Pi(k)/\\epsilon$\") ax.set_xscale(\"log\") ax.set_yscale(\"linear\") ax.plot(khE, PiE,", "= cumsum_inv(transferEKr) * sim.oper.deltak PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak PiEAr = cumsum_inv(transferEAr) *", "= h5py.File(path_file, \"r\") k_f = _k_f(sim.params) # eps = _eps(sim, t_start) eps, E,", "transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0)", "pylab as pl import fluidsim as fls import os import h5py from fluidsim.base.output.spect_energy_budget", "base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax from paths import paths_sim, exit_if_figure_exists", "eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd =", "= f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps PiEKr = cumsum_inv(transferEKr)", "import fluidsim as fls import os import h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv from", "eps, E, ts, tmax = epsetstmax(path) if t_start is None: t_start = ts", "h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc,", "# PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\", ls=\":\") PiEK = PiEKr", "/ eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd", "k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr =", "linewidth=2, label=r\"$\\Pi_K$\") ax.plot(khE, PiEA, \"b--\", linewidth=2, 
label=r\"$\\Pi_A$\") ax.set_ylim([-0.1, 1.1]) ax.legend() if __name__ ==", "epsetstmax(path) if t_start is None: t_start = ts imin_plot = _index_where(f[\"times\"][...], t_start) khE", "+ 0.1) / k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd = f[\"transfer2D_EKd\"][imin_plot:].mean(0) /", "= cumsum_inv(transferEAr) * sim.oper.deltak PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd = cumsum_inv(transferEPd)", "= _eps(sim, t_start) eps, E, ts, tmax = epsetstmax(path) if t_start is None:", "from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax from paths import paths_sim,", "None: t_start = ts imin_plot = _index_where(f[\"times\"][...], t_start) khE = (f[\"khE\"][...] + 0.1)", "import h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv from base import _index_where, _k_f, _eps, set_figsize,", "= f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps # transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0)", "cumsum_inv(transferEAd) * sim.oper.deltak # PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak print(eps) ax.axhline(1.0, color=\"k\", ls=\":\")", "= (f[\"khE\"][...] + 0.1) / k_f transferEKr = f[\"transfer2D_EKr\"][imin_plot:].mean(0) / eps transferEKd =", "f[\"transfer2D_EKd\"][imin_plot:].mean(0) / eps transferEAr = f[\"transfer2D_EAr\"][imin_plot:].mean(0) / eps transferEAd = f[\"transfer2D_EAd\"][imin_plot:].mean(0) / eps", "= os.path.join(path, \"spect_energy_budg.h5\") f = h5py.File(path_file, \"r\") k_f = _k_f(sim.params) # eps =", "os import h5py from fluidsim.base.output.spect_energy_budget import cumsum_inv from base import _index_where, _k_f, _eps,", "fig=None, ax=None, t_start=None): sim = fls.load_sim_for_plot(path, merge_missing_params=True) path_file = os.path.join(path, \"spect_energy_budg.h5\") f =", "ts, tmax = epsetstmax(path) if t_start is None: t_start = ts imin_plot =" ]
[ "from flask_sqlalchemy import SQLAlchemy db = SQLAlchemy() class Rickroll(db.Model): __tablename__ = \"rickrolls\" url", "class Rickroll(db.Model): __tablename__ = \"rickrolls\" url = db.Column(db.String, primary_key=True) title = db.Column(db.String, nullable=False)", "SQLAlchemy db = SQLAlchemy() class Rickroll(db.Model): __tablename__ = \"rickrolls\" url = db.Column(db.String, primary_key=True)", "__tablename__ = \"rickrolls\" url = db.Column(db.String, primary_key=True) title = db.Column(db.String, nullable=False) imgurl =", "primary_key=True) title = db.Column(db.String, nullable=False) imgurl = db.Column(db.String, nullable=False) redirecturl = db.Column(db.String, nullable=False)", "= SQLAlchemy() class Rickroll(db.Model): __tablename__ = \"rickrolls\" url = db.Column(db.String, primary_key=True) title =", "db.Column(db.String, primary_key=True) title = db.Column(db.String, nullable=False) imgurl = db.Column(db.String, nullable=False) redirecturl = db.Column(db.String,", "= db.Column(db.String, primary_key=True) title = db.Column(db.String, nullable=False) imgurl = db.Column(db.String, nullable=False) redirecturl =", "nullable=False) imgurl = db.Column(db.String, nullable=False) redirecturl = db.Column(db.String, nullable=False) rollcount = db.Column(db.Integer, nullable=False,", "title = db.Column(db.String, nullable=False) imgurl = db.Column(db.String, nullable=False) redirecturl = db.Column(db.String, nullable=False) rollcount", "imgurl = db.Column(db.String, nullable=False) redirecturl = db.Column(db.String, nullable=False) rollcount = db.Column(db.Integer, nullable=False, default=0)", "SQLAlchemy() class Rickroll(db.Model): __tablename__ = \"rickrolls\" url = db.Column(db.String, primary_key=True) title = db.Column(db.String,", "= db.Column(db.String, nullable=False) imgurl = db.Column(db.String, nullable=False) redirecturl = db.Column(db.String, nullable=False) rollcount =", "\"rickrolls\" url = db.Column(db.String, primary_key=True) title = 
db.Column(db.String, nullable=False) imgurl = db.Column(db.String, nullable=False)", "Rickroll(db.Model): __tablename__ = \"rickrolls\" url = db.Column(db.String, primary_key=True) title = db.Column(db.String, nullable=False) imgurl", "import SQLAlchemy db = SQLAlchemy() class Rickroll(db.Model): __tablename__ = \"rickrolls\" url = db.Column(db.String,", "url = db.Column(db.String, primary_key=True) title = db.Column(db.String, nullable=False) imgurl = db.Column(db.String, nullable=False) redirecturl", "= \"rickrolls\" url = db.Column(db.String, primary_key=True) title = db.Column(db.String, nullable=False) imgurl = db.Column(db.String,", "db.Column(db.String, nullable=False) imgurl = db.Column(db.String, nullable=False) redirecturl = db.Column(db.String, nullable=False) rollcount = db.Column(db.Integer,", "db = SQLAlchemy() class Rickroll(db.Model): __tablename__ = \"rickrolls\" url = db.Column(db.String, primary_key=True) title", "flask_sqlalchemy import SQLAlchemy db = SQLAlchemy() class Rickroll(db.Model): __tablename__ = \"rickrolls\" url =" ]
[ "plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\")", "k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()),", "in range(K): X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2)", ":].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\",", ":].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if savename", "variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0,", "def plot_model(variational_model, X_true, K, M, savename=None): for k in range(K): X, mu, x_pre,", "savename=None): for k in range(K): X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:,", "log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\",", "numpy as np import matplotlib.pyplot as plt def plot_model(variational_model, X_true, K, M, savename=None):", "X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k,", "x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, 
:].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1),", "import numpy as np import matplotlib.pyplot as plt def plot_model(variational_model, X_true, K, M,", "c=\"k\", lw=\"5\", ls=\"--\") if savename is None: plt.show() else: plt.savefig(savename + \"_{}\".format(k)) plt.clf()", "K, M, savename=None): for k in range(K): X, mu, x_pre, log_jacobian, epsilon_loss =", "import matplotlib.pyplot as plt def plot_model(variational_model, X_true, K, M, savename=None): for k in", "k, :].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if", "M, savename=None): for k in range(K): X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M)", "plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k,", "np import matplotlib.pyplot as plt def plot_model(variational_model, X_true, K, M, savename=None): for k", "plot_model(variational_model, X_true, K, M, savename=None): for k in range(K): X, mu, x_pre, log_jacobian,", "as np import matplotlib.pyplot as plt def plot_model(variational_model, X_true, K, M, savename=None): for", "c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if savename is None:", "k, :].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if savename is None: plt.show() else: plt.savefig(savename +", "mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()),", "ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if savename is None: plt.show() else:", "1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, 
:].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if savename is", "k in range(K): X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()),", "X_true, K, M, savename=None): for k in range(K): X, mu, x_pre, log_jacobian, epsilon_loss", "range(K): X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:,", "= variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\")", "epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\", lw=\"3\",", "lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if savename is None: plt.show()", "plt def plot_model(variational_model, X_true, K, M, savename=None): for k in range(K): X, mu,", ":].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if savename is None: plt.show() else: plt.savefig(savename + \"_{}\".format(k))", "matplotlib.pyplot as plt def plot_model(variational_model, X_true, K, M, savename=None): for k in range(K):", "as plt def plot_model(variational_model, X_true, K, M, savename=None): for k in range(K): X,", "for k in range(K): X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M) plt.plot(np.transpose(X[:, k,", "alpha=0.2) plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c=\"r\", lw=\"3\", ls=\"--\") plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\", lw=\"5\",", "plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c=\"k\", lw=\"5\", ls=\"--\") if savename is None: plt.show() else: plt.savefig(savename" ]
[ "again!\") sys.exit(1) if args.seqid and not args.separate: sys.stderr.write(\"-i was provided, so requiring to", "args.seqid and not args.separate: sys.stderr.write(\"-i was provided, so requiring to separate files (--separate", "genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped", "sid != lastid: if out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid", "for l in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did = True if args.functions: try:", "= open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.aminoacids}.faa\",", "help='output file for orfs (.orfs will be appended)') parser.add_argument('-f', '--functions', help='output file for", "continue if sid != lastid: if out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid", "out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank') out.close() did = True if not", "as e: sys.stderr.write(f\"There was an error writing to {args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder:", "and seq.id not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id} not provided in -i", "tple)) + \"\\n\") did = True if not did and args.separate: lastid =", "file for nucleotide sequence (.fna will be appended)') parser.add_argument('-p', '--ptt', help='output file for", "continue if seqid != lastid: if out: out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid", "does not exist. 
Please check the file path and try again!\") sys.exit(1) if", "args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close() except IOError as e: sys.stderr.write(f\"There was an", "out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with", "lastid: if out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if", "table') parser.add_argument('-o', '--orfs', help='output file for orfs (.orfs will be appended)') parser.add_argument('-f', '--functions',", "roblib import genbank from Bio import SeqIO __author__ = '<NAME>' __copyright__ = 'Copyright", "{seqid} not provided in -i options\\n\") continue if seqid != lastid: if out:", "out: for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True", "not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id} not provided in -i options\\n\") continue", "as out: for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did =", "l))) out.write(\"\\n\") did = True if args.functions: try: if args.zip: out = gzip.open(f\"{args.functions}.gz\",", "if out: out.close() else: with open(f\"{args.aminoacids}.faa\", 'w') as out: for seqid, sid, seq", "if args.aminoacids: if args.separate: lastid = None out = None for seqid, sid,", "else: with open(f\"{args.nucleotide}.fna\", 'w') as out: for sid, seq in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\")", "parser = argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank file', required=True) parser.add_argument('-c', '--complex', help='complex identifier", "parser.add_argument('-z', '--zip', help='gzip compress the output. 
Experimental and may not work with everything!',", "for tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did = True if", "out: for sid, seq in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.aminoacids:", "output these sequence ID(s) [multiple -i allowed]', action='append') parser.add_argument('--phage_finder', help='make a phage finder", "{args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder: with open(args.phage_finder, 'w') as out: for tple in", "sys.stderr.write(f\"Skipped {seqid} not provided in -i options\\n\") continue if seqid != lastid: if", "out.close() did = True if not did: sys.stderr.write(\"Please provide either a -n, -a,", "out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with", "= False if args.nucleotide: if args.separate: lastid = None out = None for", "tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did = True if not", "seqid != lastid: if out: out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid", "seq in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.aminoacids: if args.separate: lastid", "out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else:", "seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid and sid not in", "SeqIO __author__ = '<NAME>' __copyright__ = 'Copyright 2020, <NAME>' __credits__ = ['<NAME>'] __license__", "'__main__': parser = argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank file', required=True) parser.add_argument('-c', '--complex', help='complex", "out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid 
out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else:", "action='store_true') args = parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not exist. Please", "file', required=True) parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file for", "else: out = open(args.functions, 'w') for sid, pid, prod in genbank_to_functions(args.genbank, True, args.v):", "== '__main__': parser = argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank file', required=True) parser.add_argument('-c', '--complex',", "args.seqid and seq.id not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id} not provided in", "args.v: sys.stderr.write(f\"Skipped {seq.id} not provided in -i options\\n\") continue out = open(f\"{seq.id}.gbk\", 'w')", "Please check the file path and try again!\") sys.exit(1) if args.seqid and not", "in genbank_seqio(args.genbank): if args.seqid and seq.id not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id}", "action='store_true') parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank}", "genbank from Bio import SeqIO __author__ = '<NAME>' __copyright__ = 'Copyright 2020, <NAME>'", "and may not work with everything!', action='store_true') parser.add_argument('-v', help='verbose output', action='store_true') args =", "os import sys import gzip import argparse from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs,", "these sequence ID(s) [multiple -i allowed]', action='append') parser.add_argument('--phage_finder', help='make a phage finder file')", "+ \"\\n\") did = True if not did and args.separate: lastid = None", "did = True if args.aminoacids: if args.separate: lastid = None out = None", "True if args.aminoacids: if 
args.separate: lastid = None out = None for seqid,", "options\\n\") continue if seqid != lastid: if out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w')", "open(args.phage_finder, 'w') as out: for tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\")", "if args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid} not provided", "open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.aminoacids}.faa\", 'w')", "__name__ == '__main__': parser = argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank file', required=True) parser.add_argument('-c',", "file to sequences \"\"\" import os import sys import gzip import argparse from", "in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did = True if not did", "appended)') parser.add_argument('-p', '--ptt', help='output file for the ptt protein table') parser.add_argument('-o', '--orfs', help='output", "with everything!', action='store_true') parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() if not os.path.exists(args.genbank):", "!= lastid: if out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\")", "help='output file for nucleotide sequence (.fna will be appended)') parser.add_argument('-p', '--ptt', help='output file", "help='verbose output', action='store_true') args = parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not", "in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did = True if args.functions: try: if args.zip:", "continue if seqid != lastid: if out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid", "= 'Copyright 2020, <NAME>' __credits__ = ['<NAME>'] 
__license__ = 'MIT' __maintainer__ = '<NAME>'", "\"\\n\") did = True if not did and args.separate: lastid = None out", "= '<EMAIL>' if __name__ == '__main__': parser = argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank", "out.write(f\">{sid}\\n{seq}\\n\") did = True if args.orfs: if args.separate: lastid = None out =", "file for two column table of [protein id, function]') parser.add_argument('-i', '--seqid', help='Only output", "in args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid} not provided in -i options\\n\") continue if", "= True if not did: sys.stderr.write(\"Please provide either a -n, -a, -o, -p,", "False if args.nucleotide: if args.separate: lastid = None out = None for sid,", "args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid} not provided in", "(with no other options just output gbk).') parser.add_argument('-z', '--zip', help='gzip compress the output.", "!= lastid: if out: out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\")", "True if args.ptt: r = genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt, 'w') as out:", "to {args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder: with open(args.phage_finder, 'w') as out: for tple", "= open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.nucleotide}.fna\",", "amino acid sequences (.faa will be appended)\") parser.add_argument('-n', '--nucleotide', help='output file for nucleotide", "as out: for sid, seq in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True if", "r = genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt, 'w') as out: for l in", "with open(args.ptt, 'w') as out: for l in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did", "'--genbank', help='genbank file', required=True) 
parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output", "['<NAME>'] __license__ = 'MIT' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' if __name__ ==", "genbank_seqio from roblib import genbank from Bio import SeqIO __author__ = '<NAME>' __copyright__", "will be appended)\") parser.add_argument('-n', '--nucleotide', help='output file for nucleotide sequence (.fna will be", "__author__ = '<NAME>' __copyright__ = 'Copyright 2020, <NAME>' __credits__ = ['<NAME>'] __license__ =", "sys.exit(1) if args.phage_finder: with open(args.phage_finder, 'w') as out: for tple in genbank.genbank_to_phage_finder(args.genbank, args.v):", "import sys import gzip import argparse from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt,", "SeqIO.write(seq, out, 'genbank') out.close() did = True if not did: sys.stderr.write(\"Please provide either", "import os import sys import gzip import argparse from roblib import genbank_to_faa, genbank_to_fna,", "did = False if args.nucleotide: if args.separate: lastid = None out = None", "a phage finder file') parser.add_argument('--separate', action='store_true', help='separate output into different files (with no", "= genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt, 'w') as out: for l in r:", "to separate files (--separate assumed)\\n\") args.separate = True did = False if args.nucleotide:", "sys.stderr.write(f\"Skipped {sid} not provided in -i options\\n\") continue if sid != lastid: if", "options\\n\") continue if sid != lastid: if out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w')", "was an error writing to {args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder: with open(args.phage_finder, 'w')", "out.write(f\">{sid}\\n{seq}\\n\") did = True if args.aminoacids: if args.separate: lastid = None out =", "appended)') parser.add_argument('-f', '--functions', help='output file for two column 
table of [protein id, function]')", "path and try again!\") sys.exit(1) if args.seqid and not args.separate: sys.stderr.write(\"-i was provided,", "if not did: sys.stderr.write(\"Please provide either a -n, -a, -o, -p, -f output", "out.close() else: with open(f\"{args.aminoacids}.faa\", 'w') as out: for seqid, sid, seq in genbank_to_faa(args.genbank,", "if __name__ == '__main__': parser = argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank file', required=True)", "not work with everything!', action='store_true') parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() if", "allowed]', action='append') parser.add_argument('--phage_finder', help='make a phage finder file') parser.add_argument('--separate', action='store_true', help='separate output into", "id, function]') parser.add_argument('-i', '--seqid', help='Only output these sequence ID(s) [multiple -i allowed]', action='append')", "out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did = True if args.functions: try: if args.zip: out =", "= True if not did and args.separate: lastid = None out = None", "out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close()", "Convert a genbank file to sequences \"\"\" import os import sys import gzip", "\"\"\" Convert a genbank file to sequences \"\"\" import os import sys import", "ptt protein table') parser.add_argument('-o', '--orfs', help='output file for orfs (.orfs will be appended)')", "nucleotide sequence (.fna will be appended)') parser.add_argument('-p', '--ptt', help='output file for the ptt", "out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.aminoacids}.faa\", 'w') as out: for seqid, sid,", "= open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.orfs}.orfs\",", "seq in 
genbank_seqio(args.genbank): if args.seqid and seq.id not in args.seqid: if args.v: sys.stderr.write(f\"Skipped", "import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib import genbank from Bio", "genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt, 'w') as out: for l in r: out.write(\"\\t\".join(map(str,", "parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not exist. Please check the file", "args.v: sys.stderr.write(f\"Skipped {sid} not provided in -i options\\n\") continue if sid != lastid:", "genbank_seqio(args.genbank): if args.seqid and seq.id not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id} not", "args.separate: lastid = None out = None for seqid, sid, seq in genbank_to_faa(args.genbank,", "'--orfs', help='output file for orfs (.orfs will be appended)') parser.add_argument('-f', '--functions', help='output file", "'--seqid', help='Only output these sequence ID(s) [multiple -i allowed]', action='append') parser.add_argument('--phage_finder', help='make a", "'w') as out: for tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did", "in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id} not provided in -i options\\n\") continue out", "if args.separate: lastid = None out = None for sid, seq in genbank_to_fna(args.genbank,", "if args.separate: lastid = None out = None for seqid, sid, seq in", "provided in -i options\\n\") continue if seqid != lastid: if out: out.close() out", "open(f\"{args.aminoacids}.faa\", 'w') as out: for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\")", "from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib import genbank", "requiring to separate files (--separate assumed)\\n\") args.separate = 
True did = False if", "None for sid, seq in genbank_to_fna(args.genbank, args.complex): if args.seqid and sid not in", "lastid: if out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid out.write(f\">{sid}\\n{seq}\\n\") if", "out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with", "open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.orfs}.orfs\", 'w')", "for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if", "genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.ptt: r = genbank_to_ptt(args.genbank, False,", "IOError as e: sys.stderr.write(f\"There was an error writing to {args.functions}: {e}\\n\") sys.exit(1) if", "for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid and sid not", "help=\"output file for the amino acid sequences (.faa will be appended)\") parser.add_argument('-n', '--nucleotide',", "action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file for the amino acid sequences (.faa will be", "if seqid != lastid: if out: out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid =", "options\\n\") continue out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank') out.close() did = True", "-i options\\n\") continue if seqid != lastid: if out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\",", "'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.aminoacids}.faa\", 'w') as", "out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else:", "in genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid and sid not in args.seqid: if args.v:", "may 
not work with everything!', action='store_true') parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args()", "will be appended)') parser.add_argument('-p', '--ptt', help='output file for the ptt protein table') parser.add_argument('-o',", "if out: out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out:", "if args.nucleotide: if args.separate: lastid = None out = None for sid, seq", "if out: out.close() else: with open(f\"{args.nucleotide}.fna\", 'w') as out: for sid, seq in", "lastid = sid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.nucleotide}.fna\", 'w') as out:", "if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not exist. Please check the file path", "genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close() except IOError as e: sys.stderr.write(f\"There", "be appended)\") parser.add_argument('-n', '--nucleotide', help='output file for nucleotide sequence (.fna will be appended)')", "sys.stderr.write(f\"FATAL: {args.genbank} does not exist. 
Please check the file path and try again!\")", "for two column table of [protein id, function]') parser.add_argument('-i', '--seqid', help='Only output these", "out: out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close()", "False, args.v) with open(args.ptt, 'w') as out: for l in r: out.write(\"\\t\".join(map(str, l)))", "args.functions: try: if args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt') else: out = open(args.functions, 'w')", "parser.add_argument('-n', '--nucleotide', help='output file for nucleotide sequence (.fna will be appended)') parser.add_argument('-p', '--ptt',", "lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.aminoacids}.faa\", 'w') as out:", "= '<NAME>' __email__ = '<EMAIL>' if __name__ == '__main__': parser = argparse.ArgumentParser(description=\" \")", "#!/usr/bin/env python \"\"\" Convert a genbank file to sequences \"\"\" import os import", "parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file for the amino", "if args.ptt: r = genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt, 'w') as out: for", "-i options\\n\") continue out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank') out.close() did =", "column table of [protein id, function]') parser.add_argument('-i', '--seqid', help='Only output these sequence ID(s)", "error writing to {args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder: with open(args.phage_finder, 'w') as out:", "not provided in -i options\\n\") continue if seqid != lastid: if out: out.close()", "not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not exist. 
Please check the file path and", "did = True out.close() except IOError as e: sys.stderr.write(f\"There was an error writing", "else: with open(f\"{args.aminoacids}.faa\", 'w') as out: for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex,", "sid, pid, prod in genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close() except", "lastid = None out = None for seq in genbank_seqio(args.genbank): if args.seqid and", "as out: for tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did =", "= None out = None for sid, seq in genbank_to_fna(args.genbank, args.complex): if args.seqid", "sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid} not provided in -i options\\n\")", "seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.orfs}.orfs\", 'w') as out: for seqid,", "files (--separate assumed)\\n\") args.separate = True did = False if args.nucleotide: if args.separate:", "out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close()", "lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.orfs}.orfs\", 'w') as out:", "sid, seq in genbank_to_fna(args.genbank, args.complex): if args.seqid and sid not in args.seqid: if", "out.write(f\">{sid}\\n{seq}\\n\") did = True if args.ptt: r = genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt,", "__credits__ = ['<NAME>'] __license__ = 'MIT' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' if", "l in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did = True if args.functions: try: if", "out = None for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid", "= True if args.orfs: if args.separate: lastid = None out = None for", "file for orfs (.orfs will be appended)') parser.add_argument('-f', 
'--functions', help='output file for two", "args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.orfs: if args.separate: lastid = None out", "seq in genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid and sid not in args.seqid: if", "True if args.functions: try: if args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt') else: out =", "if args.functions: try: if args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt') else: out = open(args.functions,", "genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib import genbank from Bio import SeqIO __author__", "'w') SeqIO.write(seq, out, 'genbank') out.close() did = True if not did: sys.stderr.write(\"Please provide", "orfs (.orfs will be appended)') parser.add_argument('-f', '--functions', help='output file for two column table", "open(args.ptt, 'w') as out: for l in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did =", "2020, <NAME>' __credits__ = ['<NAME>'] __license__ = 'MIT' __maintainer__ = '<NAME>' __email__ =", "seq in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.orfs: if args.separate:", "None out = None for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): if", "parser.add_argument('-i', '--seqid', help='Only output these sequence ID(s) [multiple -i allowed]', action='append') parser.add_argument('--phage_finder', help='make", "sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.ptt: r", "did = True if not did and args.separate: lastid = None out =", "sys.stderr.write(\"Please provide either a -n, -a, -o, -p, -f output file! 
(or all)\\n\")", "help='genbank file', required=True) parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file", "-i allowed]', action='append') parser.add_argument('--phage_finder', help='make a phage finder file') parser.add_argument('--separate', action='store_true', help='separate output", "provided, so requiring to separate files (--separate assumed)\\n\") args.separate = True did =", "sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid} not provided in -i options\\n\")", "as out: for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did =", "genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did = True if not did and", "not args.separate: sys.stderr.write(\"-i was provided, so requiring to separate files (--separate assumed)\\n\") args.separate", "sys.stderr.write(f\"There was an error writing to {args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder: with open(args.phage_finder,", "action='append') parser.add_argument('--phage_finder', help='make a phage finder file') parser.add_argument('--separate', action='store_true', help='separate output into different", "in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid} not provided in -i options\\n\") continue if", "be appended)') parser.add_argument('-f', '--functions', help='output file for two column table of [protein id,", "files (with no other options just output gbk).') parser.add_argument('-z', '--zip', help='gzip compress the", "python \"\"\" Convert a genbank file to sequences \"\"\" import os import sys", "parser.add_argument('-f', '--functions', help='output file for two column table of [protein id, function]') parser.add_argument('-i',", "genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid and sid not in args.seqid: if args.v: 
sys.stderr.write(f\"Skipped", "parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does", "sys.stderr.write(\"-i was provided, so requiring to separate files (--separate assumed)\\n\") args.separate = True", "other options just output gbk).') parser.add_argument('-z', '--zip', help='gzip compress the output. Experimental and", "open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.nucleotide}.fna\", 'w')", "__license__ = 'MIT' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' if __name__ == '__main__':", "prod in genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close() except IOError as", "import gzip import argparse from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio", "as out: for l in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did = True if", "__copyright__ = 'Copyright 2020, <NAME>' __credits__ = ['<NAME>'] __license__ = 'MIT' __maintainer__ =", "gzip import argparse from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from", "= True did = False if args.nucleotide: if args.separate: lastid = None out", "genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib import genbank from Bio import SeqIO __author__ =", "for the amino acid sequences (.faa will be appended)\") parser.add_argument('-n', '--nucleotide', help='output file", "seqid != lastid: if out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid", "sys import gzip import argparse from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions,", "parser.add_argument('--separate', action='store_true', help='separate output into 
different files (with no other options just output", "args.v): if args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid} not", "seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.aminoacids}.faa\", 'w') as out: for seqid,", "if args.phage_finder: with open(args.phage_finder, 'w') as out: for tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str,", "seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid and sid not in", "did = True if args.orfs: if args.separate: lastid = None out = None", "out.close() except IOError as e: sys.stderr.write(f\"There was an error writing to {args.functions}: {e}\\n\")", "args.separate = True did = False if args.nucleotide: if args.separate: lastid = None", "the file path and try again!\") sys.exit(1) if args.seqid and not args.separate: sys.stderr.write(\"-i", "open(f\"{args.orfs}.orfs\", 'w') as out: for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\")", "argparse from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib import", "!= lastid: if out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid out.write(f\">{sid}\\n{seq}\\n\")", "= open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank') out.close() did = True if not did:", "the amino acid sequences (.faa will be appended)\") parser.add_argument('-n', '--nucleotide', help='output file for", "out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.nucleotide}.fna\", 'w') as out: for sid, seq", "if args.v: sys.stderr.write(f\"Skipped {seqid} not provided in -i options\\n\") continue if seqid !=", "phage finder file') parser.add_argument('--separate', action='store_true', help='separate output into different files (with no other", "help='complex identifier line', 
action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file for the amino acid sequences", "= seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.orfs}.orfs\", 'w') as out: for", "out = None for seq in genbank_seqio(args.genbank): if args.seqid and seq.id not in", "table of [protein id, function]') parser.add_argument('-i', '--seqid', help='Only output these sequence ID(s) [multiple", "args.complex): if args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid} not", "if args.seqid and seq.id not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id} not provided", "protein table') parser.add_argument('-o', '--orfs', help='output file for orfs (.orfs will be appended)') parser.add_argument('-f',", "everything!', action='store_true') parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL:", "did = True if args.functions: try: if args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt') else:", "out, 'genbank') out.close() did = True if not did: sys.stderr.write(\"Please provide either a", "genbank_to_functions, genbank_seqio from roblib import genbank from Bio import SeqIO __author__ = '<NAME>'", "args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid} not provided in -i options\\n\") continue if seqid", "import argparse from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib", "required=True) parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file for the", "args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.orfs: if args.separate: lastid = None", "(.fna will be appended)') parser.add_argument('-p', '--ptt', help='output file for the ptt protein table')", 
"parser.add_argument('-g', '--genbank', help='genbank file', required=True) parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true') parser.add_argument('-a', '--aminoacids',", "sid, seq in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.aminoacids: if args.separate:", "seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.orfs:", "'w') as out: for sid, seq in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True", "with open(args.phage_finder, 'w') as out: for tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) +", "not did: sys.stderr.write(\"Please provide either a -n, -a, -o, -p, -f output file!", "out: for l in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did = True if args.functions:", "sys.exit(1) if args.seqid and not args.separate: sys.stderr.write(\"-i was provided, so requiring to separate", "'w') as out: for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did", "import genbank from Bio import SeqIO __author__ = '<NAME>' __copyright__ = 'Copyright 2020,", "the output. Experimental and may not work with everything!', action='store_true') parser.add_argument('-v', help='verbose output',", "{args.genbank} does not exist. 
Please check the file path and try again!\") sys.exit(1)", "'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.orfs}.orfs\", 'w') as", "file') parser.add_argument('--separate', action='store_true', help='separate output into different files (with no other options just", "\") parser.add_argument('-g', '--genbank', help='genbank file', required=True) parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true') parser.add_argument('-a',", "args.v: sys.stderr.write(f\"Skipped {seqid} not provided in -i options\\n\") continue if seqid != lastid:", "assumed)\\n\") args.separate = True did = False if args.nucleotide: if args.separate: lastid =", "'--functions', help='output file for two column table of [protein id, function]') parser.add_argument('-i', '--seqid',", "'<EMAIL>' if __name__ == '__main__': parser = argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank file',", "if args.orfs: if args.separate: lastid = None out = None for seqid, sid,", "args.nucleotide: if args.separate: lastid = None out = None for sid, seq in", "True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close() except IOError as e: sys.stderr.write(f\"There was", "try: if args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt') else: out = open(args.functions, 'w') for", "(--separate assumed)\\n\") args.separate = True did = False if args.nucleotide: if args.separate: lastid", "if out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if out:", "None for seq in genbank_seqio(args.genbank): if args.seqid and seq.id not in args.seqid: if", "= True if args.ptt: r = genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt, 'w') as", "out = gzip.open(f\"{args.functions}.gz\", 'wt') else: out = open(args.functions, 'w') for sid, pid, prod", "open(f\"{args.nucleotide}.fna\", 'w') as out: for sid, seq in 
genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did =", "action='store_true', help='separate output into different files (with no other options just output gbk).')", "seq in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.ptt: r =", "genbank_to_fna(args.genbank, args.complex): if args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid}", "True if not did and args.separate: lastid = None out = None for", "None out = None for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): if", "args.orfs: if args.separate: lastid = None out = None for seqid, sid, seq", "args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id} not provided in -i options\\n\") continue out =", "and try again!\") sys.exit(1) if args.seqid and not args.separate: sys.stderr.write(\"-i was provided, so", "parser.add_argument('-o', '--orfs', help='output file for orfs (.orfs will be appended)') parser.add_argument('-f', '--functions', help='output", "help='Only output these sequence ID(s) [multiple -i allowed]', action='append') parser.add_argument('--phage_finder', help='make a phage", "{seq.id} not provided in -i options\\n\") continue out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out,", "'Copyright 2020, <NAME>' __credits__ = ['<NAME>'] __license__ = 'MIT' __maintainer__ = '<NAME>' __email__", "-i options\\n\") continue if seqid != lastid: if out: out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\",", "True out.close() except IOError as e: sys.stderr.write(f\"There was an error writing to {args.functions}:", "not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid} not provided in -i options\\n\") continue", "sequence (.fna will be appended)') parser.add_argument('-p', '--ptt', help='output file for the ptt protein", "for the ptt protein table') parser.add_argument('-o', '--orfs', help='output file for orfs (.orfs will", "lastid: if out: 
out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w') lastid = seqid out.write(f\">{sid}\\n{seq}\\n\") if", "and args.separate: lastid = None out = None for seq in genbank_seqio(args.genbank): if", "line', action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file for the amino acid sequences (.faa will", "= None out = None for seq in genbank_seqio(args.genbank): if args.seqid and seq.id", "<reponame>linsalrob/EdwardsLab<filename>bin/genbank2sequences.py #!/usr/bin/env python \"\"\" Convert a genbank file to sequences \"\"\" import os", "and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid} not provided in -i", "out: for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True", "help='output file for two column table of [protein id, function]') parser.add_argument('-i', '--seqid', help='Only", "and not args.separate: sys.stderr.write(\"-i was provided, so requiring to separate files (--separate assumed)\\n\")", "'w') as out: for l in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did = True", "{e}\\n\") sys.exit(1) if args.phage_finder: with open(args.phage_finder, 'w') as out: for tple in genbank.genbank_to_phage_finder(args.genbank,", "'w') as out: for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did", "for sid, seq in genbank_to_fna(args.genbank, args.complex): if args.seqid and sid not in args.seqid:", "in -i options\\n\") continue out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank') out.close() did", "if args.v: sys.stderr.write(f\"Skipped {sid} not provided in -i options\\n\") continue if sid !=", "in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.ptt: r = genbank_to_ptt(args.genbank,", "args.complex, args.v): if args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid}", 
"genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib import genbank from Bio import", "= '<NAME>' __copyright__ = 'Copyright 2020, <NAME>' __credits__ = ['<NAME>'] __license__ = 'MIT'", "if not did and args.separate: lastid = None out = None for seq", "in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.aminoacids: if args.separate: lastid =", "True if args.orfs: if args.separate: lastid = None out = None for seqid,", "args.v) with open(args.ptt, 'w') as out: for l in r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\")", "None for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid and sid", "args.separate: lastid = None out = None for sid, seq in genbank_to_fna(args.genbank, args.complex):", "provided in -i options\\n\") continue out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank') out.close()", "True did = False if args.nucleotide: if args.separate: lastid = None out =", "not exist. Please check the file path and try again!\") sys.exit(1) if args.seqid", "the ptt protein table') parser.add_argument('-o', '--orfs', help='output file for orfs (.orfs will be", "lastid = None out = None for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex,", "in genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close() except IOError as e:", "did: sys.stderr.write(\"Please provide either a -n, -a, -o, -p, -f output file! (or", "file for the ptt protein table') parser.add_argument('-o', '--orfs', help='output file for orfs (.orfs", "'--zip', help='gzip compress the output. 
Experimental and may not work with everything!', action='store_true')", "args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid} not provided in", "= 'MIT' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' if __name__ == '__main__': parser", "if sid != lastid: if out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid =", "from Bio import SeqIO __author__ = '<NAME>' __copyright__ = 'Copyright 2020, <NAME>' __credits__", "= gzip.open(f\"{args.functions}.gz\", 'wt') else: out = open(args.functions, 'w') for sid, pid, prod in", "'w') lastid = sid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.nucleotide}.fna\", 'w') as", "args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt') else: out = open(args.functions, 'w') for sid, pid,", "True if not did: sys.stderr.write(\"Please provide either a -n, -a, -o, -p, -f", "r: out.write(\"\\t\".join(map(str, l))) out.write(\"\\n\") did = True if args.functions: try: if args.zip: out", "= True if args.aminoacids: if args.separate: lastid = None out = None for", "out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.orfs}.orfs\", 'w') as out: for seqid, sid,", "{sid} not provided in -i options\\n\") continue if sid != lastid: if out:", "genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib import genbank from Bio import SeqIO", "file for the amino acid sequences (.faa will be appended)\") parser.add_argument('-n', '--nucleotide', help='output", "= None out = None for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v):", "with open(f\"{args.orfs}.orfs\", 'w') as out: for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v):", "= None for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid and", "not did and args.separate: lastid = None out = None for seq in", "= True out.close() except IOError as e: 
sys.stderr.write(f\"There was an error writing to", "out = None for sid, seq in genbank_to_fna(args.genbank, args.complex): if args.seqid and sid", "= None for sid, seq in genbank_to_fna(args.genbank, args.complex): if args.seqid and sid not", "sequences \"\"\" import os import sys import gzip import argparse from roblib import", "two column table of [protein id, function]') parser.add_argument('-i', '--seqid', help='Only output these sequence", "ID(s) [multiple -i allowed]', action='append') parser.add_argument('--phage_finder', help='make a phage finder file') parser.add_argument('--separate', action='store_true',", "output into different files (with no other options just output gbk).') parser.add_argument('-z', '--zip',", "did = True if args.ptt: r = genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt, 'w')", "just output gbk).') parser.add_argument('-z', '--zip', help='gzip compress the output. Experimental and may not", "check the file path and try again!\") sys.exit(1) if args.seqid and not args.separate:", "Bio import SeqIO __author__ = '<NAME>' __copyright__ = 'Copyright 2020, <NAME>' __credits__ =", "parser.add_argument('--phage_finder', help='make a phage finder file') parser.add_argument('--separate', action='store_true', help='separate output into different files", "for sid, pid, prod in genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close()", "be appended)') parser.add_argument('-p', '--ptt', help='output file for the ptt protein table') parser.add_argument('-o', '--orfs',", "args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid} not provided in -i options\\n\") continue if sid", "did = True if not did: sys.stderr.write(\"Please provide either a -n, -a, -o,", "e: sys.stderr.write(f\"There was an error writing to {args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder: with", "exist. 
Please check the file path and try again!\") sys.exit(1) if args.seqid and", "out = None for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid", "to sequences \"\"\" import os import sys import gzip import argparse from roblib", "None out = None for seq in genbank_seqio(args.genbank): if args.seqid and seq.id not", "different files (with no other options just output gbk).') parser.add_argument('-z', '--zip', help='gzip compress", "work with everything!', action='store_true') parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() if not", "file path and try again!\") sys.exit(1) if args.seqid and not args.separate: sys.stderr.write(\"-i was", "import SeqIO __author__ = '<NAME>' __copyright__ = 'Copyright 2020, <NAME>' __credits__ = ['<NAME>']", "= None for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid and", "out = open(args.functions, 'w') for sid, pid, prod in genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\")", "not provided in -i options\\n\") continue out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank')", "args.ptt: r = genbank_to_ptt(args.genbank, False, args.v) with open(args.ptt, 'w') as out: for l", "for seq in genbank_seqio(args.genbank): if args.seqid and seq.id not in args.seqid: if args.v:", "in -i options\\n\") continue if sid != lastid: if out: out.close() out =", "if out: out.close() else: with open(f\"{args.orfs}.orfs\", 'w') as out: for seqid, sid, seq", "'genbank') out.close() did = True if not did: sys.stderr.write(\"Please provide either a -n,", "genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.orfs: if args.separate: lastid =", "and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid} not provided in -i", "'--ptt', help='output file for the ptt protein table') parser.add_argument('-o', '--orfs', 
help='output file for", "not provided in -i options\\n\") continue if sid != lastid: if out: out.close()", "out: out.close() else: with open(f\"{args.nucleotide}.fna\", 'w') as out: for sid, seq in genbank_to_fna(args.genbank,", "did and args.separate: lastid = None out = None for seq in genbank_seqio(args.genbank):", "with open(f\"{args.nucleotide}.fna\", 'w') as out: for sid, seq in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did", "'w') for sid, pid, prod in genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True", "args.aminoacids: if args.separate: lastid = None out = None for seqid, sid, seq", "open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank') out.close() did = True if not did: sys.stderr.write(\"Please", "was provided, so requiring to separate files (--separate assumed)\\n\") args.separate = True did", "except IOError as e: sys.stderr.write(f\"There was an error writing to {args.functions}: {e}\\n\") sys.exit(1)", "options\\n\") continue if seqid != lastid: if out: out.close() out = open(f\"{args.aminoacids}.{seqid}.faa\", 'w')", "for sid, seq in genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.aminoacids: if", "parser.add_argument('-a', '--aminoacids', help=\"output file for the amino acid sequences (.faa will be appended)\")", "in -i options\\n\") continue if seqid != lastid: if out: out.close() out =", "= ['<NAME>'] __license__ = 'MIT' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' if __name__", "args.separate: sys.stderr.write(\"-i was provided, so requiring to separate files (--separate assumed)\\n\") args.separate =", "out.close() else: with open(f\"{args.orfs}.orfs\", 'w') as out: for seqid, sid, seq in genbank_to_orfs(args.genbank,", "lastid = None out = None for sid, seq in genbank_to_fna(args.genbank, args.complex): if", "__email__ = '<EMAIL>' if __name__ == '__main__': parser = 
argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank',", "Experimental and may not work with everything!', action='store_true') parser.add_argument('-v', help='verbose output', action='store_true') args", "provided in -i options\\n\") continue if sid != lastid: if out: out.close() out", "pid, prod in genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close() except IOError", "sequences (.faa will be appended)\") parser.add_argument('-n', '--nucleotide', help='output file for nucleotide sequence (.fna", "sequence ID(s) [multiple -i allowed]', action='append') parser.add_argument('--phage_finder', help='make a phage finder file') parser.add_argument('--separate',", "of [protein id, function]') parser.add_argument('-i', '--seqid', help='Only output these sequence ID(s) [multiple -i", "genbank_to_fna(args.genbank, args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.aminoacids: if args.separate: lastid = None", "writing to {args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder: with open(args.phage_finder, 'w') as out: for", "into different files (with no other options just output gbk).') parser.add_argument('-z', '--zip', help='gzip", "= True if args.functions: try: if args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt') else: out", "gzip.open(f\"{args.functions}.gz\", 'wt') else: out = open(args.functions, 'w') for sid, pid, prod in genbank_to_functions(args.genbank,", "in genbank_to_fna(args.genbank, args.complex): if args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped", "out: out.close() else: with open(f\"{args.orfs}.orfs\", 'w') as out: for seqid, sid, seq in", "= argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank file', required=True) parser.add_argument('-c', '--complex', help='complex identifier line',", "continue out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq, out, 'genbank') 
out.close() did = True if", "for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if", "args.separate: lastid = None out = None for seq in genbank_seqio(args.genbank): if args.seqid", "'MIT' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' if __name__ == '__main__': parser =", "gbk).') parser.add_argument('-z', '--zip', help='gzip compress the output. Experimental and may not work with", "= sid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.nucleotide}.fna\", 'w') as out: for", "else: with open(f\"{args.orfs}.orfs\", 'w') as out: for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex,", "help='make a phage finder file') parser.add_argument('--separate', action='store_true', help='separate output into different files (with", "seq in genbank_to_fna(args.genbank, args.complex): if args.seqid and sid not in args.seqid: if args.v:", "if args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt') else: out = open(args.functions, 'w') for sid,", "None out = None for sid, seq in genbank_to_fna(args.genbank, args.complex): if args.seqid and", "(.orfs will be appended)') parser.add_argument('-f', '--functions', help='output file for two column table of", "sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid and sid not in args.seqid:", "for orfs (.orfs will be appended)') parser.add_argument('-f', '--functions', help='output file for two column", "args = parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not exist. Please check", "sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid and sid not in args.seqid:", "output. 
Experimental and may not work with everything!', action='store_true') parser.add_argument('-v', help='verbose output', action='store_true')", "= None for seq in genbank_seqio(args.genbank): if args.seqid and seq.id not in args.seqid:", "if args.v: sys.stderr.write(f\"Skipped {seq.id} not provided in -i options\\n\") continue out = open(f\"{seq.id}.gbk\",", "with open(f\"{args.aminoacids}.faa\", 'w') as out: for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v):", "identifier line', action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file for the amino acid sequences (.faa", "sys.stderr.write(f\"Skipped {seq.id} not provided in -i options\\n\") continue out = open(f\"{seq.id}.gbk\", 'w') SeqIO.write(seq,", "(.faa will be appended)\") parser.add_argument('-n', '--nucleotide', help='output file for nucleotide sequence (.fna will", "= seqid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.aminoacids}.faa\", 'w') as out: for", "genbank file to sequences \"\"\" import os import sys import gzip import argparse", "help='separate output into different files (with no other options just output gbk).') parser.add_argument('-z',", "= None out = None for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v):", "out.write(\"\\n\") did = True if args.functions: try: if args.zip: out = gzip.open(f\"{args.functions}.gz\", 'wt')", "args.separate: lastid = None out = None for seqid, sid, seq in genbank_to_orfs(args.genbank,", "open(args.functions, 'w') for sid, pid, prod in genbank_to_functions(args.genbank, True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did =", "= parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not exist. 
Please check the", "a genbank file to sequences \"\"\" import os import sys import gzip import", "out: for tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did = True", "so requiring to separate files (--separate assumed)\\n\") args.separate = True did = False", "seq.id not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seq.id} not provided in -i options\\n\")", "roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio from roblib import genbank from", "\"\"\" import os import sys import gzip import argparse from roblib import genbank_to_faa,", "out: out.close() else: with open(f\"{args.aminoacids}.faa\", 'w') as out: for seqid, sid, seq in", "acid sequences (.faa will be appended)\") parser.add_argument('-n', '--nucleotide', help='output file for nucleotide sequence", "compress the output. Experimental and may not work with everything!', action='store_true') parser.add_argument('-v', help='verbose", "for nucleotide sequence (.fna will be appended)') parser.add_argument('-p', '--ptt', help='output file for the", "'--aminoacids', help=\"output file for the amino acid sequences (.faa will be appended)\") parser.add_argument('-n',", "for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid and sid not", "separate files (--separate assumed)\\n\") args.separate = True did = False if args.nucleotide: if", "out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did = True if not did and args.separate: lastid", "None for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid and sid", "'<NAME>' __copyright__ = 'Copyright 2020, <NAME>' __credits__ = ['<NAME>'] __license__ = 'MIT' __maintainer__", "out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did = True out.close() except IOError as e: sys.stderr.write(f\"There was an error", "no other options just output gbk).') parser.add_argument('-z', 
'--zip', help='gzip compress the output. Experimental", "an error writing to {args.functions}: {e}\\n\") sys.exit(1) if args.phage_finder: with open(args.phage_finder, 'w') as", "argparse.ArgumentParser(description=\" \") parser.add_argument('-g', '--genbank', help='genbank file', required=True) parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true')", "os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not exist. Please check the file path and try", "'wt') else: out = open(args.functions, 'w') for sid, pid, prod in genbank_to_functions(args.genbank, True,", "__maintainer__ = '<NAME>' __email__ = '<EMAIL>' if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"", "appended)\") parser.add_argument('-n', '--nucleotide', help='output file for nucleotide sequence (.fna will be appended)') parser.add_argument('-p',", "help='gzip compress the output. Experimental and may not work with everything!', action='store_true') parser.add_argument('-v',", "seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.ptt:", "<NAME>' __credits__ = ['<NAME>'] __license__ = 'MIT' __maintainer__ = '<NAME>' __email__ = '<EMAIL>'", "args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.ptt: r = genbank_to_ptt(args.genbank, False, args.v) with", "finder file') parser.add_argument('--separate', action='store_true', help='separate output into different files (with no other options", "output gbk).') parser.add_argument('-z', '--zip', help='gzip compress the output. 
Experimental and may not work", "in genbank_to_faa(args.genbank, args.complex, args.v): if args.seqid and sid not in args.seqid: if args.v:", "in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.orfs: if args.separate: lastid", "sid out.write(f\">{sid}\\n{seq}\\n\") if out: out.close() else: with open(f\"{args.nucleotide}.fna\", 'w') as out: for sid,", "args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.ptt: r = genbank_to_ptt(args.genbank, False, args.v)", "if args.seqid and not args.separate: sys.stderr.write(\"-i was provided, so requiring to separate files", "not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {seqid} not provided in -i options\\n\") continue", "-i options\\n\") continue if sid != lastid: if out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\",", "args.complex): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.aminoacids: if args.separate: lastid = None out", "sid, seq in genbank_to_faa(args.genbank, args.complex, args.v): out.write(f\">{sid}\\n{seq}\\n\") did = True if args.orfs: if", "out.close() else: with open(f\"{args.nucleotide}.fna\", 'w') as out: for sid, seq in genbank_to_fna(args.genbank, args.complex):", "if seqid != lastid: if out: out.close() out = open(f\"{args.orfs}.{seqid}.orfs\", 'w') lastid =", "from roblib import genbank from Bio import SeqIO __author__ = '<NAME>' __copyright__ =", "help='output file for the ptt protein table') parser.add_argument('-o', '--orfs', help='output file for orfs", "parser.add_argument('-p', '--ptt', help='output file for the ptt protein table') parser.add_argument('-o', '--orfs', help='output file", "args.v): out.write(\"\\t\".join(map(str, tple)) + \"\\n\") did = True if not did and args.separate:", "will be appended)') parser.add_argument('-f', '--functions', help='output file for two column table of [protein", "= open(args.functions, 'w') for sid, pid, prod in genbank_to_functions(args.genbank, 
True, args.v): out.write(f\"{sid}\\t{pid}\\t{prod}\\n\") did", "[multiple -i allowed]', action='append') parser.add_argument('--phage_finder', help='make a phage finder file') parser.add_argument('--separate', action='store_true', help='separate", "seq in genbank_to_orfs(args.genbank, args.complex, args.v): if args.seqid and sid not in args.seqid: if", "args.phage_finder: with open(args.phage_finder, 'w') as out: for tple in genbank.genbank_to_phage_finder(args.genbank, args.v): out.write(\"\\t\".join(map(str, tple))", "options just output gbk).') parser.add_argument('-z', '--zip', help='gzip compress the output. Experimental and may", "lastid = None out = None for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex,", "'<NAME>' __email__ = '<EMAIL>' if __name__ == '__main__': parser = argparse.ArgumentParser(description=\" \") parser.add_argument('-g',", "if args.seqid and sid not in args.seqid: if args.v: sys.stderr.write(f\"Skipped {sid} not provided", "try again!\") sys.exit(1) if args.seqid and not args.separate: sys.stderr.write(\"-i was provided, so requiring", "'--complex', help='complex identifier line', action='store_true') parser.add_argument('-a', '--aminoacids', help=\"output file for the amino acid", "output', action='store_true') args = parser.parse_args() if not os.path.exists(args.genbank): sys.stderr.write(f\"FATAL: {args.genbank} does not exist.", "'--nucleotide', help='output file for nucleotide sequence (.fna will be appended)') parser.add_argument('-p', '--ptt', help='output", "if out: out.close() out = open(f\"{args.nucleotide}.{sid}.fna\", 'w') lastid = sid out.write(f\">{sid}\\n{seq}\\n\") if out:", "function]') parser.add_argument('-i', '--seqid', help='Only output these sequence ID(s) [multiple -i allowed]', action='append') parser.add_argument('--phage_finder',", "[protein id, function]') parser.add_argument('-i', '--seqid', help='Only output these sequence ID(s) [multiple -i allowed]'," ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op copy_translator", "json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update() elif", "plan_id, request_id): if self.optimzation['opimization_type'] == \"distance_between\": optimization_type = self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "TriageLatency(object): def __init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped = []", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped = {} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id']", "import unicodedata from conductor.common.models.triage_tool import TriageTool from conductor.common.music.model import base from oslo_config import", "copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra) for tt in 
cop_ta['translator_triage']['dropped_candidates']:", "License. # You may obtain a copy of the License at # #", "candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id,", "limitations under the License. # # ------------------------------------------------------------------------- # import copy import json import", "= triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped = {}", "= self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type =", "optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type", "= cfg.CONF io = StringIO() class TriageLatency(object): def __init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace,", "# Copyright (c) 2015-2018 AT&T Intellectual Property # # Licensed under the Apache", "language governing permissions and # limitations under the License. 
# # ------------------------------------------------------------------------- #", "copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra) for tt in cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']:", "law or agreed to in writing, software # distributed under the License is", "cfg from StringIO import StringIO CONF = cfg.CONF io = StringIO() class TriageLatency(object):", "the License for the specific language governing permissions and # limitations under the", "op copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra) for", "compliance with the License. # You may obtain a copy of the License", "CONF = cfg.CONF io = StringIO() class TriageLatency(object): def __init__(self): self.TriageTool = base.create_dynamic_model(", "self.optimzation['opimization_type'] == \"distance_between\": optimization_type = self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)", "# import copy import json import unicodedata from conductor.common.models.triage_tool import TriageTool from conductor.common.music.model", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "self.optimzation={} self.latency_dropped = [] def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self, candidate_id,", "this file except in compliance with the License. 
# You may obtain a", "= optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped = {} candiate_dropped['demand_id'] = demand_id", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "json import unicodedata from conductor.common.models.triage_tool import TriageTool from conductor.common.music.model import base from oslo_config", "you may not use this file except in compliance with the License. #", "request_id): if self.optimzation['opimization_type'] == \"distance_between\": optimization_type = self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list =", "latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped = {} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] = candidate_id", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped = {} candiate_dropped['demand_id'] =", "ANY KIND, either express or implied. 
# See the License for the specific", "triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped = {} optimization_type", "json.loads(copy_tra) for tt in cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']:", "def updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type'] == \"distance_between\": optimization_type = self.optimzation['opimization_type'] op =", "== \"latency_between\": latency_dropped = {} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type)", "updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type'] == \"distance_between\": optimization_type = self.optimzation['opimization_type'] op = json.dumps(optimization_type)", "triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped = {} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] =", "= base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped = [] def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type']", "baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped = [] def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] = optimation_type def", "takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped = {}", "self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op 
copy_translator = copy.copy(triageRowUpdate.triage_translator)", "conductor.common.music.model import base from oslo_config import cfg from StringIO import StringIO CONF =", "triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped =", "in compliance with the License. # You may obtain a copy of the", "= copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra) for tt in", "2015-2018 AT&T Intellectual Property # # Licensed under the Apache License, Version 2.0", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= StringIO() class TriageLatency(object): def __init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={}", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "oslo_config import cfg from StringIO import StringIO CONF = cfg.CONF io = StringIO()", "{} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self,", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "base from oslo_config import cfg from StringIO import StringIO CONF = cfg.CONF io", "<gh_stars>0 # # ------------------------------------------------------------------------- # Copyright (c) 2015-2018 AT&T Intellectual Property # #", "and # limitations under the License. 
# # ------------------------------------------------------------------------- # import copy import", "from conductor.common.music.model import base from oslo_config import cfg from StringIO import StringIO CONF", "triageRowUpdate.optimization_type = op copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta =", "See the License for the specific language governing permissions and # limitations under", "keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped = [] def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] = optimation_type", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped = [] def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] =", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type'] == \"distance_between\": optimization_type = self.optimzation['opimization_type']", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id):", "json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op copy_translator = 
copy.copy(triageRowUpdate.triage_translator) copy_tra =", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "candiate_dropped = {} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped)", "triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD',", "= self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type", "OF ANY KIND, either express or implied. # See the License for the", "candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type']", "2.0 (the \"License\"); # you may not use this file except in compliance", "= op triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped = {} optimization_type = self.optimzation['opimization_type']", "# you may not use this file except in compliance with the License.", "in cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']: tt['translator_triage']['lantency_dropped'].append(tl) triaL =", "# # ------------------------------------------------------------------------- # Copyright (c) 2015-2018 AT&T Intellectual Property # # Licensed", "__init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") 
self.optimzation={} self.latency_dropped = [] def takeOpimaztionType(self,", "op triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped = {} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes']", "for the specific language governing permissions and # limitations under the License. #", "agreed to in writing, software # distributed under the License is distributed on", "specific language governing permissions and # limitations under the License. # # -------------------------------------------------------------------------", "# # ------------------------------------------------------------------------- # import copy import json import unicodedata from conductor.common.models.triage_tool import", "import StringIO CONF = cfg.CONF io = StringIO() class TriageLatency(object): def __init__(self): self.TriageTool", "= json.loads(copy_tra) for tt in cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']: if tt['name'] ==", "# limitations under the License. # # ------------------------------------------------------------------------- # import copy import json", "import base from oslo_config import cfg from StringIO import StringIO CONF = cfg.CONF", "latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']: tt['translator_triage']['lantency_dropped'].append(tl) triaL = json.dumps(latency_dropped) triageRowUpdate.triage_translator = triaL triageRowUpdate.update()", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "= {} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def", "optimation_type): self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped = {} candiate_dropped['demand_id']", "= self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update() elif self.optimzation['opimization_type'] ==", "= unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra) for tt in cop_ta['translator_triage']['dropped_candidates']: for tl", "optimization_type = self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0]", "copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra) for tt in cop_ta['translator_triage']['dropped_candidates']: for", "(the \"License\"); # you may not use this file except in compliance with", "self.optimzation['opimization_type'] == \"latency_between\": latency_dropped = {} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op=", "for tl in latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']: tt['translator_triage']['lantency_dropped'].append(tl) triaL = json.dumps(latency_dropped) triageRowUpdate.triage_translator", "= {} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\",", "classname=\"TriageTool\") self.optimzation={} self.latency_dropped = [] def takeOpimaztionType(self, 
optimation_type): self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self,", "under the License. # # ------------------------------------------------------------------------- # import copy import json import unicodedata", "plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped", "Copyright (c) 2015-2018 AT&T Intellectual Property # # Licensed under the Apache License,", "unicodedata from conductor.common.models.triage_tool import TriageTool from conductor.common.music.model import base from oslo_config import cfg", "# # Unless required by applicable law or agreed to in writing, software", "io = StringIO() class TriageLatency(object): def __init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\")", "import copy import json import unicodedata from conductor.common.models.triage_tool import TriageTool from conductor.common.music.model import", "= candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type'] ==", "------------------------------------------------------------------------- # import copy import json import unicodedata from conductor.common.models.triage_tool import TriageTool from", "express or implied. 
# See the License for the specific language governing permissions", "demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id): if", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "from conductor.common.models.triage_tool import TriageTool from conductor.common.music.model import base from oslo_config import cfg from", "except in compliance with the License. # You may obtain a copy of", "triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update() elif self.optimzation['opimization_type']", "by applicable law or agreed to in writing, software # distributed under the", "elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped = {} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type'] == \"distance_between\": optimization_type", "= [] def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id, reason):", "either express or implied. 
# See the License for the specific language governing", "self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type'] == \"distance_between\": optimization_type = self.optimzation['opimization_type'] op", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra) for tt in cop_ta['translator_triage']['dropped_candidates']: for tl in", "self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\":", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "demand_id, reason): candiate_dropped = {} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] =", "= self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii',", "tt in cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']: tt['translator_triage']['lantency_dropped'].append(tl) triaL", "in latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']: tt['translator_triage']['lantency_dropped'].append(tl) triaL = json.dumps(latency_dropped) triageRowUpdate.triage_translator = triaL", "self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped = [] 
def takeOpimaztionType(self, optimation_type):", "\"latency_between\": latency_dropped = {} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate", "from oslo_config import cfg from StringIO import StringIO CONF = cfg.CONF io =", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "import cfg from StringIO import StringIO CONF = cfg.CONF io = StringIO() class", "StringIO import StringIO CONF = cfg.CONF io = StringIO() class TriageLatency(object): def __init__(self):", "triageRowUpdate.optimization_type = op triageRowUpdate.update() elif self.optimzation['opimization_type'] == \"latency_between\": latency_dropped = {} optimization_type =", "def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped =", "file except in compliance with the License. 
# You may obtain a copy", "for tt in cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']: tt['translator_triage']['lantency_dropped'].append(tl)", "Intellectual Property # # Licensed under the Apache License, Version 2.0 (the \"License\");", "self.latency_dropped = [] def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "= self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op copy_translator =", "import json import unicodedata from conductor.common.models.triage_tool import TriageTool from conductor.common.music.model import base from", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "op = json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type = op", "\"distance_between\": optimization_type = self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate =", "the License. 
# You may obtain a copy of the License at #", "conductor.common.models.triage_tool import TriageTool from conductor.common.music.model import base from oslo_config import cfg from StringIO", "tl in latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']: tt['translator_triage']['lantency_dropped'].append(tl) triaL = json.dumps(latency_dropped) triageRowUpdate.triage_translator =", "to in writing, software # distributed under the License is distributed on an", "reason): candiate_dropped = {} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason'] = reason", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore')", "the specific language governing permissions and # limitations under the License. # #", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "latency_dropped = {} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate =", "implied. 
# See the License for the specific language governing permissions and #", "def latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped = {} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] =", "\"License\"); # you may not use this file except in compliance with the", "== \"distance_between\": optimization_type = self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate", "self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type =", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "the License. # # ------------------------------------------------------------------------- # import copy import json import unicodedata from", "Property # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "required by applicable law or agreed to in writing, software # distributed under", "TriageTool from conductor.common.music.model import base from oslo_config import cfg from StringIO import StringIO", "def __init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped = [] def", "self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op", "plan_id)[0] triageRowUpdate.optimization_type = op copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') 
cop_ta", "= op copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra)", "# ------------------------------------------------------------------------- # import copy import json import unicodedata from conductor.common.models.triage_tool import TriageTool", "applicable law or agreed to in writing, software # distributed under the License", "[] def takeOpimaztionType(self, optimation_type): self.optimzation['opimization_type'] = optimation_type def latencyDroppedCandiate(self, candidate_id, demand_id, reason): candiate_dropped", "# ------------------------------------------------------------------------- # Copyright (c) 2015-2018 AT&T Intellectual Property # # Licensed under", "{} optimization_type = self.optimzation['opimization_type'] latency_dropped['dropped_cadidtes'] = self.latency_dropped op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0]", "candidate_id candiate_dropped['reason'] = reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type'] == \"distance_between\":", "AT&T Intellectual Property # # Licensed under the Apache License, Version 2.0 (the", "StringIO() class TriageLatency(object): def __init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped", "or agreed to in writing, software # distributed under the License is distributed", "License. 
# # ------------------------------------------------------------------------- # import copy import json import unicodedata from conductor.common.models.triage_tool", "copy import json import unicodedata from conductor.common.models.triage_tool import TriageTool from conductor.common.music.model import base", "class TriageLatency(object): def __init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool, classname=\"TriageTool\") self.optimzation={} self.latency_dropped =", "copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore') cop_ta = json.loads(copy_tra) for tt", "or implied. # See the License for the specific language governing permissions and", "if self.optimzation['opimization_type'] == \"distance_between\": optimization_type = self.optimzation['opimization_type'] op = json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\",", "'ignore') cop_ta = json.loads(copy_tra) for tt in cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']: if", "op= json.dumps(optimization_type) triageRowUpdate = self.TriageTool.query.get_plan_by_col(\"id\", plan_id)[0] triageRowUpdate.optimization_type = op copy_translator = copy.copy(triageRowUpdate.triage_translator) copy_tra", "------------------------------------------------------------------------- # Copyright (c) 2015-2018 AT&T Intellectual Property # # Licensed under the", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "candidate_id, demand_id, reason): candiate_dropped = {} candiate_dropped['demand_id'] = demand_id candiate_dropped['candidate_id'] = candidate_id candiate_dropped['reason']", "= json.dumps(optimization_type) triage_dropped_list = self.TriageTool.query.get_plan_by_col(\"id\", plan_id) triageRowUpdate = triage_dropped_list[0] triageRowUpdate.optimization_type = op triageRowUpdate.update()", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "import TriageTool from conductor.common.music.model import base from oslo_config import cfg from StringIO import", "cfg.CONF io = StringIO() class TriageLatency(object): def __init__(self): self.TriageTool = base.create_dynamic_model( keyspace=CONF.keyspace, baseclass=TriageTool,", "= reason self.latency_dropped.append(candiate_dropped) def updateTriageLatencyDB(self, plan_id, request_id): if self.optimzation['opimization_type'] == \"distance_between\": optimization_type =", "cop_ta = json.loads(copy_tra) for tt in cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']: if tt['name']", "with the License. # You may obtain a copy of the License at", "cop_ta['translator_triage']['dropped_candidates']: for tl in latency_dropped['dropped_cadidtes']: if tt['name'] == tl['demand_id']: tt['translator_triage']['lantency_dropped'].append(tl) triaL = json.dumps(latency_dropped)", "(c) 2015-2018 AT&T Intellectual Property # # Licensed under the Apache License, Version", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "permissions and # limitations under the License. 
# # ------------------------------------------------------------------------- # import copy", "in writing, software # distributed under the License is distributed on an \"AS", "StringIO CONF = cfg.CONF io = StringIO() class TriageLatency(object): def __init__(self): self.TriageTool =", "governing permissions and # limitations under the License. # # ------------------------------------------------------------------------- # import", "from StringIO import StringIO CONF = cfg.CONF io = StringIO() class TriageLatency(object): def", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import patch_all patch_all() client = boto3.client('stepfunctions') def", "affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import json import boto3 from aws_xray_sdk.core", "import json import boto3 from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import patch_all patch_all()", "# SPDX-License-Identifier: MIT-0 import json import boto3 from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core", "output = {'shipping_status': 'successful'} print(\"Task token is {}\".format(obj['token'])) response = client.send_task_success( taskToken=obj['token'], output=json.dumps(output)", "Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import json import", "from aws_xray_sdk.core import patch_all patch_all() client = boto3.client('stepfunctions') def lambda_handler(event, context): print(event) for", "its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import json import boto3 from", "obj = json.loads(payload) output = {'shipping_status': 'successful'} print(\"Task token is {}\".format(obj['token'])) response =", "client = boto3.client('stepfunctions') def lambda_handler(event, context): print(event) for record in event['Records']: payload=record[\"body\"] obj", "boto3 from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import patch_all patch_all() client = boto3.client('stepfunctions')", "# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0", "Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import", "All Rights Reserved. 
# SPDX-License-Identifier: MIT-0 import json import boto3 from aws_xray_sdk.core import", "def lambda_handler(event, context): print(event) for record in event['Records']: payload=record[\"body\"] obj = json.loads(payload) output", "patch_all patch_all() client = boto3.client('stepfunctions') def lambda_handler(event, context): print(event) for record in event['Records']:", "patch_all() client = boto3.client('stepfunctions') def lambda_handler(event, context): print(event) for record in event['Records']: payload=record[\"body\"]", "json.loads(payload) output = {'shipping_status': 'successful'} print(\"Task token is {}\".format(obj['token'])) response = client.send_task_success( taskToken=obj['token'],", "SPDX-License-Identifier: MIT-0 import json import boto3 from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import", "json import boto3 from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import patch_all patch_all() client", "for record in event['Records']: payload=record[\"body\"] obj = json.loads(payload) output = {'shipping_status': 'successful'} print(\"Task", "= {'shipping_status': 'successful'} print(\"Task token is {}\".format(obj['token'])) response = client.send_task_success( taskToken=obj['token'], output=json.dumps(output) )", "Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import json", "record in event['Records']: payload=record[\"body\"] obj = json.loads(payload) output = {'shipping_status': 'successful'} print(\"Task token", "import boto3 from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import patch_all patch_all() client =", "Reserved. 
# SPDX-License-Identifier: MIT-0 import json import boto3 from aws_xray_sdk.core import xray_recorder from", "in event['Records']: payload=record[\"body\"] obj = json.loads(payload) output = {'shipping_status': 'successful'} print(\"Task token is", "aws_xray_sdk.core import patch_all patch_all() client = boto3.client('stepfunctions') def lambda_handler(event, context): print(event) for record", "xray_recorder from aws_xray_sdk.core import patch_all patch_all() client = boto3.client('stepfunctions') def lambda_handler(event, context): print(event)", "import patch_all patch_all() client = boto3.client('stepfunctions') def lambda_handler(event, context): print(event) for record in", "= json.loads(payload) output = {'shipping_status': 'successful'} print(\"Task token is {}\".format(obj['token'])) response = client.send_task_success(", "Rights Reserved. # SPDX-License-Identifier: MIT-0 import json import boto3 from aws_xray_sdk.core import xray_recorder", "MIT-0 import json import boto3 from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import patch_all", "aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import patch_all patch_all() client = boto3.client('stepfunctions') def lambda_handler(event,", "or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import json import boto3", "<reponame>wongcyrus/aws-stepfunctions-examples<gh_stars>10-100 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier:", "import xray_recorder from aws_xray_sdk.core import patch_all patch_all() client = boto3.client('stepfunctions') def lambda_handler(event, context):", "print(event) for record in event['Records']: payload=record[\"body\"] obj = json.loads(payload) output = {'shipping_status': 'successful'}", "payload=record[\"body\"] obj = json.loads(payload) output = {'shipping_status': 'successful'} print(\"Task token is {}\".format(obj['token'])) response", "boto3.client('stepfunctions') def lambda_handler(event, context): print(event) for record in event['Records']: payload=record[\"body\"] obj = json.loads(payload)", "event['Records']: payload=record[\"body\"] obj = json.loads(payload) output = {'shipping_status': 'successful'} print(\"Task token is {}\".format(obj['token']))", "{'shipping_status': 'successful'} print(\"Task token is {}\".format(obj['token'])) response = client.send_task_success( taskToken=obj['token'], output=json.dumps(output) ) print(response)", "context): print(event) for record in event['Records']: payload=record[\"body\"] obj = json.loads(payload) output = {'shipping_status':", "= boto3.client('stepfunctions') def lambda_handler(event, context): print(event) for record in event['Records']: payload=record[\"body\"] obj =", "lambda_handler(event, context): print(event) for record in event['Records']: payload=record[\"body\"] obj = json.loads(payload) output =" ]
[ "if max_ending < 0: max_ending = 0 return max_sum def test_solution(): assert solution([3,", "= 0 for a in A: max_ending += a if max_ending > max_sum:", "if max_ending > max_sum: max_sum = max_ending if max_ending < 0: max_ending =", "= 0 return max_sum def test_solution(): assert solution([3, 2, -6, 4, -0]) ==", "0 return max_sum def test_solution(): assert solution([3, 2, -6, 4, -0]) == 5", "< 0: max_ending = 0 return max_sum def test_solution(): assert solution([3, 2, -6,", "= -maxsize max_ending = 0 for a in A: max_ending += a if", "A: max_ending += a if max_ending > max_sum: max_sum = max_ending if max_ending", "max_sum: max_sum = max_ending if max_ending < 0: max_ending = 0 return max_sum", "max_sum = max_ending if max_ending < 0: max_ending = 0 return max_sum def", "-maxsize max_ending = 0 for a in A: max_ending += a if max_ending", "from sys import maxsize def solution(A): max_sum = -maxsize max_ending = 0 for", "> max_sum: max_sum = max_ending if max_ending < 0: max_ending = 0 return", "max_ending = 0 return max_sum def test_solution(): assert solution([3, 2, -6, 4, -0])", "0 for a in A: max_ending += a if max_ending > max_sum: max_sum", "<filename>Maximum_slice_problem/MaxSliceSum.py from sys import maxsize def solution(A): max_sum = -maxsize max_ending = 0", "a in A: max_ending += a if max_ending > max_sum: max_sum = max_ending", "+= a if max_ending > max_sum: max_sum = max_ending if max_ending < 0:", "max_sum = -maxsize max_ending = 0 for a in A: max_ending += a", "sys import maxsize def solution(A): max_sum = -maxsize max_ending = 0 for a", "max_ending > max_sum: max_sum = max_ending if max_ending < 0: max_ending = 0", "max_ending = 0 for a in A: max_ending += a if max_ending >", "def solution(A): max_sum = -maxsize max_ending = 0 for a in A: max_ending", "for a in A: max_ending += a if max_ending > max_sum: max_sum =", "max_ending < 0: max_ending = 0 return max_sum def test_solution(): assert solution([3, 2,", "max_ending if max_ending < 0: 
max_ending = 0 return max_sum def test_solution(): assert", "solution(A): max_sum = -maxsize max_ending = 0 for a in A: max_ending +=", "in A: max_ending += a if max_ending > max_sum: max_sum = max_ending if", "0: max_ending = 0 return max_sum def test_solution(): assert solution([3, 2, -6, 4,", "max_ending += a if max_ending > max_sum: max_sum = max_ending if max_ending <", "maxsize def solution(A): max_sum = -maxsize max_ending = 0 for a in A:", "import maxsize def solution(A): max_sum = -maxsize max_ending = 0 for a in", "a if max_ending > max_sum: max_sum = max_ending if max_ending < 0: max_ending", "= max_ending if max_ending < 0: max_ending = 0 return max_sum def test_solution():" ]
[ "<reponame>ShulzLab/pGenUtils import os,sys _localpath = os.path.dirname(os.getcwd()) _packages_path = os.path.dirname(_localpath) print(_packages_path) sys.path.append(_packages_path) from pGenUtils.docs", "_localpath = os.path.dirname(os.getcwd()) _packages_path = os.path.dirname(_localpath) print(_packages_path) sys.path.append(_packages_path) from pGenUtils.docs import mkds_make_docfiles mkds_make_docfiles(_localpath)", "import os,sys _localpath = os.path.dirname(os.getcwd()) _packages_path = os.path.dirname(_localpath) print(_packages_path) sys.path.append(_packages_path) from pGenUtils.docs import", "os,sys _localpath = os.path.dirname(os.getcwd()) _packages_path = os.path.dirname(_localpath) print(_packages_path) sys.path.append(_packages_path) from pGenUtils.docs import mkds_make_docfiles" ]
[ "datetime, os, random already_sent = False randomized_images = [] IMAGE_PATH = \"./app/images/\" @framework.data_function", "already_sent, randomized_images datum=datetime.datetime.now() if datum.hour == 10 and not already_sent: already_sent = True", "in os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text = \\ \"\"\"\\ Good", "= False randomized_images = [] IMAGE_PATH = \"./app/images/\" @framework.data_function def get_data(): global already_sent,", "found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text = \\ \"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d}", "= False return None # Return None if nothing is to be send", "return text, framework.FILE(image) # Return message to be sent elif datum.hour == 11", "image = randomized_images.pop(0) text = \\ \"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\", "= [] IMAGE_PATH = \"./app/images/\" @framework.data_function def get_data(): global already_sent, randomized_images datum=datetime.datetime.now() if", "os, random already_sent = False randomized_images = [] IMAGE_PATH = \"./app/images/\" @framework.data_function def", "\\ \"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) #", "[] IMAGE_PATH = \"./app/images/\" @framework.data_function def get_data(): global already_sent, randomized_images datum=datetime.datetime.now() if datum.hour", "{:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) # Return message to be sent", "\"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ 
\"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) # Return", "not randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image", "if datum.hour == 10 and not already_sent: already_sent = True if not randomized_images:", "get_data(): global already_sent, randomized_images datum=datetime.datetime.now() if datum.hour == 10 and not already_sent: already_sent", "11 and already_sent: already_sent = False return None # Return None if nothing", "True if not randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")] while found_images:", "= \"./app/images/\" @framework.data_function def get_data(): global already_sent, randomized_images datum=datetime.datetime.now() if datum.hour == 10", "already_sent: already_sent = True if not randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for x in", "to be sent elif datum.hour == 11 and already_sent: already_sent = False return", "random already_sent = False randomized_images = [] IMAGE_PATH = \"./app/images/\" @framework.data_function def get_data():", "= \\ \"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image)", "already_sent: already_sent = False return None # Return None if nothing is to", "- {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) # Return message to be sent elif", "@framework.data_function def get_data(): global already_sent, randomized_images datum=datetime.datetime.now() if datum.hour == 10 and not", "text, framework.FILE(image) # Return message to be sent elif datum.hour == 11 and", "randomized_images datum=datetime.datetime.now() if datum.hour == 10 and not 
already_sent: already_sent = True if", "and not already_sent: already_sent = True if not randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for", "randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text = \\ \"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} -", "\"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) # Return message to be sent elif datum.hour ==", "elif datum.hour == 11 and already_sent: already_sent = False return None # Return", "found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0)", "and already_sent: already_sent = False return None # Return None if nothing is", "already_sent = False return None # Return None if nothing is to be", "Return message to be sent elif datum.hour == 11 and already_sent: already_sent =", "{:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) # Return message to be sent elif datum.hour", "== 11 and already_sent: already_sent = False return None # Return None if", "while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text = \\ \"\"\"\\ Good morning @everyone\\nDate:", "be sent elif datum.hour == 11 and already_sent: already_sent = False return None", "Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) # Return message", "message to be sent elif datum.hour == 11 and already_sent: already_sent = False", "global already_sent, randomized_images datum=datetime.datetime.now() if datum.hour == 10 and not already_sent: already_sent =", "import framework, 
datetime, os, random already_sent = False randomized_images = [] IMAGE_PATH =", "already_sent = False randomized_images = [] IMAGE_PATH = \"./app/images/\" @framework.data_function def get_data(): global", "\"./app/images/\" @framework.data_function def get_data(): global already_sent, randomized_images datum=datetime.datetime.now() if datum.hour == 10 and", "= randomized_images.pop(0) text = \\ \"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute)", "framework, datetime, os, random already_sent = False randomized_images = [] IMAGE_PATH = \"./app/images/\"", "datum.hour == 10 and not already_sent: already_sent = True if not randomized_images: found_images", "x in os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text = \\ \"\"\"\\", "already_sent = True if not randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")]", "morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) # Return message to", "sent elif datum.hour == 11 and already_sent: already_sent = False return None #", "def get_data(): global already_sent, randomized_images datum=datetime.datetime.now() if datum.hour == 10 and not already_sent:", "datum=datetime.datetime.now() if datum.hour == 10 and not already_sent: already_sent = True if not", "= True if not randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")] while", "framework.FILE(image) # Return message to be sent elif datum.hour == 11 and already_sent:", "randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")] while found_images: 
randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image =", "randomized_images.pop(0) text = \\ \"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return", "False randomized_images = [] IMAGE_PATH = \"./app/images/\" @framework.data_function def get_data(): global already_sent, randomized_images", "randomized_images = [] IMAGE_PATH = \"./app/images/\" @framework.data_function def get_data(): global already_sent, randomized_images datum=datetime.datetime.now()", "for x in os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text = \\", "if not randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images))))", "@everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ \"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text, framework.FILE(image) # Return message to be", "os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text = \\ \"\"\"\\ Good morning", "datum.hour == 11 and already_sent: already_sent = False return None # Return None", "not already_sent: already_sent = True if not randomized_images: found_images = [os.path.join(IMAGE_PATH,x) for x", "10 and not already_sent: already_sent = True if not randomized_images: found_images = [os.path.join(IMAGE_PATH,x)", "IMAGE_PATH = \"./app/images/\" @framework.data_function def get_data(): global already_sent, randomized_images datum=datetime.datetime.now() if datum.hour ==", "text = \\ \"\"\"\\ Good morning @everyone\\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\\ 
\"\"\".format(datum.day,datum.month,datum.year,datum.hour,datum.minute) return text,", "[os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text =", "= [os.path.join(IMAGE_PATH,x) for x in os.listdir(\"./app/images\")] while found_images: randomized_images.append(found_images.pop(random.randrange(0,len(found_images)))) image = randomized_images.pop(0) text", "== 10 and not already_sent: already_sent = True if not randomized_images: found_images =", "# Return message to be sent elif datum.hour == 11 and already_sent: already_sent" ]
[ "Language :: Python :: 3\", \"Programming Language :: Python :: 3.6\", \"Programming Language", "3.8\", \"Programming Language :: Python :: 3.9\", \"Topic :: Internet :: WWW/HTTP\", \"Topic", "\"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.6\", \"Programming", "(ISCL)\", \"Operating System :: POSIX\", \"Programming Language :: Python :: 3 :: Only\",", "\"Operating System :: POSIX\", \"Programming Language :: Python :: 3 :: Only\", \"Programming", "very simple client for OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[", "from setuptools import setup setup( name=\"ocaclient\", description=\"A very simple client for OCA's web", "setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Environment :: Web Environment\", \"Intended", "Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Topic :: Internet ::", ":: Web Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: ISC", "POSIX\", \"Programming Language :: Python :: 3 :: Only\", \"Programming Language :: Python", "url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\":", "System :: POSIX\", \"Programming Language :: Python :: 3 :: Only\", \"Programming Language", "Audience :: Developers\", \"License :: OSI Approved :: ISC License (ISCL)\", \"Operating System", "\"Programming Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming", "setup setup( name=\"ocaclient\", description=\"A very simple client for OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\",", "Only\", \"Programming Language :: Python :: 3\", 
\"Programming Language :: Python :: 3.6\",", "\"Programming Language :: Python :: 3 :: Only\", \"Programming Language :: Python ::", "install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"],", ":: 3\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python ::", ":: Python :: 3 :: Only\", \"Programming Language :: Python :: 3\", \"Programming", ":: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language :: Python ::", "\"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development", "packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\", },", "\"Environment :: Web Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved ::", "3.7\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\",", "Web Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: ISC License", "5 - Production/Stable\", \"Environment :: Web Environment\", \"Intended Audience :: Developers\", \"License ::", "name=\"ocaclient\", description=\"A very simple client for OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\",", "setuptools import setup setup( name=\"ocaclient\", description=\"A very simple client for OCA's web services.\",", "#!/usr/bin/env python from setuptools import setup setup( name=\"ocaclient\", description=\"A very simple client for", "Language 
:: Python :: 3 :: Only\", \"Programming Language :: Python :: 3\",", "\"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Environment ::", ":: Python :: 3.9\", \"Topic :: Internet :: WWW/HTTP\", \"Topic :: Software Development", "Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language", ":: POSIX\", \"Programming Language :: Python :: 3 :: Only\", \"Programming Language ::", "for OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\",", "], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status ::", "\"Programming Language :: Python :: 3.9\", \"Topic :: Internet :: WWW/HTTP\", \"Topic ::", "Python :: 3 :: Only\", \"Programming Language :: Python :: 3\", \"Programming Language", "license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\",", "\"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[", "\"License :: OSI Approved :: ISC License (ISCL)\", \"Operating System :: POSIX\", \"Programming", "3.9\", \"Topic :: Internet :: WWW/HTTP\", \"Topic :: Software Development :: Libraries ::", "Approved :: ISC License (ISCL)\", \"Operating System :: POSIX\", \"Programming Language :: Python", "author_email=\"<EMAIL>\", 
url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\",", ":: 3.9\", \"Topic :: Internet :: WWW/HTTP\", \"Topic :: Software Development :: Libraries", "Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Topic ::", ":: WWW/HTTP\", \"Topic :: Software Development :: Libraries :: Python Modules\", ], )", "ISC License (ISCL)\", \"Operating System :: POSIX\", \"Programming Language :: Python :: 3", "\"Topic :: Internet :: WWW/HTTP\", \"Topic :: Software Development :: Libraries :: Python", "Language :: Python :: 3.9\", \"Topic :: Internet :: WWW/HTTP\", \"Topic :: Software", "classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Environment :: Web Environment\", \"Intended Audience", "Language :: Python :: 3.7\", \"Programming Language :: Python :: 3.8\", \"Programming Language", "import setup setup( name=\"ocaclient\", description=\"A very simple client for OCA's web services.\", author=\"<NAME>\",", "python from setuptools import setup setup( name=\"ocaclient\", description=\"A very simple client for OCA's", "services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={", ":: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language ::", "client for OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\",", "\"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", 
\"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status", "Python :: 3.9\", \"Topic :: Internet :: WWW/HTTP\", \"Topic :: Software Development ::", "Production/Stable\", \"Environment :: Web Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved", ":: ISC License (ISCL)\", \"Operating System :: POSIX\", \"Programming Language :: Python ::", ":: 3.7\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python ::", "OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ],", "3 :: Only\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python", "\"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status :: 5 - Production/Stable\",", ":: Only\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python ::", "author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\":", "Internet :: WWW/HTTP\", \"Topic :: Software Development :: Libraries :: Python Modules\", ],", ":: 3.8\", \"Programming Language :: Python :: 3.9\", \"Topic :: Internet :: WWW/HTTP\",", "- Production/Stable\", \"Environment :: Web Environment\", \"Intended Audience :: Developers\", \"License :: OSI", ":: OSI Approved :: ISC License (ISCL)\", \"Operating System :: POSIX\", \"Programming Language", ":: 3 :: Only\", \"Programming Language :: Python :: 3\", \"Programming Language ::", "simple client for OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", 
url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\",", ":: 5 - Production/Stable\", \"Environment :: Web Environment\", \"Intended Audience :: Developers\", \"License", "description=\"A very simple client for OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"],", "Python :: 3.7\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python", "3\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\",", "Python :: 3\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python", "Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language :: Python", "License (ISCL)\", \"Operating System :: POSIX\", \"Programming Language :: Python :: 3 ::", "Developers\", \"License :: OSI Approved :: ISC License (ISCL)\", \"Operating System :: POSIX\",", "\"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Topic", "\"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Environment :: Web", "setup( name=\"ocaclient\", description=\"A very simple client for OCA's web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\",", "\"Intended Audience :: Developers\", \"License :: OSI Approved :: ISC License (ISCL)\", \"Operating", ":: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Topic :: Internet", "\"Development Status :: 5 - Production/Stable\", \"Environment :: Web Environment\", \"Intended Audience ::", ":: Developers\", \"License :: OSI Approved :: ISC License (ISCL)\", \"Operating System ::", "Status :: 5 - Production/Stable\", \"Environment :: Web Environment\", \"Intended Audience :: Developers\",", "}, 
setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Environment :: Web Environment\",", "web services.\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/WhyNotHugo/ocaclient\", license=\"MIT\", packages=[\"ocaclient\"], install_requires=[ \"python-dateutil\", \"lxml\", \"zeep>=3.0.0\", ], long_description=open(\"README.rst\").read(),", "use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status :: 5 -", ":: Python :: 3\", \"Programming Language :: Python :: 3.6\", \"Programming Language ::", "long_description=open(\"README.rst\").read(), use_scm_version={ \"version_scheme\": \"post-release\", \"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status :: 5", ":: Python :: 3.7\", \"Programming Language :: Python :: 3.8\", \"Programming Language ::", "Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: ISC License (ISCL)\",", "\"Programming Language :: Python :: 3.7\", \"Programming Language :: Python :: 3.8\", \"Programming", "3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language :: Python :: 3.8\",", ":: Internet :: WWW/HTTP\", \"Topic :: Software Development :: Libraries :: Python Modules\",", "\"post-release\", \"write_to\": \"ocaclient/version.py\", }, setup_requires=[\"setuptools_scm\"], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Environment", "OSI Approved :: ISC License (ISCL)\", \"Operating System :: POSIX\", \"Programming Language ::" ]
[ "unittest class ExampleTestCases(unittest.TestCase): \"\"\"test case\"\"\" def test_check_test(self): self.assertTrue(True) if __name__ == '__main__': unittest.main()", "utf-8 -*- import unittest class ExampleTestCases(unittest.TestCase): \"\"\"test case\"\"\" def test_check_test(self): self.assertTrue(True) if __name__", "<reponame>dgwhited/project_lockdown<filename>tests/test.py # -*- coding: utf-8 -*- import unittest class ExampleTestCases(unittest.TestCase): \"\"\"test case\"\"\" def", "-*- coding: utf-8 -*- import unittest class ExampleTestCases(unittest.TestCase): \"\"\"test case\"\"\" def test_check_test(self): self.assertTrue(True)", "-*- import unittest class ExampleTestCases(unittest.TestCase): \"\"\"test case\"\"\" def test_check_test(self): self.assertTrue(True) if __name__ ==", "# -*- coding: utf-8 -*- import unittest class ExampleTestCases(unittest.TestCase): \"\"\"test case\"\"\" def test_check_test(self):", "coding: utf-8 -*- import unittest class ExampleTestCases(unittest.TestCase): \"\"\"test case\"\"\" def test_check_test(self): self.assertTrue(True) if", "import unittest class ExampleTestCases(unittest.TestCase): \"\"\"test case\"\"\" def test_check_test(self): self.assertTrue(True) if __name__ == '__main__':" ]
[ "2, 2, 1, 2, 2, 1, 2, 2])) def test_multi_polygon_hole_plot(self): xs = [1,", "Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat} for cat in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z'))", "plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths), 1) path =", "plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths), 1) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2),", ".testplot import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j, i)", "7] holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5),", "np.array([(6, 7), (7, 5), (3, 2)])) self.assertEqual(path2.codes, np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot): def", "self.assertEqual(len(paths), 1) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3, 7),", "0, 7] holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5,", "= mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths), 1) path = paths[0]", "1, 2, 2, 1, 2, 2])) def test_multi_polygon_hole_plot(self): xs = [1, 2, 3,", "2, 2, 1, 2, 2, 1, 2, 2])) path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6,", "np.array([ (1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),", "(7, 5), (3, 2)])) self.assertEqual(path2.codes, np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path", "[] ] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value'])", "2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6), (2.1, 4.5),", "3] ys = [2, 0, 7, np.nan, 7, 5, 2] holes = [", "Polygons, Contours from .testplot import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons =", "4.5), (2.5, 5), (2.3, 3.5)]) ) 
self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2,", "3.5)]) ) self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2])) def", "4.5), (2.5, 5), (2.3, 3.5)]]] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}])", "test_multi_polygon_hole_plot(self): xs = [1, 2, 3, np.nan, 6, 7, 3] ys = [2,", "range(10)]], level=j) for j in range(5)}) plot = mpl_renderer.get_plot(polygons) for j, splot in", "2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]] poly =", "ys = [2, 0, 7] holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)],", "Polygons([[(i**j, i) for i in range(10)]], level=j) for j in range(5)}) plot =", "'y': ys, 'holes': holes}]) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths = artist.get_paths()", "def test_multi_polygon_hole_plot(self): xs = [1, 2, 3, np.nan, 6, 7, 3] ys =", "holes, 'value': 1}], vdims=['value']) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1]))", "5), (2.3, 3.5)]]] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}]) plot =", "ys, 'holes': holes, 'value': 1}], vdims=['value']) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(),", "3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]], [] ] poly =", "np.array([1, 2, 2, 1, 2, 2, 1, 2, 2])) def test_multi_polygon_hole_plot(self): xs =", "self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self): xs = [1, 2, 3] ys = [2,", "plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths = artist.get_paths() self.assertEqual(len(paths), 2) path = paths[0] self.assertEqual(path.vertices,", "(2.3, 3.5)]) ) self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))", "= NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j) for j in range(5)})", "2])) path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)])) self.assertEqual(path2.codes, np.array([1,", 
"artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths = artist.get_paths() self.assertEqual(len(paths), 2) path =", "(1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6), (2.1,", "np.array([1, 2, 2, 1, 2, 2, 1, 2, 2])) path2 = paths[1] self.assertEqual(path2.vertices,", "5, 2] holes = [ [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5),", "level=j) for j in range(5)}) plot = mpl_renderer.get_plot(polygons) for j, splot in enumerate(plot.subplots.values()):", "paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3),", "2, 2])) def test_multi_polygon_hole_plot(self): xs = [1, 2, 3, np.nan, 6, 7, 3]", "2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]], [] ]", "2, 3, np.nan, 6, 7, 3] ys = [2, 0, 7, np.nan, 7,", "2])) def test_multi_polygon_hole_plot(self): xs = [1, 2, 3, np.nan, 6, 7, 3] ys", "3.5)]) ) self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2])) path2", "(2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]], [] ] poly", "self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)])) self.assertEqual(path2.codes, np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot):", "plot = mpl_renderer.get_plot(polygons) for j, splot in enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j]))", "TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat} for cat", "TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)", "for i in range(10)]], level=j) for j in range(5)}) plot = mpl_renderer.get_plot(polygons) for", "3.5)]]] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}]) plot = mpl_renderer.get_plot(poly) artist", "(3, 7), (1.5, 2), (2, 3), (1.6, 1.6), (2.1, 4.5), (2.5, 5), (2.3,", "3, np.nan, 6, 7, 3] ys = [2, 0, 7, np.nan, 7, 5,", 
"[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]], []", "'value': 1}], vdims=['value']) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths", "(2.1, 4.5), (2.5, 5), (2.3, 3.5)]) ) self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2,", "'z': cat} for cat in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot = mpl_renderer.get_plot(path) artist", "cat} for cat in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot = mpl_renderer.get_plot(path) artist =", "2, 1, 2, 2])) def test_multi_polygon_hole_plot(self): xs = [1, 2, 3, np.nan, 6,", "self.assertEqual(len(paths), 2) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3, 7),", "in enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self): xs", "(3, 2)])) self.assertEqual(path2.codes, np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path = Contours([{('x',", "'y': ys, 'holes': holes, 'value': 1}], vdims=['value']) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist']", "Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value']) plot = mpl_renderer.get_plot(poly) artist", "np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path = Contours([{('x', 'y'): np.random.rand(10, 2),", "poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}]) plot = mpl_renderer.get_plot(poly) artist =", "from .testplot import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j,", "(1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]], [] ] poly = Polygons([{'x':", "range(5)}) plot = mpl_renderer.get_plot(polygons) for j, splot in 
enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(),", "7, 5, 2] holes = [ [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1,", "Contours from .testplot import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons = NdOverlay({j:", "holes = [ [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5),", "3.5)]], [] ] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}],", "[2, 0, 7, np.nan, 7, 5, 2] holes = [ [[(1.5, 2), (2,", "def test_polygon_with_hole_plot(self): xs = [1, 2, 3] ys = [2, 0, 7] holes", "import NdOverlay from holoviews.element import Polygons, Contours from .testplot import TestMPLPlot, mpl_renderer class", "(2.5, 5), (2.3, 3.5)]]] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}]) plot", "(2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6), (2.1, 4.5), (2.5,", "= artist.get_paths() self.assertEqual(len(paths), 1) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0),", "(1.5, 2), (2, 3), (1.6, 1.6), (2.1, 4.5), (2.5, 5), (2.3, 3.5)]) )", "0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6), (2.1, 4.5), (2.5, 5),", "2] holes = [ [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5,", "6, 7, 3] ys = [2, 0, 7, np.nan, 7, 5, 2] holes", "2, 1, 2, 2, 1, 2, 2])) path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6, 7),", "path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3, 7), (1.5, 2),", "vdims=['value']) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths = artist.get_paths()", "4)) def test_polygon_with_hole_plot(self): xs = [1, 2, 3] ys = [2, 0, 7]", "NdOverlay from holoviews.element import Polygons, Contours from .testplot import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot):", "from holoviews.element import Polygons, Contours from .testplot import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def", "holes = 
[[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3,", "= [1, 2, 3, np.nan, 6, 7, 3] ys = [2, 0, 7,", "2, 2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z':", "2, 1, 2, 2, 1, 2, 2])) def test_multi_polygon_hole_plot(self): xs = [1, 2,", "7), (1.5, 2), (2, 3), (1.6, 1.6), (2.1, 4.5), (2.5, 5), (2.3, 3.5)])", "def test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j) for", "holoviews.core import NdOverlay from holoviews.element import Polygons, Contours from .testplot import TestMPLPlot, mpl_renderer", "xs = [1, 2, 3] ys = [2, 0, 7] holes = [[[(1.5,", "(2.3, 3.5)]], [] ] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value':", "import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j, i) for", "as np from holoviews.core import NdOverlay from holoviews.element import Polygons, Contours from .testplot", "j in range(5)}) plot = mpl_renderer.get_plot(polygons) for j, splot in enumerate(plot.subplots.values()): artist =", "<filename>holoviews/tests/plotting/matplotlib/testpathplot.py import numpy as np from holoviews.core import NdOverlay from holoviews.element import Polygons,", "for j, splot in enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4))", "test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j) for j", "np.array([1, 1])) paths = artist.get_paths() self.assertEqual(len(paths), 2) path = paths[0] self.assertEqual(path.vertices, np.array([ (1,", "paths = artist.get_paths() self.assertEqual(len(paths), 2) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2,", "path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat} for cat in ('B', 'A',", 
"test_polygon_with_hole_plot(self): xs = [1, 2, 3] ys = [2, 0, 7] holes =", "1])) paths = artist.get_paths() self.assertEqual(len(paths), 2) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2),", "2, 2, 1, 2, 2])) def test_multi_polygon_hole_plot(self): xs = [1, 2, 3, np.nan,", "ys = [2, 0, 7, np.nan, 7, 5, 2] holes = [ [[(1.5,", ") self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2])) path2 =", "paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)])) self.assertEqual(path2.codes, np.array([1, 2, 2])) class", "mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths), 1) path = paths[0] self.assertEqual(path.vertices,", "from holoviews.core import NdOverlay from holoviews.element import Polygons, Contours from .testplot import TestMPLPlot,", "(1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]] poly = Polygons([{'x': xs, 'y':", "xs, 'y': ys, 'holes': holes}]) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths =", "[(2.1, 4.5), (2.5, 5), (2.3, 3.5)]], [] ] poly = Polygons([{'x': xs, 'y':", "enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self): xs =", "5), (3, 2)])) self.assertEqual(path2.codes, np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path =", "4.5), (2.5, 5), (2.3, 3.5)]], [] ] poly = Polygons([{'x': xs, 'y': ys,", "[ [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],", "2, 3] ys = [2, 0, 7] holes = [[[(1.5, 2), (2, 3),", "[(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]] poly = Polygons([{'x': xs, 'y': ys, 'holes':", "= paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3, 7), (1.5, 2), (2,", "(2.5, 5), (2.3, 3.5)]) ) self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1,", "self.assertEqual(path2.codes, 
np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path = Contours([{('x', 'y'): np.random.rand(10,", "2, 2, 1, 2, 2])) path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5),", "(1.6, 1.6), (2.1, 4.5), (2.5, 5), (2.3, 3.5)]) ) self.assertEqual(path.codes, np.array([1, 2, 2,", "self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2])) def test_multi_polygon_hole_plot(self): xs", "artist.get_paths() self.assertEqual(len(paths), 1) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3,", "1, 2, 2])) def test_multi_polygon_hole_plot(self): xs = [1, 2, 3, np.nan, 6, 7,", "= [2, 0, 7, np.nan, 7, 5, 2] holes = [ [[(1.5, 2),", "self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6,", "= plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths), 1) path = paths[0] self.assertEqual(path.vertices, np.array([ (1,", "in range(10)]], level=j) for j in range(5)}) plot = mpl_renderer.get_plot(polygons) for j, splot", "5), (2.3, 3.5)]) ) self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2,", "mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j, i) for i in", "(2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]] poly = Polygons([{'x':", "i) for i in range(10)]], level=j) for j in range(5)}) plot = mpl_renderer.get_plot(polygons)", "import numpy as np from holoviews.core import NdOverlay from holoviews.element import Polygons, Contours", "np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self): xs = [1, 2, 3] ys =", "3), (1.6, 1.6), (2.1, 4.5), (2.5, 5), (2.3, 3.5)]) ) self.assertEqual(path.codes, np.array([1, 2,", "self.assertEqual(artist.get_array(), np.array([1, 1])) paths = artist.get_paths() self.assertEqual(len(paths), 2) path = paths[0] self.assertEqual(path.vertices, np.array([", "class 
TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat} for", "splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self): xs = [1, 2, 3]", "in range(5)}) plot = mpl_renderer.get_plot(polygons) for j, splot in enumerate(plot.subplots.values()): artist = splot.handles['artist']", "2), (2, 3), (1.6, 1.6), (2.1, 4.5), (2.5, 5), (2.3, 3.5)]) ) self.assertEqual(path.codes,", "= plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths = artist.get_paths() self.assertEqual(len(paths), 2) path = paths[0]", "def test_contours_categorical_color(self): path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat} for cat in", "numpy as np from holoviews.core import NdOverlay from holoviews.element import Polygons, Contours from", "artist = plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths), 1) path = paths[0] self.assertEqual(path.vertices, np.array([", "0, 7, np.nan, 7, 5, 2] holes = [ [[(1.5, 2), (2, 3),", "3] ys = [2, 0, 7] holes = [[[(1.5, 2), (2, 3), (1.6,", "NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j) for j in range(5)}) plot", "= mpl_renderer.get_plot(polygons) for j, splot in enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(),", "= [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]]", "= [ [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3,", "= splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self): xs = [1, 2,", "= Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value']) plot = mpl_renderer.get_plot(poly)", "xs, 'y': ys, 'holes': holes, 'value': 1}], 
vdims=['value']) plot = mpl_renderer.get_plot(poly) artist =", "import Polygons, Contours from .testplot import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons", "[1, 2, 3] ys = [2, 0, 7] holes = [[[(1.5, 2), (2,", "2, 1, 2, 2])) path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3,", "2), 'z': cat} for cat in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot = mpl_renderer.get_plot(path)", "np from holoviews.core import NdOverlay from holoviews.element import Polygons, Contours from .testplot import", "1}], vdims=['value']) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths =", "1, 2, 2])) path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)]))", "in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot = mpl_renderer.get_plot(path) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1,", "= artist.get_paths() self.assertEqual(len(paths), 2) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0),", "= [1, 2, 3] ys = [2, 0, 7] holes = [[[(1.5, 2),", "'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot = mpl_renderer.get_plot(path) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 0, 1]))", "5), (2.3, 3.5)]], [] ] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes,", "class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]],", "'holes': holes, 'value': 1}], vdims=['value']) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1,", "= Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat} for cat in ('B', 'A', 'B')],", "2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path = Contours([{('x', 
'y'): np.random.rand(10, 2), 'z': cat}", "[[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]] poly", "= mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths = artist.get_paths() self.assertEqual(len(paths), 2)", "= paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)])) self.assertEqual(path2.codes, np.array([1, 2, 2]))", "2)])) self.assertEqual(path2.codes, np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self): path = Contours([{('x', 'y'):", "splot in enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self):", "path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)])) self.assertEqual(path2.codes, np.array([1, 2,", "('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot = mpl_renderer.get_plot(path) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 0,", "= [2, 0, 7] holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1,", "(2, 3), (1.6, 1.6), (2.1, 4.5), (2.5, 5), (2.3, 3.5)]) ) self.assertEqual(path.codes, np.array([1,", "2) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3, 7), (1.5,", "artist.get_paths() self.assertEqual(len(paths), 2) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3,", "'holes': holes}]) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths), 1)", "mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths = artist.get_paths() self.assertEqual(len(paths), 2) path", "paths = artist.get_paths() self.assertEqual(len(paths), 1) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2,", 
"TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self): polygons = NdOverlay({j: Polygons([[(i**j, i) for i", "3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]] poly = Polygons([{'x': xs,", "7, np.nan, 7, 5, 2] holes = [ [[(1.5, 2), (2, 3), (1.6,", "[1, 2, 3, np.nan, 6, 7, 3] ys = [2, 0, 7, np.nan,", "'y'): np.random.rand(10, 2), 'z': cat} for cat in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot", "= Polygons([{'x': xs, 'y': ys, 'holes': holes}]) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist']", "i in range(10)]], level=j) for j in range(5)}) plot = mpl_renderer.get_plot(polygons) for j,", "j, splot in enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def", "1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]], [] ] poly = Polygons([{'x': xs,", "cat in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot = mpl_renderer.get_plot(path) artist = plot.handles['artist'] self.assertEqual(artist.get_array(),", "holoviews.element import Polygons, Contours from .testplot import TestMPLPlot, mpl_renderer class TestPolygonPlot(TestMPLPlot): def test_polygons_colored(self):", "[2, 0, 7] holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5),", "1, 2, 2, 1, 2, 2])) path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7,", "artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self): xs = [1,", "np.nan, 7, 5, 2] holes = [ [[(1.5, 2), (2, 3), (1.6, 1.6)],", "polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j) for j in", "holes}]) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths), 1) path", "Polygons([{'x': xs, 'y': ys, 'holes': holes}]) plot = 
mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths", "np.nan, 6, 7, 3] ys = [2, 0, 7, np.nan, 7, 5, 2]", "plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] self.assertEqual(artist.get_array(), np.array([1, 1])) paths = artist.get_paths() self.assertEqual(len(paths),", "np.random.rand(10, 2), 'z': cat} for cat in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot =", "] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value']) plot", "mpl_renderer.get_plot(polygons) for j, splot in enumerate(plot.subplots.values()): artist = splot.handles['artist'] self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0,", "poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value']) plot =", "for j in range(5)}) plot = mpl_renderer.get_plot(polygons) for j, splot in enumerate(plot.subplots.values()): artist", "7, 3] ys = [2, 0, 7, np.nan, 7, 5, 2] holes =", "1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]] poly = Polygons([{'x': xs, 'y': ys,", "(2.5, 5), (2.3, 3.5)]], [] ] poly = Polygons([{'x': xs, 'y': ys, 'holes':", "self.assertEqual(artist.get_array(), np.array([j])) self.assertEqual(artist.get_clim(), (0, 4)) def test_polygon_with_hole_plot(self): xs = [1, 2, 3] ys", "7), (7, 5), (3, 2)])) self.assertEqual(path2.codes, np.array([1, 2, 2])) class TestContoursPlot(TestMPLPlot): def test_contours_categorical_color(self):", "for cat in ('B', 'A', 'B')], vdims='z').opts(plot=dict(color_index='z')) plot = mpl_renderer.get_plot(path) artist = plot.handles['artist']", "(0, 4)) def test_polygon_with_hole_plot(self): xs = [1, 2, 3] ys = [2, 0,", "test_contours_categorical_color(self): path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat} for cat in ('B',", "(2.3, 3.5)]]] poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}]) plot = mpl_renderer.get_plot(poly)", "2, 2])) path2 = paths[1] self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)])) 
self.assertEqual(path2.codes,", "1.6), (2.1, 4.5), (2.5, 5), (2.3, 3.5)]) ) self.assertEqual(path.codes, np.array([1, 2, 2, 1,", "xs = [1, 2, 3, np.nan, 6, 7, 3] ys = [2, 0,", ") self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2])) def test_multi_polygon_hole_plot(self):", "ys, 'holes': holes}]) plot = mpl_renderer.get_plot(poly) artist = plot.handles['artist'] paths = artist.get_paths() self.assertEqual(len(paths),", "self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2])) path2 = paths[1]", "1) path = paths[0] self.assertEqual(path.vertices, np.array([ (1, 2), (2, 0), (3, 7), (1.5," ]
[ "the only \" \"types recognized are 'solar', 'wind', \" \"and 'hydro'.\".format(source, source_type)) key", "= np.zeros([25, len(sources)*len(all_scenarios)]) columns = [] i = 0 for source_name in sorted(sources):", "an internal node which contains actual data for each stage. \"\"\" def __init__(self):", "and maximum dispatch values. key = source + ' ' + str(i +", "__init__(self): self.root = None def set_root(self, node): self.root = node def write_json_files(self, output_directory):", "= 'Scenarios' for source in sources: plt.figure(source) for scenario in self.scenarios: source_scenario =", "scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self, directory): \"\"\"", "details about the scenario \"\"\" PowerScenario.__init__(self, name, power_dict, prob, comments) self.load_data = load_data", "the dictionary mapping keys to scenario values. \"\"\" # A dictionary of data", "Copies the power generation data of the day for the next 24 hours,", "import skeleton_point_paths as paths import prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses as basicclasses from", "the scenario power_dict (dict): A mapping from source names to lists of 24", "Scenario tree. Each node has an associated name, probability, data, and pointers to", "scenarios as well as the paths from all scenarios. We assume independence across", "\"\"\" # A dictionary of data with strings as keys and the minimum", "from the scenario. Sets the parent to root currently. Returns: Raw_Node_Data: The equivalent", "Returns: axis: The axis plotted to \"\"\" if axis is None: fig, axis", "pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)])", "store a 24-vector of power-values produced. 
Attributes: name (str): The name of the", "= power_vector def plot(self, axis=None): \"\"\" Simple plotting routing which will plot all", "list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend()", "raw data in this sense refers to the 24-vector of the power generation", "= source + ' ' + str(i + 1) value = data[sources_key][key] elif", "list of CommentedRawNodeData objcts instantiated from each of the scenarios. Returns: list[CommentedRawNodeData]: The", "values. \"\"\" # A dictionary of data with strings as keys and the", "will have a PowerScenario object and the corresponding paths # used to create", "directory Args: directory: the directory to store the json file in \"\"\" #", "value between 0 and 1 representing the probability of the scenario comments (str):", "associated probability. Args: name (str): The name of the scenario power_dict (dict[str,List[float]]): This", "in the scenario Returns: SkeletonScenario: The scenario with power and load values \"\"\"", "plot_scenarios(self, directory, title, dps=None): \"\"\" Basic plotting routine for the scenarios. This will", "string += \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in self.power_dict.items(): string += \"{}: {}\\n\".format( source_name,", "files for each of the scenarios in the tree \"\"\" for child in", "inner_data in data.items(): string += \"{}: {}\\n\".format(inner_key, inner_data) return string class ScenarioTree: \"\"\"", "return axis def add_load_data(self, load_data, sources): \"\"\" This will create a SkeletonScenario object", "the raw data for this scenario. 
The raw data in this sense refers", "to the directory to store the scenario file \"\"\" scen_file = directory +", "of load sources to 24-vectors of load values sources (List[ExtendedSource]): A list of", "self.scenarios] def create_tree(self): \"\"\" This creates an instance of the Scenario Tree class", "Revised BSD License. # ___________________________________________________________________________ import datetime import json import os from collections", "minimum # and maximum dispatch values. key = source + ' ' +", "LLC # (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.", "= self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\" This function writes", "dispatch * forecast value = \"{} {}\\n\".format(min_dispatch, forecast) return value def _copy_power_generation(self, data):", "sources to aggregate aggregate_sources (str): The name of the aggregate source \"\"\" power_vector", "os.path.isdir(directory): os.mkdir(directory) index = ['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys())", "hours. 
for source in self.load_data: key = source + ' ' + str(i", "24' value = data[sources_key][key] else: raise RuntimeError(\"Power source '{}' has type '{}', the", "in sources} self.dispatches = {source.name: source.frac_nondispatch for source in sources} def scenario_data(self): \"\"\"", "= [scen.scenario for scen in scenarios] scenario = merge_independent_scenarios(power_scenarios) # Then we merge", "source \"\"\" if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other in disaggregated:", "blank space \"\"\" # In the case of solar power, the passed forecast", "def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for key, data in self.data.items(): string += \"{}:\\n\".format(key)", "def __init__(self, name, probability, data=None, parent=None, comments=''): \"\"\" Initializes an object of the", "def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This method will update the dictionary of power", "24 hours. 
for source in self.load_data: key = source + ' ' +", "comments = '' # We merge name, power dictionaries, probabilities, comments for scenario", "A list of raw scenario nodes \"\"\" return [child.to_raw_node() for child in self.root.children]", "power_dict, prob, load_data, sources, comments=''): \"\"\" Initializes an object of the SkeletonScenario class.", "for i, val in enumerate(self.power_dict[name]): power_vector[i] += val del self.power_dict[name] self.power_dict[aggregate_source] = power_vector", "power_vector in self.power_dict.items(): string += \"{}: {}\\n\".format( source_name, \", \".join(map(str, power_vector))) string +=", "return [scenario.to_raw_node() for scenario in self.scenarios] def create_tree(self): \"\"\" This creates an instance", "things \"\"\" def __init__(self, name, power_dict, prob, comments=''): \"\"\" To initialize a PowerScenario", "the scenarios \"\"\" if not os.path.isdir(directory): os.mkdir(directory) index = ['Probability'] + list( pd.date_range(date,", "things \"\"\" self.name = name self.power_dict = power_dict self.probability = prob self.comments =", "in \"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory)", "ScenarioWithPaths objects and return a ScenarioWithPaths objects which has the power generation vectors", "internal node to the children list Args: node (InternalNode): An InternalNode object \"\"\"", "scen in self.scenarios: scen.probability /= prob_sum def normalize_names(self): \"\"\" This function will change", "\"\" string += \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in self.power_dict.items(): string += \"{}: {}\\n\".format(", "\"\"\" This creates an instance of the Scenario Tree class using self.scenarios. 
Returns:", "aggregate_source, disaggregated, is_load=False): \"\"\" This method will update the dictionary of power values", "objects and return a ScenarioWithPaths objects which has the power generation vectors from", "A mapping from source names to lists of 24 floats of power generation", "open(scen_file, 'w') as f: f.write('Probability: {}\\n'.format(self.probability)) for source in self.raw_data: f.write('Source: {}\\n'.format(source)) for", "' ' + str(i + 25) data[sources_key][key] = value return data def to_raw_node(self):", "\"\"\" prob_sum = sum(scen.probability for scen in self.scenarios) for scen in self.scenarios: scen.probability", "the dictionry in-place. Args: dict_ (dict): The dictionry to disaggregate aggregate_source (str): The", "\"\"\" if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other in disaggregated: self.types[other]", "self.name, parent_name, self.probability) with open(directory + os.sep + filename, 'w') as f: json.dump(self.data,", "= ['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios", "the values. This will update the dictionry in-place. Args: dict_ (dict): The dictionry", "in the newly created object Args: load_data (dict[str,List[float]]): A dictionary mapping names of", "self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\" This function writes out the raw", "Returns: (Raw_Node_Data): raw node representing scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability,", "each with a proportion of the values. Args: aggregate_source (str): The name of", "prob=self.probability, comments=self.comments) def write_json(self, directory): \"\"\" Writes json file for this node to", "to parents and children. 
\"\"\" def __init__(self, name, probability, data=None, parent=None, comments=''): \"\"\"", "self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns the corresponding Raw_Node_Data object for", "as the paths from all scenarios. We assume independence across the scenarios. Args:", "' ' + str(i + 1) data[load_key][source+' '+str(i+25)] = \\ data[load_key][key] # Copy", "data. Args: write_directory: the directory to write in \"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability,", "other in disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source]", "nondispatchable forecast (float): the forecast value Returns: string: the minimum and the maximum", "comments (str): Additional details about how scenario was created among other things \"\"\"", "will construct the dictionary mapping keys to scenario values. \"\"\" # A dictionary", "name (str): The name of the scenario power_dict (dict): A mapping from source", "plotted to \"\"\" if axis is None: fig, axis = plt.subplots() for name,", "the scenario comments (str): Additional details about how scenario was created among other", "a file called 'scenarios.csv' in the directory specified. It is necessary to pass", "tree def normalize_probabilities(self): \"\"\" This function will normalize the probabilities of the scenarios", "in sources: plt.figure(source) for scenario in self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2,", "Save the load forecast. 
forecast = self.load_data[source][i] key = source + ' '", "h in dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--') label = '_nolegend_' # Display", "flag to indicate whether the source to disaggregate is a load source \"\"\"", "data of the day for the next 24 hours, depending on the type", "return \"SkeletonScenario({})\".format(self.name) def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for key, data in self.data.items(): string", "normalize_names(self): \"\"\" This function will change the names of the scenarios to be", "if not os.path.isdir(directory): os.makedirs(directory) # This is a little hack to get the", "ScenarioTree: the scenario tree \"\"\" root = InternalNode(\"root\", probability=1) for scenario in self.scenarios:", "plt.savefig(directory + os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This creates", "source + ' ' + str(i + 1) data[load_key][source+' '+str(i+25)] = \\ data[load_key][key]", "import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import", "scenario was created among other things \"\"\" def __init__(self, name, power_dict, prob, comments=''):", "by the name of the scenario. 
Args: directory (str): A path to the", "the power of the original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self, source_names,", "'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as f: f.write('Probability: {}\\n'.format(self.probability)) for source in self.raw_data: f.write('Source:", "each of the scenarios in the tree \"\"\" for child in self.root.children: child.write_json(output_directory)", "plot to Returns: axis: The axis plotted to \"\"\" if axis is None:", "+ source, y=1.08) plt.savefig(directory + os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios):", "Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability, self.data) string += 'Children:\\n' for child in", "Returns: Raw_Node_Data: The equivalent Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability,", "Raw_Node_Data: The equivalent Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability, self.comments)", "independence. Args: scenarios (List[PowerScenario]): The list of scenarios to merge Returns: PowerScenario: A", "is root parent_name = 'root' if self.parent is None else self.parent.name filename =", "the directory to store the json file in \"\"\" # if no parent", "in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\" This class will only", "color='grey', linestyle='--') label = '_nolegend_' # Display a legend. lgd = plt.legend(loc='lower center',", "any knowledge of the date of the scenario. Args: directory (str): The path", "This will create a file called 'scenarios.csv' in the directory specified. 
It is", "aggregate_source): \"\"\" This method will add up all the source power vectors for", "title of the plot dps (dict): the day part separators for each source", "dictionaries path_dict = {} for scen in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class", "expected scenario \"\"\" def __init__(self, scenarios, actual=None, expected=None): \"\"\" Initializes an object of", "scenario. Sets the parent to root currently. Returns: Raw_Node_Data: The equivalent Raw_Node_Data object", "scen_name = 'forecasts' else: scen_name = scenario.name scenario_name = source_name + ': '", "attribute will point to a dictionary # of the form {source_name -> OneDimPath}", "self.raw_data: f.write('Source: {}\\n'.format(source)) for dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch,", "name self.probability = probability self.parent = parent self.data = data self.children = []", "plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) # Display a grid and the axes.", "objects. Returns: (List[Raw_Node_Data]): A list of raw scenario nodes \"\"\" return [child.to_raw_node() for", "\"\"\" Writes json file for this node to the specified directory Args: directory:", "ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This will merge ScenarioWithPaths objects", "method will add up all the source power vectors for the sources provided", "This software is distributed under the Revised BSD License. # ___________________________________________________________________________ import datetime", "i in range(24): for source in self.power_dict: # Translate the power generation values", "load_key = 'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\"", "of Sandia, LLC # (NTESS). 
Under the terms of Contract DE-NA0003525 with NTESS,", "disaggregate is a load source \"\"\" if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated)", "a PowerScenario object, one must pass a scenario name, a dictionary mapping source", "as pd from prescient.gosm.structures import skeleton_point_paths as paths import prescient.gosm.pyspgen as pyspgen import", "of scenarios actual_scenario (SkeletonScenario): the scenario from the actual data expected_scenario (SkeletonScenario): the", "all_scenarios = self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)]) columns = [] i = 0", "def all_scenarios(self): \"\"\" This property returns the list of probabilistic scenarios in addition", "string class ScenarioTree: \"\"\" Basic Tree representation of a set of scenarios. The", "objects power_scenarios = [scen.scenario for scen in scenarios] scenario = merge_independent_scenarios(power_scenarios) # Then", "list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data = np.zeros([25,", "The list of scenarios including the actual and expected scenario \"\"\" def __init__(self,", "scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths objects to merge Returns: ScenarioWithPaths: The named", "json file in \"\"\" # if no parent specified, assume parent is root", "scenario \"\"\" self.scenarios = scenarios self.actual_scenario = actual self.expected_scenario = expected self.source_names =", "= source + ' 24' value = data[sources_key][key] else: raise RuntimeError(\"Power source '{}'", "\"\"\" def __init__(self, name, power_dict, prob, load_data, sources, comments=''): \"\"\" Initializes an object", "individual node in the Scenario tree. 
Each node has an associated name, probability,", "PowerScenario objects power_scenarios = [scen.scenario for scen in scenarios] scenario = merge_independent_scenarios(power_scenarios) #", "(str): A string containing extra details about the scenario \"\"\" PowerScenario.__init__(self, name, power_dict,", "will delete all the original source power vectors. Args: source_names (list[str]): Names of", "to_raw_node(self): \"\"\" Converts the internal node into a daps-style Raw_Node_Data object. Returns: (Raw_Node_Data):", "node def write_json_files(self, output_directory): \"\"\" Writes json files for each of the scenarios", "source_type in ['solar', 'hydro']: key = source + ' ' + str(i +", "to the directory specified. Raw refers to the fact that the file will", "source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This creates a scenario which merges", "axis: The axis plotted to \"\"\" if axis is None: fig, axis =", "power generation and the probabilities. This will create a file called 'scenarios.csv' in", "This creates a scenario which merges all the power dictionaries of the PowerScenario", "24-vector of power-values produced. Attributes: name (str): The name of the scenario power_dict", "proportion of the values. This will update the dictionry in-place. Args: dict_ (dict):", "PowerScenario.__init__(self, name, power_dict, prob, comments) self.load_data = load_data self.types = {source.name: source.source_type for", "sources, comments=''): \"\"\" Initializes an object of the SkeletonScenario class. Args: power_dict (dict):", "in the plot \"\"\" if not os.path.isdir(directory): os.makedirs(directory) # This is a little", "Representation for an individual node in the Scenario tree. 
Each node has an", "mpl import matplotlib.pyplot as plt import pandas as pd from prescient.gosm.structures import skeleton_point_paths", "range(24): for source in self.power_dict: # Translate the power generation values into strings", "to scenario files as well. Attributes: scenarios (list[SkeletonScenario]): a list of scenarios actual_scenario", "scen in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\" This class will", "' + str(i + 1) value = data[sources_key][key] elif source_type in ['wind']: key", "are supposed to be in the plot \"\"\" if not os.path.isdir(directory): os.makedirs(directory) #", "plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if self.actual_scenario is not None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range,", "color='k') plt.axvline(x=0, color='k') # Name the axes. plt.xlabel('Hour') plt.ylabel('Power in Mw') # Create", "values prob (float): the probability of the scenario load_data (dict[str,List[float]]): a dictionary mapping", "forecast to the plot. if self.expected_scenario is not None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range,", "the name aggregate_source. It will delete all the original source power vectors. Args:", "# Duplicate the load forecast for the next 24 hours. for source in", "for name in source_names: for i, val in enumerate(self.power_dict[name]): power_vector[i] += val del", "to a dictionary # of the form {source_name -> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths',", "with the load data passed in. Note this will not copy the values,", "the other scenarios \"\"\" name = \"\" power_dict = {} probability = 1", "all probabilities as we assume independence. 
Args: scenarios (List[PowerScenario]): The list of scenarios", "each source if they are supposed to be in the plot \"\"\" if", "key = source + ' ' + str(i + 1) data[load_key][source+' '+str(i+25)] =", "__repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self): string = \"\" string += \"PowerScenario({})\\n\".format(self.name) for source_name,", "\"\"\" def __init__(self): self.root = None def set_root(self, node): self.root = node def", "(str): Additional details about how scenario was created among other things \"\"\" def", "{}\\n\".format( self.name, self.probability, self.data) string += 'Children:\\n' for child in self.children: string +=", "InternalNode: \"\"\" Representation for an individual node in the Scenario tree. Each node", "< other.name class SkeletonScenario(PowerScenario): \"\"\" This class should contain all the data parameters", "[] i = 0 for source_name in sorted(sources): for scenario in all_scenarios: if", "self.types.items(): if source_type in ['solar', 'hydro']: key = source + ' ' +", "on the type of the respective source. \"\"\" for i in range(24): for", "scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\" This class will only contain", "source \"\"\" power_vector = [0]*24 for name in source_names: for i, val in", "data = np.zeros([25, len(sources)*len(all_scenarios)]) columns = [] i = 0 for source_name in", "power generation values for the next 24 hours. return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source,", "by some other function, they will be changed in the newly created object", "keys and the minimum and maximum # dispatch values as (str) values. data", "it. The paths attribute will point to a dictionary # of the form", "dictionaries of the PowerScenario objects passed in. 
It will construct a name which", "value = data[sources_key][key] elif source_type in ['wind']: key = source + ' 24'", "raise RuntimeError(\"Power source '{}' has type '{}', the only \" \"types recognized are", "(List[Raw_Node_Data]): A list of raw scenario nodes \"\"\" return [child.to_raw_node() for child in", "[scenario.probability] + \\ scenario.power_dict[source_name] data[:,i] = values i += 1 scenario_frame = pd.DataFrame(data=data,", "is a load source \"\"\" if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for", "key, data in self.data.items(): string += \"{}:\\n\".format(key) for inner_key, inner_data in data.items(): string", "aggregate_sources(self, source_names, aggregate_source): \"\"\" This method will add up all the source power", "= source + ' ' + str(i + 1) data[load_key][source+' '+str(i+25)] = \\", "'w') as f: f.write('Probability: {}\\n'.format(self.probability)) for source in self.raw_data: f.write('Source: {}\\n'.format(source)) for dt,", "in the tree \"\"\" for child in self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This", "scenario files to the directory specified. Raw refers to the fact that the", "\"\"\" return [child.to_raw_node() for child in self.root.children] def __str__(self): return \"Tree:\\n\" + str(self.root)", "information. The name of the file will be Scenario_<name>.dat where <name> is replaced", "PowerScenario and merged path dictionary \"\"\" # We first merge the PowerScenario objects", "'data'. 
\"\"\" def __init__(self, name, power_dict, prob, load_data, sources, comments=''): \"\"\" Initializes an", "(dict): the day part separators for each source if they are supposed to", "prob_sum = sum(scen.probability for scen in self.scenarios) for scen in self.scenarios: scen.probability /=", "import OrderedDict, namedtuple import numpy as np import matplotlib as mpl import matplotlib.pyplot", "from the actual data expected_scenario (SkeletonScenario): the scenario from the forecast data all_scenarios", "will add up all the source power vectors for the sources provided and", "\"\"\" def __init__(self, name, probability, data=None, parent=None, comments=''): \"\"\" Initializes an object of", "expected scenario. Returns: list[SkeletonScenario]: The list of all scenarios \"\"\" return [self.actual_scenario, self.expected_scenario]", "source names to lists of 24 floats of power generation over the day", "power_scenarios = [scen.scenario for scen in scenarios] scenario = merge_independent_scenarios(power_scenarios) # Then we", "this case, set it to 0. forecast = 0 if forecast is None", "self.comments = comments def disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This method will update the", "with the name aggregate_source. It will delete all the original source power vectors.", "PowerScenario object and the corresponding paths # used to create it. The paths", "of the power of the original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self,", "This method will add up all the source power vectors for the sources", "\\ scenario.power_dict[source_name] data[:,i] = values i += 1 scenario_frame = pd.DataFrame(data=data, index=index, columns=columns)", "we merge their path dictionaries path_dict = {} for scen in scenarios: path_dict.update(scen.paths)", "the original source power vectors. 
Args: source_names (list[str]): Names of the sources to", "str(i + 1) data[load_key][source+' '+str(i+25)] = \\ data[load_key][key] # Copy the power generation", "sense refers to the 24-vector of the power generation values produced in a", "data in self.data.items(): string += \"{}:\\n\".format(key) for inner_key, inner_data in data.items(): string +=", "key = source + ' ' + str(i + 1) value = data[sources_key][key]", "for the specified source by a collection of sources each with a proportion", "independence across the scenarios. Args: scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths objects to", "axis is None: fig, axis = plt.subplots() for name, vect in self.power_dict.items(): xs", "must pass a scenario name, a dictionary mapping source names to lists of", "to the InternalNode # Questionable... internal_node = InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node)", "all single skeleton scenarios and have methods for exporting data to scenario files", "the load data passed in. Note this will not copy the values, so", "a proportion of the values. This will update the dictionry in-place. Args: dict_", "the source names, these are stored # as keys in the dictionary of", "path dictionary \"\"\" # We first merge the PowerScenario objects power_scenarios = [scen.scenario", "node to the specified directory Args: directory: the directory to store the json", "associated probability and name. For each source of interest, this will store a", "name of the scenario. 
Args: directory (str): A path to the directory to", "= values i += 1 scenario_frame = pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory + os.sep", "and the maximum dispatch value, separated by a blank space \"\"\" # In", "is not None: label = 'Day Part Separators' for h in dps[source]: plt.axvline(x=h,", "os.makedirs(directory) # This is a little hack to get the source names, these", "scenario.comments) root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root) return tree def normalize_probabilities(self): \"\"\" This function", "24 floats of power generation over the day probability (float): A value between", "= dict_[aggregate_source] del dict_[aggregate_source] for name, proportion in disaggregated.items(): source_power = [proportion*value for", "power generation data of the day for the next 24 hours, depending on", "return tree def normalize_probabilities(self): \"\"\" This function will normalize the probabilities of the", "in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch, forecast): \"\"\" Determines the minimum and", "os.path.isdir(directory): os.makedirs(directory) # This is a little hack to get the source names,", "stage. \"\"\" def __init__(self): self.root = None def set_root(self, node): self.root = node", "\" \"and 'hydro'.\".format(source, source_type)) key = source + ' ' + str(i +", "# Government retains certain rights in this software. # This software is distributed", "= plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) # Display a grid and the", "value)) def dispatch_value(self, dispatch, forecast): \"\"\" Determines the minimum and the maximum dispatch", "data for each stage. 
\"\"\" def __init__(self): self.root = None def set_root(self, node):", "scenario in all_scenarios: if scenario.name == 'expected': scen_name = 'forecasts' else: scen_name =", "(dict[str,List[float]]): A dictionary mapping names of load sources to 24-vectors of load values", "The actual scenario expected (SkeletonScenario): The expected scenario \"\"\" self.scenarios = scenarios self.actual_scenario", "(Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title,", "store the json file in \"\"\" # if no parent specified, assume parent", "skeleton_point_paths as paths import prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses as basicclasses from prescient.util.distributions.distribution_factory", "has type '{}', the only \" \"types recognized are 'solar', 'wind', \" \"and", "the next 24 hours, depending on the type of the respective source. \"\"\"", "into a daps-style Raw_Node_Data object. Returns: (Raw_Node_Data): raw node representing scenario \"\"\" return", "the power generation data for that given source. Args: directory (str): The name", "scenario tree \"\"\" root = InternalNode(\"root\", probability=1) for scenario in self.scenarios: # We", "dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self, directory): \"\"\" Writes json file for", "color='r') if self.actual_scenario is not None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b')", "# Create a title. 
plt.title(title + source, y=1.08) plt.savefig(directory + os.sep + source,", "import json import os from collections import OrderedDict, namedtuple import numpy as np", "# Translate the power generation values into strings of minimum # and maximum", "None else forecast min_dispatch = dispatch * forecast value = \"{} {}\\n\".format(min_dispatch, forecast)", "generation data of the day for the next 24 hours, depending on the", "\"\"\" This class should contain all the data parameters and values that change", "the scenario \"\"\" self.name = name self.probability = probability self.parent = parent self.data", "Args: directory: the directory to store the json file in \"\"\" # if", "source if they are supposed to be in the plot \"\"\" if not", "scenario \"\"\" PowerScenario.__init__(self, name, power_dict, prob, comments) self.load_data = load_data self.types = {source.name:", "1) data[load_key][source+' '+str(i+25)] = \\ data[load_key][key] # Copy the power generation values for", "pd from prescient.gosm.structures import skeleton_point_paths as paths import prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses", "Actual, Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title, dps=None): \"\"\"", "lists of 24 floats and an associated probability. Args: name (str): The name", "\"\"\" This class will only contain information about power generation and the associated", "newly created object Args: load_data (dict[str,List[float]]): A dictionary mapping names of load sources", "def add_load_data(self, load_data, sources): \"\"\" This will create a SkeletonScenario object using the", "to 0. forecast = 0 if forecast is None else forecast min_dispatch =", "other things \"\"\" def __init__(self, name, power_dict, prob, comments=''): \"\"\" To initialize a", "InternalNode class. 
Args: name (str): the name of the scenario probability (float): the", "power generation values prob (float): the probability of the scenario load_data (dict[str,List[float]]): a", "Args: name (str): the name of the scenario probability (float): the probability of", "\"\"\" scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as f:", "the plot dps (dict): the day part separators for each source if they", "# used to create it. The paths attribute will point to a dictionary", "def write_json_files(self, output_directory): \"\"\" Writes json files for each of the scenarios in", "\"\"\" Writes json files for each of the scenarios in the tree \"\"\"", "\"\"\" To initialize a PowerScenario object, one must pass a scenario name, a", "Display a legend. lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) # Display", "for source in self.load_data: key = source + ' ' + str(i +", "string = \"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability, self.data) string += 'Children:\\n'", "+ \\ scenario.power_dict[source_name] data[:,i] = values i += 1 scenario_frame = pd.DataFrame(data=data, index=index,", "vectors. Args: source_names (list[str]): Names of the sources to aggregate aggregate_sources (str): The", "load source \"\"\" if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other in", "= 0 for source_name in sorted(sources): for scenario in all_scenarios: if scenario.name ==", "None if the # respective hour lies outside the hours of sunshine. #", "for inner_key, inner_data in data.items(): string += \"{}: {}\\n\".format(inner_key, inner_data) return string class", "\"\"\" Initializes an object of the SkeletonScenario class. Args: power_dict (dict): a dictionary", "name, a dictionary mapping source names to lists of 24 floats and an", "a new source with the name aggregate_source. 
It will delete all the original", "scenario with power and load values \"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources,", "mapping names of load sources to 24-vectors of load values sources (List[ExtendedSource]): A", "Initializes an object of the InternalNode class. Args: name (str): the name of", "scenario files as well. Attributes: scenarios (list[SkeletonScenario]): a list of scenarios actual_scenario (SkeletonScenario):", "scenarios self.actual_scenario = actual self.expected_scenario = expected self.source_names = list(scenarios[0].power_dict.keys()) @property def all_scenarios(self):", "Additional details about how scenario was created among other things \"\"\" self.name =", "Returns: PowerScenario: A scenario which is formed by merging all the other scenarios", "are 'solar', 'wind', \" \"and 'hydro'.\".format(source, source_type)) key = source + ' '", "all_scenarios(self): \"\"\" This property returns the list of probabilistic scenarios in addition to", "maximum dispatch value for the forecast. Args: dispatch (float): The fraction nondispatchable forecast", "the true into daps-style Raw_Node_Data objects. Returns: (List[Raw_Node_Data]): A list of raw scenario", "scenarios. 
Args: scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths objects to merge Returns: ScenarioWithPaths:", "sources} def scenario_data(self): \"\"\" This will construct the dictionary mapping keys to scenario", "for each of the scenarios in the tree \"\"\" for child in self.root.children:", "\"\"\" for i in range(24): for source, source_type in self.types.items(): if source_type in", "dictionary mapping source names to lists of 24 floats and an associated probability.", "specified directory Args: directory: the directory to store the json file in \"\"\"", "the data of the scenario parent: the parent node comments: A string detailing", "is not None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b') # Add dps", "self.probability = probability self.parent = parent self.data = data self.children = [] self.comments", "source_type)) key = source + ' ' + str(i + 25) data[sources_key][key] =", "\".join(map(str, power_vector))) string += 'Probability: {}\\n'.format(self.probability) return string def __lt__(self, other): return self.name", "value for the forecast. Args: dispatch (float): The fraction nondispatchable forecast (float): the", "is None else forecast min_dispatch = dispatch * forecast value = \"{} {}\\n\".format(min_dispatch,", "value return data def to_raw_node(self): \"\"\" Creates a daps-style Raw_Node_Data object from the", "proportion of the power of the original source \"\"\" aggregated_power = dict_[aggregate_source] del", "axis def add_load_data(self, load_data, sources): \"\"\" This will create a SkeletonScenario object using", "probability. Args: name (str): The name of the scenario power_dict (dict[str,List[float]]): This is", "used to create it. The paths attribute will point to a dictionary #", "list(self.scenarios[0].power_dict.keys()) # Create a plot for every source and add all scenarios. 
label", "of the scenario power_dict (dict[str,List[float]]): This is a dictionary mapping source names to", "return a ScenarioWithPaths objects which has the power generation vectors from all scenarios", "every source and add all scenarios. label = 'Scenarios' for source in sources:", "as f: json.dump(self.data, f, sort_keys=True, indent=2) def __str__(self): string = \"Internal Node {}:\\nprobability:", "(dict[str,float]): A dictionary mapping names of the new sources to the proportion of", "instance of the Scenario Tree class using self.scenarios. Returns: ScenarioTree: the scenario tree", "recognized are 'solar', 'wind', \" \"and 'hydro'.\".format(source, source_type)) key = source + '", "the U.S. # Government retains certain rights in this software. # This software", "that given source. Args: directory (str): The name of the directory to save", "are changed by some other function, they will be changed in the newly", "' ' + str(i + 1) value = data[sources_key][key] elif source_type in ['wind']:", "scenario and the expected scenario. Returns: list[SkeletonScenario]: The list of all scenarios \"\"\"", "def write_raw_data(self, directory): \"\"\" This function writes out the raw data for this", "Name the axes. plt.xlabel('Hour') plt.ylabel('Power in Mw') # Create a title. plt.title(title +", "The axis to plot to Returns: axis: The axis plotted to \"\"\" if", "\"\"\" This method will update the dictionary of power values by replacing the", "the plot \"\"\" if not os.path.isdir(directory): os.makedirs(directory) # This is a little hack", "color='b') # Add dps to the plot. if dps is not None: label", "(float): the forecast value Returns: string: the minimum and the maximum dispatch value,", "method will update the dictionary of power values by replacing the values for", "next 24 hours. 
for source in self.load_data: key = source + ' '", "forecast): \"\"\" Determines the minimum and the maximum dispatch value for the forecast.", "class ScenarioTree: \"\"\" Basic Tree representation of a set of scenarios. The root", "the power generation values into strings of minimum # and maximum dispatch values.", "product of all probabilities as we assume independence. Args: scenarios (List[PowerScenario]): The list", "function will change the names of the scenarios to be numbered in the", "has the power generation vectors from all scenarios as well as the paths", "of the respective source. \"\"\" for i in range(24): for source, source_type in", "to 24-vectors sources (List[ExtendedSource]): This is just used to get the source types", "# We pass through the comments as well to the InternalNode # Questionable...", "name. For each source of interest, this will store a 24-vector of power-values", "scenario file \"\"\" scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w')", "dict_[name] = source_power class SkeletonScenarioSet: \"\"\" This class should manage all single skeleton", "+ 'scenarios.csv') def create_raw_nodes(self): \"\"\" This returns a list of CommentedRawNodeData objcts instantiated", "___________________________________________________________________________ import datetime import json import os from collections import OrderedDict, namedtuple import", "write in \"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data)", "that in a new source with the name aggregate_source. It will delete all", "comments as well to the InternalNode # Questionable... internal_node = InternalNode(scenario.name, scenario.probability, scenario.data,", "scenario_data(self): \"\"\" This will construct the dictionary mapping keys to scenario values. 
\"\"\"", "Add forecast to the plot. if self.expected_scenario is not None: forecast_range = self.expected_scenario.power_dict[source]", "SkeletonScenarioSet: \"\"\" This class should manage all single skeleton scenarios and have methods", "as keys in the dictionary of a scenario sources = list(self.scenarios[0].power_dict.keys()) # Create", "power-values produced. Attributes: name (str): The name of the scenario power_dict (dict): A", "json file for this node to the specified directory Args: directory: the directory", "data self.children = [] self.comments = comments def add_child(self, node): \"\"\" Adds an", "dispatch value for the forecast. Args: dispatch (float): The fraction nondispatchable forecast (float):", "(float): The associated probability of the scenario comments (str): Additional details about how", "self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with open(directory + os.sep + filename,", "a dictionary mapping source names to lists of 24 floats and an associated", "to indicate whether the source to disaggregate is a load source \"\"\" if", "Power). It will store these results in a dictionary called 'data'. \"\"\" def", "disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This method will update the dictionary of power values", "The named tuple object with a merged PowerScenario and merged path dictionary \"\"\"", "1. \"\"\" prob_sum = sum(scen.probability for scen in self.scenarios) for scen in self.scenarios:", "any of the additonal pysp information. The name of the file will be", "{}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability, self.data) string += 'Children:\\n' for child in self.children:", "i = 0 for source_name in sorted(sources): for scenario in all_scenarios: if scenario.name", "To initialize a PowerScenario object, one must pass a scenario name, a dictionary", "Technology & Engineering Solutions of Sandia, LLC # (NTESS). 
Under the terms of", "+= val del self.power_dict[name] self.power_dict[aggregate_source] = power_vector def plot(self, axis=None): \"\"\" Simple plotting", "= [0]*24 for name in source_names: for i, val in enumerate(self.power_dict[name]): power_vector[i] +=", "the actual scenario and the expected scenario. Returns: list[SkeletonScenario]: The list of all", "-> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This will merge", "source stored in this scenario onto the axis passed in (it will create", "by a blank space \"\"\" # In the case of solar power, the", "node): self.root = node def write_json_files(self, output_directory): \"\"\" Writes json files for each", "forecast = self.load_data[source][i] key = source + ' ' + str(i + 1)", "axis: The axis to plot to Returns: axis: The axis plotted to \"\"\"", "\"\"\" This class should manage all single skeleton scenarios and have methods for", "case of solar power, the passed forecast will be None if the #", "to the proportion of the power of the original source \"\"\" aggregated_power =", "in range(24): # Duplicate the load forecast for the next 24 hours. for", "merge the PowerScenario objects power_scenarios = [scen.scenario for scen in scenarios] scenario =", "source in sources: plt.figure(source) for scenario in self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-',", "in self.scenarios] def create_tree(self): \"\"\" This creates an instance of the Scenario Tree", "sources = list(self.scenarios[0].power_dict.keys()) # Create a plot for every source and add all", "label = '_nolegend_' # Display a legend. lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25),", "generation and the probabilities. 
This will create a file called 'scenarios.csv' in the", "= scenario.name scenario_name = source_name + ': ' + scen_name columns.append(scenario_name) values =", "will create one if none is passed in). Args: axis: The axis to", "in the directory specified. It is necessary to pass in the date since", "The list of node data objects \"\"\" return [scenario.to_raw_node() for scenario in self.scenarios]", "+= \"{}: {}\\n\".format( source_name, \", \".join(map(str, power_vector))) string += 'Probability: {}\\n'.format(self.probability) return string", "parent to root currently. Returns: Raw_Node_Data: The equivalent Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData(", "none is passed in). Args: axis: The axis to plot to Returns: axis:", "information about power generation and the associated probability and name. For each source", "function will normalize the probabilities of the scenarios so that they add up", "probability of the scenario comments (str): Additional details about how scenario was created", "for scen in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\" This class", "self.data.items(): string += \"{}:\\n\".format(key) for inner_key, inner_data in data.items(): string += \"{}: {}\\n\".format(inner_key,", "for this scenario. The raw data in this sense refers to the 24-vector", "in all_scenarios: if scenario.name == 'expected': scen_name = 'forecasts' else: scen_name = scenario.name", "up to 1. \"\"\" prob_sum = sum(scen.probability for scen in self.scenarios) for scen", "created among other things \"\"\" self.name = name self.power_dict = power_dict self.probability =", "(List[SkeletonScenario]): The list of scenarios actual (SkeletonScenario): The actual scenario expected (SkeletonScenario): The", "data in the PowerScenario in conjunction with the load data passed in. 
Note", "the scenario data: the data of the scenario parent: the parent node comments:", "label=label, marker='o', color='g') label = '_nolegend_' # Add forecast to the plot. if", "\"\"\" for i, scenario in enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\"", "name which is the concatenation of all scenario names, and a probability which", "floats of power generation over the day probability (float): A value between 0", "+ str(self.root) class InternalNode: \"\"\" Representation for an individual node in the Scenario", "comments: A string detailing information about the scenario \"\"\" self.name = name self.probability", "values = [scenario.probability] + \\ scenario.power_dict[source_name] data[:,i] = values i += 1 scenario_frame", "comments=''): \"\"\" To initialize a PowerScenario object, one must pass a scenario name,", "else: raise RuntimeError(\"Power source '{}' has type '{}', the only \" \"types recognized", "This class should manage all single skeleton scenarios and have methods for exporting", "create it. The paths attribute will point to a dictionary # of the", "and the minimum and maximum # dispatch values as (str) values. data =", "for every source stored in this scenario onto the axis passed in (it", "power_dict = {} probability = 1 comments = '' # We merge name,", "dict_[aggregate_source] del dict_[aggregate_source] for name, proportion in disaggregated.items(): source_power = [proportion*value for value", "these results in a dictionary called 'data'. \"\"\" def __init__(self, name, power_dict, prob,", "= InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root) return tree", "probability, data, and pointers to parents and children. \"\"\" def __init__(self, name, probability,", "scenario. 
Args: directory (str): The path to the directory to store the files", "* forecast value = \"{} {}\\n\".format(min_dispatch, forecast) return value def _copy_power_generation(self, data): \"\"\"", "This will have a PowerScenario object and the corresponding paths # used to", "and pointers to parents and children. \"\"\" def __init__(self, name, probability, data=None, parent=None,", "named tuple object with a merged PowerScenario and merged path dictionary \"\"\" #", "and 1 representing the probability of the scenario comments (str): Additional details about", "pass a scenario name, a dictionary mapping source names to lists of 24", "hours, depending on the type of the respective source. \"\"\" for i in", "sources each with a proportion of the values. This will update the dictionry", "merges all the power dictionaries of the PowerScenario objects passed in. It will", "each with a proportion of the values. This will update the dictionry in-place.", "\"\"\" This function will normalize the probabilities of the scenarios so that they", "of power-values produced. 
Attributes: name (str): The name of the scenario power_dict (dict):", "scenario comments (str): Additional details about how scenario was created among other things", "in self.raw_data: f.write('Source: {}\\n'.format(source)) for dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self,", "The paths attribute will point to a dictionary # of the form {source_name", "expected scenario \"\"\" self.scenarios = scenarios self.actual_scenario = actual self.expected_scenario = expected self.source_names", "scenario_frame = pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory + os.sep + 'scenarios.csv') def create_raw_nodes(self): \"\"\"", "\"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources, self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name) def", "about the scenario \"\"\" PowerScenario.__init__(self, name, power_dict, prob, comments) self.load_data = load_data self.types", "scenario which is formed by merging all the other scenarios \"\"\" name =", "axes. plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') # Name the axes. plt.xlabel('Hour') plt.ylabel('Power", "merge their path dictionaries path_dict = {} for scen in scenarios: path_dict.update(scen.paths) return", "will not copy the values, so if they are changed by some other", "exporting data to scenario files as well. 
Attributes: scenarios (list[SkeletonScenario]): a list of", "scenario onto the axis passed in (it will create one if none is", "scenario.power_dict[source_name] data[:,i] = values i += 1 scenario_frame = pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory", "self.root.children] def __str__(self): return \"Tree:\\n\" + str(self.root) class InternalNode: \"\"\" Representation for an", "(SkeletonScenario): The expected scenario \"\"\" self.scenarios = scenarios self.actual_scenario = actual self.expected_scenario =", "+= '\\n' + scenario.comments # Here we drop the last underscore added name", "self.scenarios = scenarios self.actual_scenario = actual self.expected_scenario = expected self.source_names = list(scenarios[0].power_dict.keys()) @property", "_copy_power_generation(self, data): \"\"\" Copies the power generation data of the day for the", "generation values for the next 24 hours. return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated,", "the scenarios. Args: scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths objects to merge Returns:", "= InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self):", "files to the directory specified. Raw refers to the fact that the file", "the directory specified. Raw refers to the fact that the file will only", "We first merge the PowerScenario objects power_scenarios = [scen.scenario for scen in scenarios]", "(str) values. 
data = {sources_key: OrderedDict(), load_key: OrderedDict()} for i in range(24): for", "axis.legend() return axis def add_load_data(self, load_data, sources): \"\"\" This will create a SkeletonScenario", "scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as f: f.write('Probability:", "+ ' 24' value = data[sources_key][key] else: raise RuntimeError(\"Power source '{}' has type", "details about how scenario was created among other things \"\"\" def __init__(self, name,", "self.scenarios) for scen in self.scenarios: scen.probability /= prob_sum def normalize_names(self): \"\"\" This function", "well to the InternalNode # Questionable... internal_node = InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments)", "= 1 comments = '' # We merge name, power dictionaries, probabilities, comments", "generation data for that given source. Args: directory (str): The name of the", "in disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source] def", "json.dump(self.data, f, sort_keys=True, indent=2) def __str__(self): string = \"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format(", "dps (dict): the day part separators for each source if they are supposed", "\"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with open(directory + os.sep + filename, 'w') as f:", "(str): The name of the source to be disaggregated disaggregated (dict[str,float]): A dictionary", "= 'forecasts' else: scen_name = scenario.name scenario_name = source_name + ': ' +", "in self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g') label =", "proportion of the values. 
Args: aggregate_source (str): The name of the source to", "Scenario_<name>.dat where <name> is replaced by the name of the scenario. Args: directory", "comments=''): \"\"\" Initializes an object of the InternalNode class. Args: name (str): the", "disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\" This method will update the dictionary of power", "return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability, self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self):", "= source_power class SkeletonScenarioSet: \"\"\" This class should manage all single skeleton scenarios", "str(i + 25) data[sources_key][key] = value return data def to_raw_node(self): \"\"\" Creates a", "no parent specified, assume parent is root parent_name = 'root' if self.parent is", "ScenarioWithPaths: The named tuple object with a merged PowerScenario and merged path dictionary", "object for the actual and the expected scenario. Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected", "assume independence. Args: scenarios (List[PowerScenario]): The list of scenarios to merge Returns: PowerScenario:", "for source in self.raw_data: f.write('Source: {}\\n'.format(source)) for dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value))", "# Create a plot for every source and add all scenarios. label =", "else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other in disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other] =", "object using the data in the PowerScenario in conjunction with the load data", "aggregate_source, disaggregated) for other in disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del", "prob_sum def normalize_names(self): \"\"\" This function will change the names of the scenarios", "(NTESS). 
Under the terms of Contract DE-NA0003525 with NTESS, the U.S. # Government", "minimum and the maximum dispatch value, separated by a blank space \"\"\" #", "Add dps to the plot. if dps is not None: label = 'Day", "(str): The name of the scenario power_dict (dict[str,List[float]]): This is a dictionary mapping", "the data in the PowerScenario in conjunction with the load data passed in.", "power_vector))) string += 'Probability: {}\\n'.format(self.probability) return string def __lt__(self, other): return self.name <", "axes. plt.xlabel('Hour') plt.ylabel('Power in Mw') # Create a title. plt.title(title + source, y=1.08)", "For each source of interest, this will store a 24-vector of power-values produced.", "is just used to get the source types comments (str): A string containing", "for each source if they are supposed to be in the plot \"\"\"", "self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns the", "Names of the sources to aggregate aggregate_sources (str): The name of the aggregate", "scenario_frame.to_csv(directory + os.sep + 'scenarios.csv') def create_raw_nodes(self): \"\"\" This returns a list of", "of interest, this will store a 24-vector of power-values produced. Attributes: name (str):", "a list of scenarios actual_scenario (SkeletonScenario): the scenario from the actual data expected_scenario", "the power generation and the probabilities. This will create a file called 'scenarios.csv'", "\\ data[load_key][key] # Copy the power generation values for the next 24 hours.", "Display a grid and the axes. 
plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') #", "This is a dictionary mapping source names to a list of 24 values", "in sources} def scenario_data(self): \"\"\" This will construct the dictionary mapping keys to", "collection of ScenarioWithPaths objects to merge Returns: ScenarioWithPaths: The named tuple object with", "a plot for every source and add all scenarios. label = 'Scenarios' for", "to create it. The paths attribute will point to a dictionary # of", "set it to 0. forecast = 0 if forecast is None else forecast", "= None def set_root(self, node): self.root = node def write_json_files(self, output_directory): \"\"\" Writes", "of node data objects \"\"\" return [scenario.to_raw_node() for scenario in self.scenarios] def create_tree(self):", "actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns the corresponding Raw_Node_Data object for the actual", "generation values produced in a scenario without any of the additonal pysp information.", "power of the original source is_load (bool): A flag to indicate whether the", "Max Dispatchable Power). It will store these results in a dictionary called 'data'.", "The associated probability of the scenario comments (str): Additional details about how scenario", "the scenario load_data (dict[str,List[float]]): a dictionary mapping load sources to 24-vectors sources (List[ExtendedSource]):", "0 and 1 representing the probability of the scenario comments (str): Additional details", "plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This creates a scenario which merges all the power", "def __init__(self, name, power_dict, prob, comments=''): \"\"\" To initialize a PowerScenario object, one", "files as well. 
Attributes: scenarios (list[SkeletonScenario]): a list of scenarios actual_scenario (SkeletonScenario): the", "os.sep + 'scenarios.csv') def create_raw_nodes(self): \"\"\" This returns a list of CommentedRawNodeData objcts", "data[load_key][key] = str(forecast) + '\\n' for i in range(24): # Duplicate the load", "power, the passed forecast will be None if the # respective hour lies", "\"\"\" self.children.append(node) def to_raw_node(self): \"\"\" Converts the internal node into a daps-style Raw_Node_Data", "power_vector[i] += val del self.power_dict[name] self.power_dict[aggregate_source] = power_vector def plot(self, axis=None): \"\"\" Simple", "generation values prob (float): the probability of the scenario load_data (dict[str,List[float]]): a dictionary", "dictionry to disaggregate aggregate_source (str): The name of the source to be disaggregated", "None def set_root(self, node): self.root = node def write_json_files(self, output_directory): \"\"\" Writes json", "parent is root parent_name = 'root' if self.parent is None else self.parent.name filename", "24-vectors sources (List[ExtendedSource]): This is just used to get the source types comments", "proportion of the power of the original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def", "directory (str): A path to the directory to store the scenario file \"\"\"", "respective hour lies outside the hours of sunshine. # In this case, set", "\"\"\" for child in self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This turns the scenarios", "date of the scenario. Args: directory (str): The path to the directory to", "self.power_dict.items(): string += \"{}: {}\\n\".format( source_name, \", \".join(map(str, power_vector))) string += 'Probability: {}\\n'.format(self.probability)", "maximum # dispatch values as (str) values. 
data = {sources_key: OrderedDict(), load_key: OrderedDict()}", "' + str(i + 1) raw_value = self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key]", "the day part separators for each source if they are supposed to be", "# We first merge the PowerScenario objects power_scenarios = [scen.scenario for scen in", "each of the scenarios. Returns: list[CommentedRawNodeData]: The list of node data objects \"\"\"", "This will update the dictionry in-place. Args: dict_ (dict): The dictionry to disaggregate", "actual scenario and the expected scenario. Returns: list[SkeletonScenario]: The list of all scenarios", "values prob (float): The associated probability of the scenario comments (str): Additional details", "name, power_dict, prob, load_data, sources, comments=''): \"\"\" Initializes an object of the SkeletonScenario", "values sources (List[ExtendedSource]): A list of the sources used in the scenario Returns:", "object of the InternalNode class. Args: name (str): the name of the scenario", "power_dict self.probability = prob self.comments = comments def disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This", "name = \"\" power_dict = {} probability = 1 comments = '' #", "parent self.data = data self.children = [] self.comments = comments def add_child(self, node):", "of probabilistic scenarios in addition to the actual scenario and the expected scenario.", "construct a name which is the concatenation of all scenario names, and a", "through the comments as well to the InternalNode # Questionable... internal_node = InternalNode(scenario.name,", "(datetime-like): The date of the scenarios \"\"\" if not os.path.isdir(directory): os.mkdir(directory) index =", "None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if self.actual_scenario is not None:", "the next 24 hours. 
return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\" This", "the list of probabilistic scenarios in addition to the actual scenario and the", "will merge ScenarioWithPaths objects and return a ScenarioWithPaths objects which has the power", "routing which will plot all the power vectors for every source stored in", "source + ' 24' value = data[sources_key][key] else: raise RuntimeError(\"Power source '{}' has", "(Raw_Node_Data): raw node representing scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments)", "self.expected_scenario = expected self.source_names = list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\" This property returns", "self.data = data self.children = [] self.comments = comments def add_child(self, node): \"\"\"", "dispatch value, separated by a blank space \"\"\" # In the case of", "root points to an internal node which contains actual data for each stage.", "the actual data expected_scenario (SkeletonScenario): the scenario from the forecast data all_scenarios (list[SkeletonScenario]):", "as basicclasses from prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key = 'Demand'", "for scenario in self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g')", "str(forecast) + '\\n' for i in range(24): # Duplicate the load forecast for", "dispatch (float): The fraction nondispatchable forecast (float): the forecast value Returns: string: the", "to 1. 
\"\"\" prob_sum = sum(scen.probability for scen in self.scenarios) for scen in", "name of the file will be Scenario_<name>.dat where <name> is replaced by the", "self.probability) with open(directory + os.sep + filename, 'w') as f: json.dump(self.data, f, sort_keys=True,", "proportion of the power of the original source is_load (bool): A flag to", "plt.ylabel('Power in Mw') # Create a title. plt.title(title + source, y=1.08) plt.savefig(directory +", "not None: label = 'Day Part Separators' for h in dps[source]: plt.axvline(x=h, zorder=1,", "'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This method", "source names to lists of 24 floats and an associated probability. Args: name", "will create a file called 'scenarios.csv' in the directory specified. It is necessary", "merge Returns: ScenarioWithPaths: The named tuple object with a merged PowerScenario and merged", "for this node to the specified directory Args: directory: the directory to store", "of 24 values prob (float): The associated probability of the scenario comments (str):", "the scenario. Args: directory (str): A path to the directory to store the", "values as (str) values. data = {sources_key: OrderedDict(), load_key: OrderedDict()} for i in", "return string class ScenarioTree: \"\"\" Basic Tree representation of a set of scenarios.", "representing the probability of the scenario comments (str): Additional details about how scenario", "(str): the name of the scenario probability (float): the probability of the scenario", "= source_name + ': ' + scen_name columns.append(scenario_name) values = [scenario.probability] + \\", "the scenarios in the tree \"\"\" for child in self.root.children: child.write_json(output_directory) def create_raw_nodes(self):", "del dict_[aggregate_source] for name, proportion in disaggregated.items(): source_power = [proportion*value for value in", "dispatch values as (str) values. 
data = {sources_key: OrderedDict(), load_key: OrderedDict()} for i", "all the source power vectors for the sources provided and store that in", "\"\"\" This will construct the dictionary mapping keys to scenario values. \"\"\" #", "for key, data in self.data.items(): string += \"{}:\\n\".format(key) for inner_key, inner_data in data.items():", "the day for the next 24 hours, depending on the type of the", "sources (List[ExtendedSource]): This is just used to get the source types comments (str):", "self.dispatches = {source.name: source.frac_nondispatch for source in sources} def scenario_data(self): \"\"\" This will", "self.power_dict = power_dict self.probability = prob self.comments = comments def disaggregate_source(self, aggregate_source, disaggregated):", "disaggregated) for other in disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source]", "in self.scenarios: # We pass through the comments as well to the InternalNode", "a scenario name, a dictionary mapping source names to lists of 24 floats", "data in this sense refers to the 24-vector of the power generation values", "def __init__(self, scenarios, actual=None, expected=None): \"\"\" Initializes an object of the SkeletonScenarioSet class.", "values for the specified source by a collection of sources each with a", "+ \\ sorted(self.scenarios) def write_raw_scenarios(self, directory, date): \"\"\" This routine should write all", "sources provided and store that in a new source with the name aggregate_source.", "the Revised BSD License. 
# ___________________________________________________________________________ import datetime import json import os from", "None: fig, axis = plt.subplots() for name, vect in self.power_dict.items(): xs = list(range(24))", "label=name) axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis def", "Returns: list[CommentedRawNodeData]: The list of node data objects \"\"\" return [scenario.to_raw_node() for scenario", "be numbered in the form \"Scenario_i\". \"\"\" for i, scenario in enumerate(self.scenarios): scenario.name", "about the scenario \"\"\" self.name = name self.probability = probability self.parent = parent", "root = InternalNode(\"root\", probability=1) for scenario in self.scenarios: # We pass through the", "Part Separators' for h in dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--') label =", "all scenarios. We assume independence across the scenarios. Args: scenarios (list[ScenarioWithPaths]): A collection", "A value between 0 and 1 representing the probability of the scenario comments", "to be disaggregated disaggregated (dict[str,float]): A dictionary mapping names of the new sources", "axis=None): \"\"\" Simple plotting routing which will plot all the power vectors for", "axis passed in (it will create one if none is passed in). Args:", "passed in. Note this will not copy the values, so if they are", "not None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b') # Add dps to", "type of the respective source. 
\"\"\" for i in range(24): for source, source_type", "os from collections import OrderedDict, namedtuple import numpy as np import matplotlib as", "probability *= scenario.probability if scenario.comments: comments += '\\n' + scenario.comments # Here we", "A scenario which is formed by merging all the other scenarios \"\"\" name", "not os.path.isdir(directory): os.mkdir(directory) index = ['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources =", "and load values \"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources, self.comments) def __repr__(self):", "to scenario (i.e, Min Dispatchable Power, Max Dispatchable Power). It will store these", "scenario name, a dictionary mapping source names to lists of 24 floats and", "data[sources_key][key] = value return data def to_raw_node(self): \"\"\" Creates a daps-style Raw_Node_Data object", "values by replacing the values for the specified source by a collection of", "of all probabilities as we assume independence. Args: scenarios (List[PowerScenario]): The list of", "+ ' ' + str(i + 1) data[load_key][key] = str(forecast) + '\\n' for", "scenarios. This will create a plot for each source with all the power", "the date since this object does not have any knowledge of the date", "the original source \"\"\" aggregated_power = dict_[aggregate_source] del dict_[aggregate_source] for name, proportion in", "value Returns: string: the minimum and the maximum dispatch value, separated by a", "across the scenarios. Args: scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths objects to merge", "of the values. Args: aggregate_source (str): The name of the source to be", "Solutions of Sandia, LLC # (NTESS). 
Under the terms of Contract DE-NA0003525 with", "the scenario from the forecast data all_scenarios (list[SkeletonScenario]): The list of scenarios including", "def write_raw_scenarios(self, directory, date): \"\"\" This routine should write all of the raw", "data: the data of the scenario parent: the parent node comments: A string", "zorder=1, label=label, color='grey', linestyle='--') label = '_nolegend_' # Display a legend. lgd =", "is the concatenation of all scenario names, and a probability which is a", "source with all the power generation data for that given source. Args: directory", "comments for scenario in scenarios: name += scenario.name + '_' power_dict.update(scenario.power_dict) probability *=", "scenario. Args: directory (str): A path to the directory to store the scenario", "of the aggregate source \"\"\" power_vector = [0]*24 for name in source_names: for", "forecast (float): the forecast value Returns: string: the minimum and the maximum dispatch", "the SkeletonScenarioSet class. Args: scenarios (List[SkeletonScenario]): The list of scenarios actual (SkeletonScenario): The", "Power, Max Dispatchable Power). It will store these results in a dictionary called", "self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch, forecast): \"\"\" Determines the minimum and the", "directory to store the json file in \"\"\" # if no parent specified,", "data, and pointers to parents and children. \"\"\" def __init__(self, name, probability, data=None,", "of minimum # and maximum dispatch values. 
key = source + ' '", "other function, they will be changed in the newly created object Args: load_data", "object \"\"\" self.children.append(node) def to_raw_node(self): \"\"\" Converts the internal node into a daps-style", "self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value for source in self.load_data: #", "whether the source to disaggregate is a load source \"\"\" if is_load: disaggregate_dict(self.load_data)", "= \"{} {}\\n\".format(min_dispatch, forecast) return value def _copy_power_generation(self, data): \"\"\" Copies the power", "def dispatch_value(self, dispatch, forecast): \"\"\" Determines the minimum and the maximum dispatch value", "create_raw_nodes(self): \"\"\" This returns a list of CommentedRawNodeData objcts instantiated from each of", "= list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name))", "new sources to the proportion of the power of the original source \"\"\"", "{}\\n\".format(inner_key, inner_data) return string class ScenarioTree: \"\"\" Basic Tree representation of a set", "+ 25) data[sources_key][key] = value return data def to_raw_node(self): \"\"\" Creates a daps-style", "'solar', 'wind', \" \"and 'hydro'.\".format(source, source_type)) key = source + ' ' +", "columns=columns) scenario_frame.to_csv(directory + os.sep + 'scenarios.csv') def create_raw_nodes(self): \"\"\" This returns a list", "pointers to parents and children. \"\"\" def __init__(self, name, probability, data=None, parent=None, comments=''):", "paths from all scenarios. We assume independence across the scenarios. Args: scenarios (list[ScenarioWithPaths]):", "probabilities. This will create a file called 'scenarios.csv' in the directory specified. 
It", "power dictionaries, probabilities, comments for scenario in scenarios: name += scenario.name + '_'", "sources used in the scenario Returns: SkeletonScenario: The scenario with power and load", "of the raw scenario files to the directory specified. Raw refers to the", "replaced by the name of the scenario. Args: directory (str): A path to", "Adds an internal node to the children list Args: node (InternalNode): An InternalNode", "'\\n' + scenario.comments # Here we drop the last underscore added name =", "store the scenario file \"\"\" scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name) with", "y=1.08) plt.savefig(directory + os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This", "0 for source_name in sorted(sources): for scenario in all_scenarios: if scenario.name == 'expected':", "'k-', zorder=2, label=label, marker='o', color='g') label = '_nolegend_' # Add forecast to the", "the concatenation of all scenario names, and a probability which is a product", "This function writes out the raw data for this scenario. The raw data", "object and the corresponding paths # used to create it. The paths attribute", "data of the scenario parent: the parent node comments: A string detailing information", "including the actual and expected scenario \"\"\" def __init__(self, scenarios, actual=None, expected=None): \"\"\"", "' 24' value = data[sources_key][key] else: raise RuntimeError(\"Power source '{}' has type '{}',", "'\\n' for i in range(24): # Duplicate the load forecast for the next", "not None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if self.actual_scenario is not", "if dps is not None: label = 'Day Part Separators' for h in", "color='g') label = '_nolegend_' # Add forecast to the plot. 
if self.expected_scenario is", "\"\"\" This routine should write all of the raw scenario files to the", "scenarios. We assume independence across the scenarios. Args: scenarios (list[ScenarioWithPaths]): A collection of", "scenario.name = '{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\" Writes json-files for the actual and", "and an associated probability. Args: name (str): The name of the scenario power_dict", "for other in disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source] del", "\"\"\" aggregated_power = dict_[aggregate_source] del dict_[aggregate_source] for name, proportion in disaggregated.items(): source_power =", "= \"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability, self.data) string += 'Children:\\n' for", "in enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\" Writes json-files for the", "part separators for each source if they are supposed to be in the", "the Scenario tree. Each node has an associated name, probability, data, and pointers", "the plot. if self.expected_scenario is not None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast',", "plt.subplots() for name, vect in self.power_dict.items(): xs = list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours", "source.frac_nondispatch for source in sources} def scenario_data(self): \"\"\" This will construct the dictionary", "list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\" This property returns the list of probabilistic scenarios", "in). Args: axis: The axis to plot to Returns: axis: The axis plotted", "scenarios stored in the true into daps-style Raw_Node_Data objects. 
Returns: (List[Raw_Node_Data]): A list", "the file will only contain the 24-vectors of the power generation and the", "power_dict, probability, comments) # This will have a PowerScenario object and the corresponding", "with a proportion of the values. Args: aggregate_source (str): The name of the", "names to a list of 24 values prob (float): The associated probability of", "load forecast for the next 24 hours. for source in self.load_data: key =", "source + ' ' + str(i + 1) value = data[sources_key][key] elif source_type", "= InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns the corresponding Raw_Node_Data", "return PowerScenario(name, power_dict, probability, comments) # This will have a PowerScenario object and", "forecast) return value def _copy_power_generation(self, data): \"\"\" Copies the power generation data of", "A flag to indicate whether the source to disaggregate is a load source", "dictionary mapping source names to a list of 24 values prob (float): The", "to an internal node which contains actual data for each stage. \"\"\" def", "Attributes: name (str): The name of the scenario power_dict (dict): A mapping from", "dps=None): \"\"\" Basic plotting routine for the scenarios. This will create a plot", "= comments def disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This method will update the dictionary", "vect, label=name) axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis", "it to 0. 
forecast = 0 if forecast is None else forecast min_dispatch", "= list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\" This property returns the list of probabilistic", "str(i + 1) value = data[sources_key][key] elif source_type in ['wind']: key = source", "representing scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self, directory):", "contain all the data parameters and values that change from scenario to scenario", "= data self.children = [] self.comments = comments def add_child(self, node): \"\"\" Adds", "values \"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources, self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name)", "(dict): a dictionary mapping source names to 24-vectors of power generation values prob", "in range(24): for source in self.power_dict: # Translate the power generation values into", "a merged PowerScenario and merged path dictionary \"\"\" # We first merge the", "scenario Returns: SkeletonScenario: The scenario with power and load values \"\"\" return SkeletonScenario(self.name,", "prescient.gosm.basicclasses as basicclasses from prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key =", "A string detailing information about the scenario \"\"\" self.name = name self.probability =", "source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g') label = '_nolegend_' #", "key = source + ' ' + str(i + 25) data[sources_key][key] = value", "of the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis def add_load_data(self, load_data,", "new sources to the proportion of the power of the 
original source is_load", "be disaggregated disaggregated (dict[str,float]): A dictionary mapping names of the new sources to", "of the power generation and the probabilities. This will create a file called", "sources each with a proportion of the values. Args: aggregate_source (str): The name", "Government retains certain rights in this software. # This software is distributed under", "return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\" This method will update the", "[scenario.to_raw_node() for scenario in self.scenarios] def create_tree(self): \"\"\" This creates an instance of", "{}\\n'.format(source)) for dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch, forecast): \"\"\"", "methods for exporting data to scenario files as well. Attributes: scenarios (list[SkeletonScenario]): a", "for i in range(24): # Duplicate the load forecast for the next 24", "over the day probability (float): A value between 0 and 1 representing the", "of the original source \"\"\" aggregated_power = dict_[aggregate_source] del dict_[aggregate_source] for name, proportion", "\"\"\" Adds an internal node to the children list Args: node (InternalNode): An", "results in a dictionary called 'data'. \"\"\" def __init__(self, name, power_dict, prob, load_data,", "a list of CommentedRawNodeData objcts instantiated from each of the scenarios. Returns: list[CommentedRawNodeData]:", "day for the next 24 hours, depending on the type of the respective", "using the data in the PowerScenario in conjunction with the load data passed", "sorted(self.scenarios) def write_raw_scenarios(self, directory, date): \"\"\" This routine should write all of the", "source of interest, this will store a 24-vector of power-values produced. 
Attributes: name", "Simple plotting routing which will plot all the power vectors for every source", "all scenarios \"\"\" return [self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios) def write_raw_scenarios(self, directory, date):", "(dict[str,List[float]]): This is a dictionary mapping source names to a list of 24", "is not None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if self.actual_scenario is", "(float): A value between 0 and 1 representing the probability of the scenario", "load_data (dict[str,List[float]]): a dictionary mapping load sources to 24-vectors sources (List[ExtendedSource]): This is", "\"and 'hydro'.\".format(source, source_type)) key = source + ' ' + str(i + 25)", "forecast for the next 24 hours. for source in self.load_data: key = source", "del self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\" This function writes out the", "of the SkeletonScenarioSet class. Args: scenarios (List[SkeletonScenario]): The list of scenarios actual (SkeletonScenario):", "Raw_Node_Data objects. Returns: (List[Raw_Node_Data]): A list of raw scenario nodes \"\"\" return [child.to_raw_node()", "SkeletonScenario object using the data in the PowerScenario in conjunction with the load", "data[:,i] = values i += 1 scenario_frame = pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory +", "power_vector = [0]*24 for name in source_names: for i, val in enumerate(self.power_dict[name]): power_vector[i]", "pass in the date since this object does not have any knowledge of", "values. This will update the dictionry in-place. Args: dict_ (dict): The dictionry to", "source in self.raw_data: f.write('Source: {}\\n'.format(source)) for dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def", "in. 
It will construct a name which is the concatenation of all scenario", "import prescient.gosm.basicclasses as basicclasses from prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key", "__str__(self): string = \"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability, self.data) string +=", "of scenarios including the actual and expected scenario \"\"\" def __init__(self, scenarios, actual=None,", "the scenario \"\"\" PowerScenario.__init__(self, name, power_dict, prob, comments) self.load_data = load_data self.types =", "string containing extra details about the scenario \"\"\" PowerScenario.__init__(self, name, power_dict, prob, comments)", "the scenarios. Returns: list[CommentedRawNodeData]: The list of node data objects \"\"\" return [scenario.to_raw_node()", "the original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self, source_names, aggregate_source): \"\"\" This", "It will store these results in a dictionary called 'data'. \"\"\" def __init__(self,", "about how scenario was created among other things \"\"\" def __init__(self, name, power_dict,", "data with strings as keys and the minimum and maximum # dispatch values", "PowerScenario in conjunction with the load data passed in. Note this will not", "representation of a set of scenarios. The root points to an internal node", "= [proportion*value for value in aggregated_power] dict_[name] = source_power class SkeletonScenarioSet: \"\"\" This", "associated probability of the scenario comments (str): Additional details about how scenario was", "actual_scenario (SkeletonScenario): the scenario from the actual data expected_scenario (SkeletonScenario): the scenario from", "a set of scenarios. 
The root points to an internal node which contains", "names of the new sources to the proportion of the power of the", "root, scenario.comments) root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root) return tree def normalize_probabilities(self): \"\"\" This", "def normalize_names(self): \"\"\" This function will change the names of the scenarios to", "and forecast data. Args: write_directory: the directory to write in \"\"\" actual_node =", "for the scenarios. This will create a plot for each source with all", "file \"\"\" scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as", "node comments: A string detailing information about the scenario \"\"\" self.name = name", "to_raw_node(self): \"\"\" Creates a daps-style Raw_Node_Data object from the scenario. Sets the parent", "prob (float): the probability of the scenario load_data (dict[str,List[float]]): a dictionary mapping load", "lies outside the hours of sunshine. # In this case, set it to", "copy the values, so if they are changed by some other function, they", "source, source_type in self.types.items(): if source_type in ['solar', 'hydro']: key = source +", "distributed under the Revised BSD License. # ___________________________________________________________________________ import datetime import json import", "with strings as keys and the minimum and maximum # dispatch values as", "a probability which is a product of all probabilities as we assume independence.", "prob, comments=''): \"\"\" To initialize a PowerScenario object, one must pass a scenario", "dictionaries, probabilities, comments for scenario in scenarios: name += scenario.name + '_' power_dict.update(scenario.power_dict)", "shadow=True) # Display a grid and the axes. plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0,", "in the true into daps-style Raw_Node_Data objects. 
Returns: (List[Raw_Node_Data]): A list of raw", "the data parameters and values that change from scenario to scenario (i.e, Min", "This creates an instance of the Scenario Tree class using self.scenarios. Returns: ScenarioTree:", "plt.plot(actual_range, zorder=3, label='Actual', color='b') # Add dps to the plot. if dps is", "sources: plt.figure(source) for scenario in self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label,", "and store that in a new source with the name aggregate_source. It will", "This turns the scenarios stored in the true into daps-style Raw_Node_Data objects. Returns:", "of the scenario data: the data of the scenario parent: the parent node", "an associated probability. Args: name (str): The name of the scenario power_dict (dict[str,List[float]]):", "dictionary \"\"\" # We first merge the PowerScenario objects power_scenarios = [scen.scenario for", "self.load_data = load_data self.types = {source.name: source.source_type for source in sources} self.dispatches =", "prob (float): The associated probability of the scenario comments (str): Additional details about", "sources to the proportion of the power of the original source is_load (bool):", "of the SkeletonScenario class. Args: power_dict (dict): a dictionary mapping source names to", "associated name, probability, data, and pointers to parents and children. \"\"\" def __init__(self,", "The root points to an internal node which contains actual data for each", "24 hours. 
return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\" This method will", "of power generation over the day probability (float): A value between 0 and", "label = 'Day Part Separators' for h in dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey',", "values i += 1 scenario_frame = pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory + os.sep +", "BSD License. # ___________________________________________________________________________ import datetime import json import os from collections import", "power_dict, prob, comments=''): \"\"\" To initialize a PowerScenario object, one must pass a", "name, proportion in disaggregated.items(): source_power = [proportion*value for value in aggregated_power] dict_[name] =", "normalize_probabilities(self): \"\"\" This function will normalize the probabilities of the scenarios so that", "object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability, self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name)", "objects to merge Returns: ScenarioWithPaths: The named tuple object with a merged PowerScenario", "def __repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self): string = \"\" string += \"PowerScenario({})\\n\".format(self.name) for", "list of probabilistic scenarios in addition to the actual scenario and the expected", "class. 
Args: scenarios (List[SkeletonScenario]): The list of scenarios actual (SkeletonScenario): The actual scenario", "Writes json files for each of the scenarios in the tree \"\"\" for", "disaggregated): \"\"\" This method will update the dictionary of power values by replacing", "# ___________________________________________________________________________ # # Prescient # Copyright 2020 National Technology & Engineering Solutions", "forecast min_dispatch = dispatch * forecast value = \"{} {}\\n\".format(min_dispatch, forecast) return value", "the InternalNode # Questionable... internal_node = InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node) tree", "SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources, self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self): string", "Args: load_data (dict[str,List[float]]): A dictionary mapping names of load sources to 24-vectors of", "prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key = 'Demand' sources_key = 'MinNondispatchablePower", "+ str(i + 1) value = data[sources_key][key] elif source_type in ['wind']: key =", "list of 24 values prob (float): The associated probability of the scenario comments", "scenario in self.scenarios] def create_tree(self): \"\"\" This creates an instance of the Scenario", "= {} probability = 1 comments = '' # We merge name, power", "+ str(i + 1) data[load_key][key] = str(forecast) + '\\n' for i in range(24):", "other.name class SkeletonScenario(PowerScenario): \"\"\" This class should contain all the data parameters and", "Raw refers to the fact that the file will only contain the 24-vectors", "name self.power_dict = power_dict self.probability = prob self.comments = comments def disaggregate_source(self, aggregate_source,", "collections 
import OrderedDict, namedtuple import numpy as np import matplotlib as mpl import", "'scenarios.csv' in the directory specified. It is necessary to pass in the date", "This will create a SkeletonScenario object using the data in the PowerScenario in", "load values \"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources, self.comments) def __repr__(self): return", "for an individual node in the Scenario tree. Each node has an associated", "the values for the specified source by a collection of sources each with", "A dictionary of data with strings as keys and the minimum and maximum", "source in sources} self.dispatches = {source.name: source.frac_nondispatch for source in sources} def scenario_data(self):", "else: scen_name = scenario.name scenario_name = source_name + ': ' + scen_name columns.append(scenario_name)", "+ 1) raw_value = self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value for", "in self.scenarios: scen.probability /= prob_sum def normalize_names(self): \"\"\" This function will change the", "name, power_dict, prob, comments=''): \"\"\" To initialize a PowerScenario object, one must pass", "+ list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data =", "of the scenarios. Returns: list[CommentedRawNodeData]: The list of node data objects \"\"\" return", "writes out the raw data for this scenario. The raw data in this", "added name = name[:-1] return PowerScenario(name, power_dict, probability, comments) # This will have", "class will only contain information about power generation and the associated probability and", "the axes. plt.xlabel('Hour') plt.ylabel('Power in Mw') # Create a title. 
plt.title(title + source,", "probability=1) for scenario in self.scenarios: # We pass through the comments as well", "to title (str): The title of the plot dps (dict): the day part", "= 'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This", "# Here we drop the last underscore added name = name[:-1] return PowerScenario(name,", "+ source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This creates a scenario which", "scenarios] scenario = merge_independent_scenarios(power_scenarios) # Then we merge their path dictionaries path_dict =", "(str): The name of the directory to save to title (str): The title", "Args: dispatch (float): The fraction nondispatchable forecast (float): the forecast value Returns: string:", "the directory to write in \"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node =", "disaggregated disaggregated (dict[str,float]): A dictionary mapping names of the new sources to the", "an object of the SkeletonScenarioSet class. Args: scenarios (List[SkeletonScenario]): The list of scenarios", "{}\\n'.format(self.probability) return string def __lt__(self, other): return self.name < other.name class SkeletonScenario(PowerScenario): \"\"\"", "1 representing the probability of the scenario comments (str): Additional details about how", "data expected_scenario (SkeletonScenario): the scenario from the forecast data all_scenarios (list[SkeletonScenario]): The list", "add all scenarios. label = 'Scenarios' for source in sources: plt.figure(source) for scenario", "else forecast min_dispatch = dispatch * forecast value = \"{} {}\\n\".format(min_dispatch, forecast) return", "for that given source. 
Args: directory (str): The name of the directory to", "Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title, dps=None): \"\"\" Basic plotting", "this scenario onto the axis passed in (it will create one if none", "MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This method will update the dictionary", "the scenarios so that they add up to 1. \"\"\" prob_sum = sum(scen.probability", "so if they are changed by some other function, they will be changed", "the sources provided and store that in a new source with the name", "# # Prescient # Copyright 2020 National Technology & Engineering Solutions of Sandia,", "since this object does not have any knowledge of the date of the", "path dictionaries path_dict = {} for scen in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict)", "bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This creates a scenario which merges all", "prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key = 'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_,", "self.scenarios: # We pass through the comments as well to the InternalNode #", "for scenario in scenarios: name += scenario.name + '_' power_dict.update(scenario.power_dict) probability *= scenario.probability", "hour lies outside the hours of sunshine. # In this case, set it", "a scenario without any of the additonal pysp information. The name of the", "parent=None, comments=''): \"\"\" Initializes an object of the InternalNode class. 
Args: name (str):", "date since this object does not have any knowledge of the date of", "parent_name = 'root' if self.parent is None else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name,", "mapping load sources to 24-vectors sources (List[ExtendedSource]): This is just used to get", "of 24 floats of power generation over the day probability (float): A value", "object of the SkeletonScenarioSet class. Args: scenarios (List[SkeletonScenario]): The list of scenarios actual", "(str): Additional details about how scenario was created among other things \"\"\" self.name", "passed in). Args: axis: The axis to plot to Returns: axis: The axis", "We pass through the comments as well to the InternalNode # Questionable... internal_node", "= prob self.comments = comments def disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This method will", "self.probability = prob self.comments = comments def disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This method", "InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns the corresponding Raw_Node_Data object", "files date (datetime-like): The date of the scenarios \"\"\" if not os.path.isdir(directory): os.mkdir(directory)", "'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This will merge ScenarioWithPaths objects and return a ScenarioWithPaths", "os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This creates a scenario", "created among other things \"\"\" def __init__(self, name, power_dict, prob, comments=''): \"\"\" To", "to save to title (str): The title of the plot dps (dict): the", "inner_key, inner_data in data.items(): string += \"{}: {}\\n\".format(inner_key, inner_data) return 
string class ScenarioTree:", "os.mkdir(directory) index = ['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios", "0. forecast = 0 if forecast is None else forecast min_dispatch = dispatch", "def __str__(self): string = \"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability, self.data) string", "scenarios (list[SkeletonScenario]): a list of scenarios actual_scenario (SkeletonScenario): the scenario from the actual", "of the power of the original source \"\"\" aggregated_power = dict_[aggregate_source] del dict_[aggregate_source]", "for the actual and forecast data. Args: write_directory: the directory to write in", "the sources used in the scenario Returns: SkeletonScenario: The scenario with power and", "change from scenario to scenario (i.e, Min Dispatchable Power, Max Dispatchable Power). It", "their path dictionaries path_dict = {} for scen in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario,", "Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title, dps=None): \"\"\" Basic", "routine for the scenarios. This will create a plot for each source with", "24 hours, depending on the type of the respective source. \"\"\" for i", "in self.load_data: # Save the load forecast. forecast = self.load_data[source][i] key = source", "mapping source names to lists of 24 floats and an associated probability. Args:", "just used to get the source types comments (str): A string containing extra", "def scenario_data(self): \"\"\" This will construct the dictionary mapping keys to scenario values.", "= parent self.data = data self.children = [] self.comments = comments def add_child(self,", "legend. 
lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) # Display a grid", "numbered in the form \"Scenario_i\". \"\"\" for i, scenario in enumerate(self.scenarios): scenario.name =", "This function will change the names of the scenarios to be numbered in", "\"\"\" Basic Tree representation of a set of scenarios. The root points to", "power_dict (dict[str,List[float]]): This is a dictionary mapping source names to a list of", "plot for each source with all the power generation data for that given", "of sunshine. # In this case, set it to 0. forecast = 0", "+ ' ' + str(i + 1) data[load_key][source+' '+str(i+25)] = \\ data[load_key][key] #", "source and add all scenarios. label = 'Scenarios' for source in sources: plt.figure(source)", "aggregate_source. It will delete all the original source power vectors. Args: source_names (list[str]):", "CommentedRawNodeData objcts instantiated from each of the scenarios. Returns: list[CommentedRawNodeData]: The list of", "a PowerScenario object and the corresponding paths # used to create it. The", "\"\"\" self.name = name self.probability = probability self.parent = parent self.data = data", "retains certain rights in this software. # This software is distributed under the", "the comments as well to the InternalNode # Questionable... internal_node = InternalNode(scenario.name, scenario.probability,", "with open(scen_file, 'w') as f: f.write('Probability: {}\\n'.format(self.probability)) for source in self.raw_data: f.write('Source: {}\\n'.format(source))", "and the expected scenario. 
Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(),", "this object does not have any knowledge of the date of the scenario.", "\"\"\" This turns the scenarios stored in the true into daps-style Raw_Node_Data objects.", "string: the minimum and the maximum dispatch value, separated by a blank space", "name of the scenario power_dict (dict): A mapping from source names to lists", "of the scenarios to be numbered in the form \"Scenario_i\". \"\"\" for i,", "date): \"\"\" This routine should write all of the raw scenario files to", "values, so if they are changed by some other function, they will be", "pyspgen import prescient.gosm.basicclasses as basicclasses from prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution", "We assume independence across the scenarios. Args: scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths", "the scenario file \"\"\" scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file,", "minimum and the maximum dispatch value for the forecast. Args: dispatch (float): The", "value in aggregated_power] dict_[name] = source_power class SkeletonScenarioSet: \"\"\" This class should manage", "the scenario tree \"\"\" root = InternalNode(\"root\", probability=1) for scenario in self.scenarios: #", "= {} for scen in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\"", "well as the paths from all scenarios. We assume independence across the scenarios.", "the actual and forecast data. 
Args: write_directory: the directory to write in \"\"\"", "power of the original source \"\"\" aggregated_power = dict_[aggregate_source] del dict_[aggregate_source] for name,", "pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability, self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self): string", "update the dictionry in-place. Args: dict_ (dict): The dictionry to disaggregate aggregate_source (str):", "= [] i = 0 for source_name in sorted(sources): for scenario in all_scenarios:", "write_raw_data(self, directory): \"\"\" This function writes out the raw data for this scenario.", "child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This turns the scenarios stored in the true into", "if scenario.name == 'expected': scen_name = 'forecasts' else: scen_name = scenario.name scenario_name =", "It will delete all the original source power vectors. Args: source_names (list[str]): Names", "from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key = 'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower ' def", "values produced in a scenario without any of the additonal pysp information. The", "values. Args: aggregate_source (str): The name of the source to be disaggregated disaggregated", "Dispatchable Power). It will store these results in a dictionary called 'data'. \"\"\"", "node into a daps-style Raw_Node_Data object. Returns: (Raw_Node_Data): raw node representing scenario \"\"\"", "class SkeletonScenarioSet: \"\"\" This class should manage all single skeleton scenarios and have", "this software. # This software is distributed under the Revised BSD License. 
#", "self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\" This method will update the dictionary", "aggregated_power = dict_[aggregate_source] del dict_[aggregate_source] for name, proportion in disaggregated.items(): source_power = [proportion*value", "self.expected_scenario] + \\ sorted(self.scenarios) def write_raw_scenarios(self, directory, date): \"\"\" This routine should write", "not os.path.isdir(directory): os.makedirs(directory) # This is a little hack to get the source", "name (str): The name of the scenario power_dict (dict[str,List[float]]): This is a dictionary", "pandas as pd from prescient.gosm.structures import skeleton_point_paths as paths import prescient.gosm.pyspgen as pyspgen", "all the data parameters and values that change from scenario to scenario (i.e,", "list[SkeletonScenario]: The list of all scenarios \"\"\" return [self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios)", "Args: axis: The axis to plot to Returns: axis: The axis plotted to", "scen in scenarios] scenario = merge_independent_scenarios(power_scenarios) # Then we merge their path dictionaries", "namedtuple import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt", "source names to a list of 24 values prob (float): The associated probability", "self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g') label = '_nolegend_'", "store that in a new source with the name aggregate_source. It will delete", "forecast will be None if the # respective hour lies outside the hours", "terms of Contract DE-NA0003525 with NTESS, the U.S. 
# Government retains certain rights", "axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis def add_load_data(self,", "dictionary mapping names of load sources to 24-vectors of load values sources (List[ExtendedSource]):", "name += scenario.name + '_' power_dict.update(scenario.power_dict) probability *= scenario.probability if scenario.comments: comments +=", "label='Forecast', color='r') if self.actual_scenario is not None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual',", "= [scenario.probability] + \\ scenario.power_dict[source_name] data[:,i] = values i += 1 scenario_frame =", "PowerScenario: A scenario which is formed by merging all the other scenarios \"\"\"", "for scenario in all_scenarios: if scenario.name == 'expected': scen_name = 'forecasts' else: scen_name", "that the file will only contain the 24-vectors of the power generation and", "source. Args: directory (str): The name of the directory to save to title", "all_scenarios (list[SkeletonScenario]): The list of scenarios including the actual and expected scenario \"\"\"", "of the scenario. 
Args: directory (str): A path to the directory to store", "= data[sources_key][key] elif source_type in ['wind']: key = source + ' 24' value", "for source_name in sorted(sources): for scenario in all_scenarios: if scenario.name == 'expected': scen_name", "update the dictionary of power values by replacing the values for the specified", "of the original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self, source_names, aggregate_source): \"\"\"", "the tree \"\"\" for child in self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This turns", "os.sep + filename, 'w') as f: json.dump(self.data, f, sort_keys=True, indent=2) def __str__(self): string", "from scenario to scenario (i.e, Min Dispatchable Power, Max Dispatchable Power). It will", "vectors for every source stored in this scenario onto the axis passed in", "scenario parent: the parent node comments: A string detailing information about the scenario", "24 floats and an associated probability. 
Args: name (str): The name of the", "+= \"{}: {}\\n\".format(inner_key, inner_data) return string class ScenarioTree: \"\"\" Basic Tree representation of", "scenario.data, root, scenario.comments) root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root) return tree def normalize_probabilities(self): \"\"\"", "was created among other things \"\"\" def __init__(self, name, power_dict, prob, comments=''): \"\"\"", "del self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\" This function writes out the raw data", "the values, so if they are changed by some other function, they will", "form {source_name -> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This", "+ os.sep + 'scenarios.csv') def create_raw_nodes(self): \"\"\" This returns a list of CommentedRawNodeData", "the file will be Scenario_<name>.dat where <name> is replaced by the name of", "probability of the scenario data: the data of the scenario parent: the parent", "node which contains actual data for each stage. \"\"\" def __init__(self): self.root =", "def merge_scenarios_with_paths(scenarios): \"\"\" This will merge ScenarioWithPaths objects and return a ScenarioWithPaths objects", "called 'scenarios.csv' in the directory specified. It is necessary to pass in the", "returns the list of probabilistic scenarios in addition to the actual scenario and", "an internal node to the children list Args: node (InternalNode): An InternalNode object", "source_power = [proportion*value for value in aggregated_power] dict_[name] = source_power class SkeletonScenarioSet: \"\"\"", "and expected scenario \"\"\" def __init__(self, scenarios, actual=None, expected=None): \"\"\" Initializes an object", "for the actual and the expected scenario. 
Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data", "the new sources to the proportion of the power of the original source", "to be in the plot \"\"\" if not os.path.isdir(directory): os.makedirs(directory) # This is", "returns a list of CommentedRawNodeData objcts instantiated from each of the scenarios. Returns:", "store the files date (datetime-like): The date of the scenarios \"\"\" if not", "construct the dictionary mapping keys to scenario values. \"\"\" # A dictionary of", "initialize a PowerScenario object, one must pass a scenario name, a dictionary mapping", "Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory,", "\"{}: {}\\n\".format(inner_key, inner_data) return string class ScenarioTree: \"\"\" Basic Tree representation of a", "self.actual_scenario = actual self.expected_scenario = expected self.source_names = list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\"", "the dictionary of power values by replacing the values for the specified source", "the PowerScenario in conjunction with the load data passed in. Note this will", "= [] self.comments = comments def add_child(self, node): \"\"\" Adds an internal node", "data.items(): string += \"{}: {}\\n\".format(inner_key, inner_data) return string class ScenarioTree: \"\"\" Basic Tree", "self.types = {source.name: source.source_type for source in sources} self.dispatches = {source.name: source.frac_nondispatch for", "forecast data. 
Args: write_directory: the directory to write in \"\"\" actual_node = InternalNode(self.actual_scenario.name,", "passed forecast will be None if the # respective hour lies outside the", "in the dictionary of a scenario sources = list(self.scenarios[0].power_dict.keys()) # Create a plot", "to the children list Args: node (InternalNode): An InternalNode object \"\"\" self.children.append(node) def", "scenario probability (float): the probability of the scenario data: the data of the", "they add up to 1. \"\"\" prob_sum = sum(scen.probability for scen in self.scenarios)", "node representing scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self,", "the respective source. \"\"\" for i in range(24): for source, source_type in self.types.items():", "data[sources_key][key] = value for source in self.load_data: # Save the load forecast. forecast", "parent_name, self.probability) with open(directory + os.sep + filename, 'w') as f: json.dump(self.data, f,", "of the Scenario Tree class using self.scenarios. Returns: ScenarioTree: the scenario tree \"\"\"", "of power generation values prob (float): the probability of the scenario load_data (dict[str,List[float]]):", "power generation data for that given source. Args: directory (str): The name of", "to the actual scenario and the expected scenario. Returns: list[SkeletonScenario]: The list of", "source_name in sorted(sources): for scenario in all_scenarios: if scenario.name == 'expected': scen_name =", "of the plot dps (dict): the day part separators for each source if", "without any of the additonal pysp information. The name of the file will", "in addition to the actual scenario and the expected scenario. 
Returns: list[SkeletonScenario]: The", "1) raw_value = self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value for source", "This will construct the dictionary mapping keys to scenario values. \"\"\" # A", "source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self, source_names, aggregate_source): \"\"\" This method will", "\"\"\" Returns the corresponding Raw_Node_Data object for the actual and the expected scenario.", "dict_ (dict): The dictionry to disaggregate aggregate_source (str): The name of the source", "save to title (str): The title of the plot dps (dict): the day", "the specified source by a collection of sources each with a proportion of", "generation values into strings of minimum # and maximum dispatch values. key =", "load forecast. forecast = self.load_data[source][i] key = source + ' ' + str(i", "[self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios) def write_raw_scenarios(self, directory, date): \"\"\" This routine should", "== 'expected': scen_name = 'forecasts' else: scen_name = scenario.name scenario_name = source_name +", "'_' power_dict.update(scenario.power_dict) probability *= scenario.probability if scenario.comments: comments += '\\n' + scenario.comments #", "actual scenario expected (SkeletonScenario): The expected scenario \"\"\" self.scenarios = scenarios self.actual_scenario =", "to plot to Returns: axis: The axis plotted to \"\"\" if axis is", "string = \"SkeletonScenario({}):\\n\".format(self.name) for key, data in self.data.items(): string += \"{}:\\n\".format(key) for inner_key,", "= self.load_data[source][i] key = source + ' ' + str(i + 1) data[load_key][key]", "import pandas as pd from prescient.gosm.structures import skeleton_point_paths as paths import prescient.gosm.pyspgen as", "1 comments = '' # We merge name, power dictionaries, probabilities, comments for", "for value in 
aggregated_power] dict_[name] = source_power class SkeletonScenarioSet: \"\"\" This class should", "will normalize the probabilities of the scenarios so that they add up to", "power dictionaries of the PowerScenario objects passed in. It will construct a name", "expected (SkeletonScenario): The expected scenario \"\"\" self.scenarios = scenarios self.actual_scenario = actual self.expected_scenario", "scen_name columns.append(scenario_name) values = [scenario.probability] + \\ scenario.power_dict[source_name] data[:,i] = values i +=", "+ 1) data[load_key][source+' '+str(i+25)] = \\ data[load_key][key] # Copy the power generation values", "= self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value for source in self.load_data: # Save the", "a scenario which merges all the power dictionaries of the PowerScenario objects passed", "using self.scenarios. Returns: ScenarioTree: the scenario tree \"\"\" root = InternalNode(\"root\", probability=1) for", "and return a ScenarioWithPaths objects which has the power generation vectors from all", "string += 'Children:\\n' for child in self.children: string += str(child) return string +", "actual (SkeletonScenario): The actual scenario expected (SkeletonScenario): The expected scenario \"\"\" self.scenarios =", "mapping keys to scenario values. \"\"\" # A dictionary of data with strings", "if not os.path.isdir(directory): os.mkdir(directory) index = ['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources", "Returns: ScenarioTree: the scenario tree \"\"\" root = InternalNode(\"root\", probability=1) for scenario in", "keys in the dictionary of a scenario sources = list(self.scenarios[0].power_dict.keys()) # Create a", "maximum dispatch value, separated by a blank space \"\"\" # In the case", "object does not have any knowledge of the date of the scenario. 
Args:", "for scen in self.scenarios) for scen in self.scenarios: scen.probability /= prob_sum def normalize_names(self):", "source in self.load_data: key = source + ' ' + str(i + 1)", "Initializes an object of the SkeletonScenarioSet class. Args: scenarios (List[SkeletonScenario]): The list of", "The date of the scenarios \"\"\" if not os.path.isdir(directory): os.mkdir(directory) index = ['Probability']", "+= 'Probability: {}\\n'.format(self.probability) return string def __lt__(self, other): return self.name < other.name class", "directory to store the files date (datetime-like): The date of the scenarios \"\"\"", "load_data, sources, self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self): string = \"\" string", "is passed in). Args: axis: The axis to plot to Returns: axis: The", "to Returns: axis: The axis plotted to \"\"\" if axis is None: fig,", "\" \"types recognized are 'solar', 'wind', \" \"and 'hydro'.\".format(source, source_type)) key = source", "parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self, directory): \"\"\" Writes json file for this node", "about how scenario was created among other things \"\"\" self.name = name self.power_dict", "label='Actual', color='b') # Add dps to the plot. if dps is not None:", "# In this case, set it to 0. forecast = 0 if forecast", "in Mw') # Create a title. plt.title(title + source, y=1.08) plt.savefig(directory + os.sep", "f.write('Probability: {}\\n'.format(self.probability)) for source in self.raw_data: f.write('Source: {}\\n'.format(source)) for dt, value in self.raw_data[source].items():", "directory to save to title (str): The title of the plot dps (dict):", "# This is a little hack to get the source names, these are", "probability which is a product of all probabilities as we assume independence. Args:", "\"\"\" Creates a daps-style Raw_Node_Data object from the scenario. 
Sets the parent to", "= plt.subplots() for name, vect in self.power_dict.items(): xs = list(range(24)) axis.plot(xs, vect, label=name)", "the maximum dispatch value for the forecast. Args: dispatch (float): The fraction nondispatchable", "self.children = [] self.comments = comments def add_child(self, node): \"\"\" Adds an internal", "def disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This method will update the dictionary of power", "necessary to pass in the date since this object does not have any", "and the probabilities. This will create a file called 'scenarios.csv' in the directory", "PowerScenario object, one must pass a scenario name, a dictionary mapping source names", "directory): \"\"\" This function writes out the raw data for this scenario. The", "the source power vectors for the sources provided and store that in a", "this will not copy the values, so if they are changed by some", "{sources_key: OrderedDict(), load_key: OrderedDict()} for i in range(24): for source in self.power_dict: #", "load_key: OrderedDict()} for i in range(24): for source in self.power_dict: # Translate the", "@property def all_scenarios(self): \"\"\" This property returns the list of probabilistic scenarios in", "# Add forecast to the plot. if self.expected_scenario is not None: forecast_range =", "be None if the # respective hour lies outside the hours of sunshine.", "raw_value) data[sources_key][key] = value for source in self.load_data: # Save the load forecast.", "the scenario from the actual data expected_scenario (SkeletonScenario): the scenario from the forecast", "scenario in scenarios: name += scenario.name + '_' power_dict.update(scenario.power_dict) probability *= scenario.probability if", "class. Args: name (str): the name of the scenario probability (float): the probability", "sunshine. # In this case, set it to 0. 
forecast = 0 if", "lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) # Display a grid and", "file for this node to the specified directory Args: directory: the directory to", "Duplicate the load forecast for the next 24 hours. for source in self.load_data:", "plt.axvline(x=0, color='k') # Name the axes. plt.xlabel('Hour') plt.ylabel('Power in Mw') # Create a", "DE-NA0003525 with NTESS, the U.S. # Government retains certain rights in this software.", "scenario. Returns: list[SkeletonScenario]: The list of all scenarios \"\"\" return [self.actual_scenario, self.expected_scenario] +", "Determines the minimum and the maximum dispatch value for the forecast. Args: dispatch", "__init__(self, name, probability, data=None, parent=None, comments=''): \"\"\" Initializes an object of the InternalNode", "This class will only contain information about power generation and the associated probability", "self.root = node def write_json_files(self, output_directory): \"\"\" Writes json files for each of", "will change the names of the scenarios to be numbered in the form", "# dispatch values as (str) values. data = {sources_key: OrderedDict(), load_key: OrderedDict()} for", "plt.xlabel('Hour') plt.ylabel('Power in Mw') # Create a title. plt.title(title + source, y=1.08) plt.savefig(directory", "currently. Returns: Raw_Node_Data: The equivalent Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root',", "source \"\"\" aggregated_power = dict_[aggregate_source] del dict_[aggregate_source] for name, proportion in disaggregated.items(): source_power", "nodes \"\"\" return [child.to_raw_node() for child in self.root.children] def __str__(self): return \"Tree:\\n\" +", "turns the scenarios stored in the true into daps-style Raw_Node_Data objects. Returns: (List[Raw_Node_Data]):", "marker='o', color='g') label = '_nolegend_' # Add forecast to the plot. 
if self.expected_scenario", "f, sort_keys=True, indent=2) def __str__(self): string = \"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name,", "= self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\"", "[scen.scenario for scen in scenarios] scenario = merge_independent_scenarios(power_scenarios) # Then we merge their", "in self.power_dict.items(): xs = list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power", "equivalent Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability, self.comments) def __repr__(self):", "with a merged PowerScenario and merged path dictionary \"\"\" # We first merge", "sorted(sources): for scenario in all_scenarios: if scenario.name == 'expected': scen_name = 'forecasts' else:", "plt.title(title + source, y=1.08) plt.savefig(directory + os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def", "which is formed by merging all the other scenarios \"\"\" name = \"\"", "Min Dispatchable Power, Max Dispatchable Power). It will store these results in a", "scenarios in the tree \"\"\" for child in self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\"", "power generation values produced in a scenario without any of the additonal pysp", "the specified directory Args: directory: the directory to store the json file in", "source_names: for i, val in enumerate(self.power_dict[name]): power_vector[i] += val del self.power_dict[name] self.power_dict[aggregate_source] =", "(str): The path to the directory to store the files date (datetime-like): The", "floats and an associated probability. 
Args: name (str): The name of the scenario", "with open(directory + os.sep + filename, 'w') as f: json.dump(self.data, f, sort_keys=True, indent=2)", "\"\"\" self.name = name self.power_dict = power_dict self.probability = prob self.comments = comments", "this sense refers to the 24-vector of the power generation values produced in", "of the scenario load_data (dict[str,List[float]]): a dictionary mapping load sources to 24-vectors sources", "an instance of the Scenario Tree class using self.scenarios. Returns: ScenarioTree: the scenario", "power of the original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self, source_names, aggregate_source):", "string detailing information about the scenario \"\"\" self.name = name self.probability = probability", "plot dps (dict): the day part separators for each source if they are", "\"\"\" This function writes out the raw data for this scenario. The raw", "label = '_nolegend_' # Add forecast to the plot. if self.expected_scenario is not", "contain the 24-vectors of the power generation and the probabilities. This will create", "power generation over the day probability (float): A value between 0 and 1", "create a SkeletonScenario object using the data in the PowerScenario in conjunction with", "of the scenarios \"\"\" if not os.path.isdir(directory): os.mkdir(directory) index = ['Probability'] + list(", "aggregate aggregate_sources (str): The name of the aggregate source \"\"\" power_vector = [0]*24", "each source with all the power generation data for that given source. Args:", "plot \"\"\" if not os.path.isdir(directory): os.makedirs(directory) # This is a little hack to", "be Scenario_<name>.dat where <name> is replaced by the name of the scenario. Args:", "a collection of sources each with a proportion of the values. This will", "len(sources)*len(all_scenarios)]) columns = [] i = 0 for source_name in sorted(sources): for scenario", "the directory specified. 
It is necessary to pass in the date since this", "'expected': scen_name = 'forecasts' else: scen_name = scenario.name scenario_name = source_name + ':", "all the other scenarios \"\"\" name = \"\" power_dict = {} probability =", "that change from scenario to scenario (i.e, Min Dispatchable Power, Max Dispatchable Power).", "the SkeletonScenario class. Args: power_dict (dict): a dictionary mapping source names to 24-vectors", "InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\"", "scenario (i.e, Min Dispatchable Power, Max Dispatchable Power). It will store these results", "def plot(self, axis=None): \"\"\" Simple plotting routing which will plot all the power", "object Args: load_data (dict[str,List[float]]): A dictionary mapping names of load sources to 24-vectors", "forecast. 
forecast = self.load_data[source][i] key = source + ' ' + str(i +", "Args: aggregate_source (str): The name of the source to be disaggregated disaggregated (dict[str,float]):", "comments += '\\n' + scenario.comments # Here we drop the last underscore added", "(bool): A flag to indicate whether the source to disaggregate is a load", "Args: source_names (list[str]): Names of the sources to aggregate aggregate_sources (str): The name", "if scenario.comments: comments += '\\n' + scenario.comments # Here we drop the last", "sources, self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self): string = \"\" string +=", "directory): \"\"\" Writes json file for this node to the specified directory Args:", "aggregate_sources (str): The name of the aggregate source \"\"\" power_vector = [0]*24 for", "children list Args: node (InternalNode): An InternalNode object \"\"\" self.children.append(node) def to_raw_node(self): \"\"\"", "knowledge of the date of the scenario. Args: directory (str): The path to", "pass through the comments as well to the InternalNode # Questionable... 
internal_node =", "of all scenarios \"\"\" return [self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios) def write_raw_scenarios(self, directory,", "data): \"\"\" Copies the power generation data of the day for the next", "__lt__(self, other): return self.name < other.name class SkeletonScenario(PowerScenario): \"\"\" This class should contain", "= 'Day Part Separators' for h in dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--')", "In the case of solar power, the passed forecast will be None if", "[] self.comments = comments def add_child(self, node): \"\"\" Adds an internal node to", "(List[PowerScenario]): The list of scenarios to merge Returns: PowerScenario: A scenario which is", "\"\"\" This will merge ScenarioWithPaths objects and return a ScenarioWithPaths objects which has", "for the sources provided and store that in a new source with the", "conjunction with the load data passed in. Note this will not copy the", "information about the scenario \"\"\" self.name = name self.probability = probability self.parent =", "root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root) return tree def normalize_probabilities(self): \"\"\" This function will", "# Copy the power generation values for the next 24 hours. return self._copy_power_generation(data)", "label=label, color='grey', linestyle='--') label = '_nolegend_' # Display a legend. lgd = plt.legend(loc='lower", "one must pass a scenario name, a dictionary mapping source names to lists", "directory to write in \"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name,", "names to lists of 24 floats and an associated probability. 
Args: name (str):", "of the day for the next 24 hours, depending on the type of", "= 'root' if self.parent is None else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name,", "= str(forecast) + '\\n' for i in range(24): # Duplicate the load forecast", "data parameters and values that change from scenario to scenario (i.e, Min Dispatchable", "scenario \"\"\" def __init__(self, scenarios, actual=None, expected=None): \"\"\" Initializes an object of the", "hack to get the source names, these are stored # as keys in", "+ 1) value = data[sources_key][key] elif source_type in ['wind']: key = source +", "the minimum and the maximum dispatch value, separated by a blank space \"\"\"", "= '_nolegend_' # Add forecast to the plot. if self.expected_scenario is not None:", "in range(24): for source, source_type in self.types.items(): if source_type in ['solar', 'hydro']: key", "Mw') # Create a title. plt.title(title + source, y=1.08) plt.savefig(directory + os.sep +", "path_dict) class PowerScenario: \"\"\" This class will only contain information about power generation", "of the power of the original source is_load (bool): A flag to indicate", "separators for each source if they are supposed to be in the plot", "prob, load_data, sources, comments=''): \"\"\" Initializes an object of the SkeletonScenario class. Args:", "a dictionary mapping load sources to 24-vectors sources (List[ExtendedSource]): This is just used", "\"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self, source_names, aggregate_source): \"\"\" This method will add", "up all the source power vectors for the sources provided and store that", "node has an associated name, probability, data, and pointers to parents and children.", "objcts instantiated from each of the scenarios. 
Returns: list[CommentedRawNodeData]: The list of node", "\"\"\" # We first merge the PowerScenario objects power_scenarios = [scen.scenario for scen", "to 24-vectors of power generation values prob (float): the probability of the scenario", "daps-style Raw_Node_Data objects. Returns: (List[Raw_Node_Data]): A list of raw scenario nodes \"\"\" return", "self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if self.actual_scenario is not None: actual_range = self.actual_scenario.power_dict[source]", "open(directory + os.sep + filename, 'w') as f: json.dump(self.data, f, sort_keys=True, indent=2) def", "of the additonal pysp information. The name of the file will be Scenario_<name>.dat", "name, probability, data=None, parent=None, comments=''): \"\"\" Initializes an object of the InternalNode class.", "change the names of the scenarios to be numbered in the form \"Scenario_i\".", "self.power_dict.items(): xs = list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power Values')", "plt import pandas as pd from prescient.gosm.structures import skeleton_point_paths as paths import prescient.gosm.pyspgen", "list of scenarios including the actual and expected scenario \"\"\" def __init__(self, scenarios,", "refers to the fact that the file will only contain the 24-vectors of", "zorder=3, label='Actual', color='b') # Add dps to the plot. if dps is not", "class should contain all the data parameters and values that change from scenario", "of a set of scenarios. The root points to an internal node which", "additonal pysp information. 
The name of the file will be Scenario_<name>.dat where <name>", "original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self, source_names, aggregate_source): \"\"\" This method", "power_vector def plot(self, axis=None): \"\"\" Simple plotting routing which will plot all the", "plotting routine for the scenarios. This will create a plot for each source", "enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\" Writes json-files for the actual", "Engineering Solutions of Sandia, LLC # (NTESS). Under the terms of Contract DE-NA0003525", "to disaggregate aggregate_source (str): The name of the source to be disaggregated disaggregated", "A string containing extra details about the scenario \"\"\" PowerScenario.__init__(self, name, power_dict, prob,", "and the associated probability and name. For each source of interest, this will", "separated by a blank space \"\"\" # In the case of solar power,", "have methods for exporting data to scenario files as well. Attributes: scenarios (list[SkeletonScenario]):", "= dispatch * forecast value = \"{} {}\\n\".format(min_dispatch, forecast) return value def _copy_power_generation(self,", "all_scenarios: if scenario.name == 'expected': scen_name = 'forecasts' else: scen_name = scenario.name scenario_name", "original source power vectors. Args: source_names (list[str]): Names of the sources to aggregate", "load_data (dict[str,List[float]]): A dictionary mapping names of load sources to 24-vectors of load", "In this case, set it to 0. forecast = 0 if forecast is", "how scenario was created among other things \"\"\" self.name = name self.power_dict =", "= \\ data[load_key][key] # Copy the power generation values for the next 24", "write_json_files(self, output_directory): \"\"\" Writes json files for each of the scenarios in the", "children. 
\"\"\" def __init__(self, name, probability, data=None, parent=None, comments=''): \"\"\" Initializes an object", "freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)]) columns =", "a list of 24 values prob (float): The associated probability of the scenario", "case, set it to 0. forecast = 0 if forecast is None else", "'Probability: {}\\n'.format(self.probability) return string def __lt__(self, other): return self.name < other.name class SkeletonScenario(PowerScenario):", "' ' + str(i + 1) raw_value = self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value)", "# Prescient # Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC", "# as keys in the dictionary of a scenario sources = list(self.scenarios[0].power_dict.keys()) #", "___________________________________________________________________________ # # Prescient # Copyright 2020 National Technology & Engineering Solutions of", "refers to the 24-vector of the power generation values produced in a scenario", "list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)]) columns = [] i =", "An InternalNode object \"\"\" self.children.append(node) def to_raw_node(self): \"\"\" Converts the internal node into", "and a probability which is a product of all probabilities as we assume", "The name of the file will be Scenario_<name>.dat where <name> is replaced by", "(SkeletonScenario): the scenario from the actual data expected_scenario (SkeletonScenario): the scenario from the", "(float): the probability of the scenario load_data (dict[str,List[float]]): a dictionary mapping load sources", "= ScenarioTree() tree.set_root(root) return tree def normalize_probabilities(self): \"\"\" This function will normalize the", "as paths import prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses 
as basicclasses from prescient.util.distributions.distribution_factory import", "fig, axis = plt.subplots() for name, vect in self.power_dict.items(): xs = list(range(24)) axis.plot(xs,", "internal node which contains actual data for each stage. \"\"\" def __init__(self): self.root", "self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This turns the scenarios stored in the true", "' def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This method will update the dictionary of", "the load forecast for the next 24 hours. for source in self.load_data: key", "a little hack to get the source names, these are stored # as", "self.children.append(node) def to_raw_node(self): \"\"\" Converts the internal node into a daps-style Raw_Node_Data object.", "power_dict, prob, comments) self.load_data = load_data self.types = {source.name: source.source_type for source in", "merge_independent_scenarios(scenarios): \"\"\" This creates a scenario which merges all the power dictionaries of", "in the PowerScenario in conjunction with the load data passed in. 
Note this", "the probability of the scenario load_data (dict[str,List[float]]): a dictionary mapping load sources to", "(list[ScenarioWithPaths]): A collection of ScenarioWithPaths objects to merge Returns: ScenarioWithPaths: The named tuple", "SkeletonScenario: The scenario with power and load values \"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability,", "expected self.source_names = list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\" This property returns the list", "\"\"\" power_vector = [0]*24 for name in source_names: for i, val in enumerate(self.power_dict[name]):", "the parent node comments: A string detailing information about the scenario \"\"\" self.name", "of the directory to save to title (str): The title of the plot", "__init__(self, name, power_dict, prob, load_data, sources, comments=''): \"\"\" Initializes an object of the", "str(i + 1) data[load_key][key] = str(forecast) + '\\n' for i in range(24): #", "The path to the directory to store the files date (datetime-like): The date", "'root' if self.parent is None else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability)", "for every source and add all scenarios. label = 'Scenarios' for source in", "scenario nodes \"\"\" return [child.to_raw_node() for child in self.root.children] def __str__(self): return \"Tree:\\n\"", "and the corresponding paths # used to create it. The paths attribute will", "names to 24-vectors of power generation values prob (float): the probability of the", "+= \"{}:\\n\".format(key) for inner_key, inner_data in data.items(): string += \"{}: {}\\n\".format(inner_key, inner_data) return", "the scenario probability (float): the probability of the scenario data: the data of", "Translate the power generation values into strings of minimum # and maximum dispatch", "Raw_Node_Data object. 
Returns: (Raw_Node_Data): raw node representing scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name,", "node in the Scenario tree. Each node has an associated name, probability, data,", "Initializes an object of the SkeletonScenario class. Args: power_dict (dict): a dictionary mapping", "will create a plot for each source with all the power generation data", "return [self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios) def write_raw_scenarios(self, directory, date): \"\"\" This routine", "of the scenario. Args: directory (str): The path to the directory to store", "in a new source with the name aggregate_source. It will delete all the", "self.probability, load_data, sources, self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self): string = \"\"", "= load_data self.types = {source.name: source.source_type for source in sources} self.dispatches = {source.name:", "all the power vectors for every source stored in this scenario onto the", "source + ' ' + str(i + 1) raw_value = self.power_dict[source][i] value =", "are stored # as keys in the dictionary of a scenario sources =", "power_dict (dict): A mapping from source names to lists of 24 floats of", "raw node representing scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def", "of solar power, the passed forecast will be None if the # respective", "be in the plot \"\"\" if not os.path.isdir(directory): os.makedirs(directory) # This is a", "self.probability, self.data) string += 'Children:\\n' for child in self.children: string += str(child) return", "of the values. This will update the dictionry in-place. 
Args: dict_ (dict): The", "self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns", "create a plot for each source with all the power generation data for", "scen in self.scenarios) for scen in self.scenarios: scen.probability /= prob_sum def normalize_names(self): \"\"\"", "in this sense refers to the 24-vector of the power generation values produced", "by a collection of sources each with a proportion of the values. This", "in data.items(): string += \"{}: {}\\n\".format(inner_key, inner_data) return string class ScenarioTree: \"\"\" Basic", "rights in this software. # This software is distributed under the Revised BSD", "prob, comments) self.load_data = load_data self.types = {source.name: source.source_type for source in sources}", "all scenarios. label = 'Scenarios' for source in sources: plt.figure(source) for scenario in", "+ 1) data[load_key][key] = str(forecast) + '\\n' for i in range(24): # Duplicate", "the maximum dispatch value, separated by a blank space \"\"\" # In the", "\", \".join(map(str, power_vector))) string += 'Probability: {}\\n'.format(self.probability) return string def __lt__(self, other): return", "'w') as f: json.dump(self.data, f, sort_keys=True, indent=2) def __str__(self): string = \"Internal Node", "data for that given source. 
Args: directory (str): The name of the directory", "file in \"\"\" # if no parent specified, assume parent is root parent_name", "tree \"\"\" root = InternalNode(\"root\", probability=1) for scenario in self.scenarios: # We pass", "str(i + 1) raw_value = self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value", "basicclasses from prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key = 'Demand' sources_key", "# A dictionary of data with strings as keys and the minimum and", "import datetime import json import os from collections import OrderedDict, namedtuple import numpy", "\"\"\" if not os.path.isdir(directory): os.mkdir(directory) index = ['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H'))", "f: json.dump(self.data, f, sort_keys=True, indent=2) def __str__(self): string = \"Internal Node {}:\\nprobability: {}\\ndata:", "as well to the InternalNode # Questionable... internal_node = InternalNode(scenario.name, scenario.probability, scenario.data, root,", "This property returns the list of probabilistic scenarios in addition to the actual", "sources to 24-vectors of load values sources (List[ExtendedSource]): A list of the sources", "\"\"\" return [scenario.to_raw_node() for scenario in self.scenarios] def create_tree(self): \"\"\" This creates an", "not have any knowledge of the date of the scenario. Args: directory (str):", "(self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title, dps=None): \"\"\" Basic plotting routine for the", "name, power dictionaries, probabilities, comments for scenario in scenarios: name += scenario.name +", "depending on the type of the respective source. \"\"\" for i in range(24):", "all the original source power vectors. 
Args: source_names (list[str]): Names of the sources", "i in range(24): for source, source_type in self.types.items(): if source_type in ['solar', 'hydro']:", "Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis def add_load_data(self, load_data, sources): \"\"\" This will", "object with a merged PowerScenario and merged path dictionary \"\"\" # We first", "next 24 hours. return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\" This method", "write_directory: the directory to write in \"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node", "daps-style Raw_Node_Data object. Returns: (Raw_Node_Data): raw node representing scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data,", "a legend. lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) # Display a", "for the next 24 hours. return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\"", "from all scenarios. We assume independence across the scenarios. Args: scenarios (list[ScenarioWithPaths]): A", "directory + os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as f: f.write('Probability: {}\\n'.format(self.probability)) for", "'_nolegend_' # Display a legend. lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True)", "of the form {source_name -> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios):", "where <name> is replaced by the name of the scenario. 
Args: directory (str):", "\"{}:\\n\".format(key) for inner_key, inner_data in data.items(): string += \"{}: {}\\n\".format(inner_key, inner_data) return string", "1) data[load_key][key] = str(forecast) + '\\n' for i in range(24): # Duplicate the", "comments def disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This method will update the dictionary of", "return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title, dps=None): \"\"\" Basic plotting routine for", "# Then we merge their path dictionaries path_dict = {} for scen in", "= source + ' ' + str(i + 1) raw_value = self.power_dict[source][i] value", "plot. if self.expected_scenario is not None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r')", "last underscore added name = name[:-1] return PowerScenario(name, power_dict, probability, comments) # This", "= \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with open(directory + os.sep + filename, 'w') as", "the date of the scenario. 
Args: directory (str): The path to the directory", "{}\\ndata: {}\\n\".format( self.name, self.probability, self.data) string += 'Children:\\n' for child in self.children: string", "inner_data) return string class ScenarioTree: \"\"\" Basic Tree representation of a set of", "self.parent is None else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with open(directory", "= self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value for source in self.load_data:", "OrderedDict, namedtuple import numpy as np import matplotlib as mpl import matplotlib.pyplot as", "axis = plt.subplots() for name, vect in self.power_dict.items(): xs = list(range(24)) axis.plot(xs, vect,", "str(self.root) class InternalNode: \"\"\" Representation for an individual node in the Scenario tree.", "scenario was created among other things \"\"\" self.name = name self.power_dict = power_dict", "This returns a list of CommentedRawNodeData objcts instantiated from each of the scenarios.", "del self.power_dict[name] self.power_dict[aggregate_source] = power_vector def plot(self, axis=None): \"\"\" Simple plotting routing which", "ScenarioWithPaths objects to merge Returns: ScenarioWithPaths: The named tuple object with a merged", "a 24-vector of power-values produced. Attributes: name (str): The name of the scenario", "This will create a plot for each source with all the power generation", "scenarios \"\"\" name = \"\" power_dict = {} probability = 1 comments =", "the forecast. Args: dispatch (float): The fraction nondispatchable forecast (float): the forecast value", "__repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for key, data in self.data.items():", "power generation values into strings of minimum # and maximum dispatch values. 
key", "list of the sources used in the scenario Returns: SkeletonScenario: The scenario with", "\"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title, dps=None): \"\"\" Basic plotting routine", "to the proportion of the power of the original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source,", "does not have any knowledge of the date of the scenario. Args: directory", "Args: power_dict (dict): a dictionary mapping source names to 24-vectors of power generation", "ncol=3, shadow=True) # Display a grid and the axes. plt.grid(True, which='both') plt.axhline(y=0, color='k')", "is a dictionary mapping source names to a list of 24 values prob", "columns.append(scenario_name) values = [scenario.probability] + \\ scenario.power_dict[source_name] data[:,i] = values i += 1", "actual and forecast data. Args: write_directory: the directory to write in \"\"\" actual_node", "will store these results in a dictionary called 'data'. \"\"\" def __init__(self, name,", "1) value = data[sources_key][key] elif source_type in ['wind']: key = source + '", "the axes. plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') # Name the axes. plt.xlabel('Hour')", "filename, 'w') as f: json.dump(self.data, f, sort_keys=True, indent=2) def __str__(self): string = \"Internal", "to \"\"\" if axis is None: fig, axis = plt.subplots() for name, vect", "Additional details about how scenario was created among other things \"\"\" def __init__(self,", "+ str(i + 1) raw_value = self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] =", "and children. \"\"\" def __init__(self, name, probability, data=None, parent=None, comments=''): \"\"\" Initializes an", "the values. 
Args: aggregate_source (str): The name of the source to be disaggregated", "zorder=3, label='Forecast', color='r') if self.actual_scenario is not None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3,", "in scenarios] scenario = merge_independent_scenarios(power_scenarios) # Then we merge their path dictionaries path_dict", "produced. Attributes: name (str): The name of the scenario power_dict (dict): A mapping", "= namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This will merge ScenarioWithPaths objects and", "get the source names, these are stored # as keys in the dictionary", "(float): the probability of the scenario data: the data of the scenario parent:", "merge name, power dictionaries, probabilities, comments for scenario in scenarios: name += scenario.name", "set_root(self, node): self.root = node def write_json_files(self, output_directory): \"\"\" Writes json files for", "numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import pandas", "if the # respective hour lies outside the hours of sunshine. # In", "for name, proportion in disaggregated.items(): source_power = [proportion*value for value in aggregated_power] dict_[name]", "given source. Args: directory (str): The name of the directory to save to", "which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') # Name the axes. plt.xlabel('Hour') plt.ylabel('Power in Mw')", "scen.probability /= prob_sum def normalize_names(self): \"\"\" This function will change the names of", "will store a 24-vector of power-values produced. Attributes: name (str): The name of", "by a collection of sources each with a proportion of the values. Args:", "raw data for this scenario. 
The raw data in this sense refers to", "first merge the PowerScenario objects power_scenarios = [scen.scenario for scen in scenarios] scenario", "24-vector of the power generation values produced in a scenario without any of", "from all scenarios as well as the paths from all scenarios. We assume", "strings of minimum # and maximum dispatch values. key = source + '", "to a list of 24 values prob (float): The associated probability of the", "self.expected_scenario is not None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if self.actual_scenario", "instantiated from each of the scenarios. Returns: list[CommentedRawNodeData]: The list of node data", "probability, data=None, parent=None, comments=''): \"\"\" Initializes an object of the InternalNode class. Args:", "if no parent specified, assume parent is root parent_name = 'root' if self.parent", "InternalNode object \"\"\" self.children.append(node) def to_raw_node(self): \"\"\" Converts the internal node into a", "detailing information about the scenario \"\"\" self.name = name self.probability = probability self.parent", "The name of the source to be disaggregated disaggregated (dict[str,float]): A dictionary mapping", "with power and load values \"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources, self.comments)", "Copy the power generation values for the next 24 hours. 
return self._copy_power_generation(data) def", "actual data expected_scenario (SkeletonScenario): the scenario from the forecast data all_scenarios (list[SkeletonScenario]): The", "paths attribute will point to a dictionary # of the form {source_name ->", "dictionary of a scenario sources = list(self.scenarios[0].power_dict.keys()) # Create a plot for every", "for i, scenario in enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\" Writes", "date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)]) columns", "= name self.power_dict = power_dict self.probability = prob self.comments = comments def disaggregate_source(self,", "filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with open(directory + os.sep + filename, 'w')", "the name of the scenario probability (float): the probability of the scenario data:", "[child.to_raw_node() for child in self.root.children] def __str__(self): return \"Tree:\\n\" + str(self.root) class InternalNode:", "+ '_' power_dict.update(scenario.power_dict) probability *= scenario.probability if scenario.comments: comments += '\\n' + scenario.comments", "object of the SkeletonScenario class. Args: power_dict (dict): a dictionary mapping source names", "assume independence across the scenarios. Args: scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths objects", "vectors for the sources provided and store that in a new source with", "scenario.name == 'expected': scen_name = 'forecasts' else: scen_name = scenario.name scenario_name = source_name", "The list of scenarios actual (SkeletonScenario): The actual scenario expected (SkeletonScenario): The expected", "the terms of Contract DE-NA0003525 with NTESS, the U.S. 
# Government retains certain", "\"SkeletonScenario({})\".format(self.name) def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for key, data in self.data.items(): string +=", "tree \"\"\" for child in self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This turns the", "dict_[aggregate_source] for name, proportion in disaggregated.items(): source_power = [proportion*value for value in aggregated_power]", "= '_nolegend_' # Display a legend. lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3,", "grid and the axes. plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') # Name the", "data to scenario files as well. Attributes: scenarios (list[SkeletonScenario]): a list of scenarios", "Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability, self.comments) def __repr__(self): return", "in. Note this will not copy the values, so if they are changed", "scenarios including the actual and expected scenario \"\"\" def __init__(self, scenarios, actual=None, expected=None):", "a SkeletonScenario object using the data in the PowerScenario in conjunction with the", "in the form \"Scenario_i\". \"\"\" for i, scenario in enumerate(self.scenarios): scenario.name = '{}'.format(i+1)", "None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b') # Add dps to the", "disaggregated, is_load=False): \"\"\" This method will update the dictionary of power values by", "matplotlib.pyplot as plt import pandas as pd from prescient.gosm.structures import skeleton_point_paths as paths", "index=index, columns=columns) scenario_frame.to_csv(directory + os.sep + 'scenarios.csv') def create_raw_nodes(self): \"\"\" This returns a", "The raw data in this sense refers to the 24-vector of the power", "'_nolegend_' # Add forecast to the plot. 
if self.expected_scenario is not None: forecast_range", "a load source \"\"\" if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other", "plot(self, axis=None): \"\"\" Simple plotting routing which will plot all the power vectors", "return ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\" This class will only contain information about", "merge_scenarios_with_paths(scenarios): \"\"\" This will merge ScenarioWithPaths objects and return a ScenarioWithPaths objects which", "actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b') # Add dps to the plot.", "raw scenario files to the directory specified. Raw refers to the fact that", "name = name[:-1] return PowerScenario(name, power_dict, probability, comments) # This will have a", "create one if none is passed in). Args: axis: The axis to plot", "file will be Scenario_<name>.dat where <name> is replaced by the name of the", "= source + ' ' + str(i + 25) data[sources_key][key] = value return", "stored in the true into daps-style Raw_Node_Data objects. Returns: (List[Raw_Node_Data]): A list of", "the next 24 hours. for source in self.load_data: key = source + '", "they will be changed in the newly created object Args: load_data (dict[str,List[float]]): A", "\"{}: {}\\n\".format( source_name, \", \".join(map(str, power_vector))) string += 'Probability: {}\\n'.format(self.probability) return string def", "Converts the internal node into a daps-style Raw_Node_Data object. Returns: (Raw_Node_Data): raw node", "# Save the load forecast. 
forecast = self.load_data[source][i] key = source + '", "np.zeros([25, len(sources)*len(all_scenarios)]) columns = [] i = 0 for source_name in sorted(sources): for", "used in the scenario Returns: SkeletonScenario: The scenario with power and load values", "of the new sources to the proportion of the power of the original", "name aggregate_source. It will delete all the original source power vectors. Args: source_names", "+ 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as f: f.write('Probability: {}\\n'.format(self.probability)) for source in self.raw_data:", "' ' + str(i + 1) data[load_key][key] = str(forecast) + '\\n' for i", "which merges all the power dictionaries of the PowerScenario objects passed in. It", "string += \"{}:\\n\".format(key) for inner_key, inner_data in data.items(): string += \"{}: {}\\n\".format(inner_key, inner_data)", "= value return data def to_raw_node(self): \"\"\" Creates a daps-style Raw_Node_Data object from", "\"SkeletonScenario({}):\\n\".format(self.name) for key, data in self.data.items(): string += \"{}:\\n\".format(key) for inner_key, inner_data in", "parent: the parent node comments: A string detailing information about the scenario \"\"\"", "for scenario in self.scenarios: # We pass through the comments as well to", "used to get the source types comments (str): A string containing extra details", "source in self.power_dict: # Translate the power generation values into strings of minimum", "the newly created object Args: load_data (dict[str,List[float]]): A dictionary mapping names of load", "as mpl import matplotlib.pyplot as plt import pandas as pd from prescient.gosm.structures import", "parents and children. \"\"\" def __init__(self, name, probability, data=None, parent=None, comments=''): \"\"\" Initializes", "and maximum # dispatch values as (str) values. data = {sources_key: OrderedDict(), load_key:", "passed in (it will create one if none is passed in). 
Args: axis:", "raw scenario nodes \"\"\" return [child.to_raw_node() for child in self.root.children] def __str__(self): return", "extra details about the scenario \"\"\" PowerScenario.__init__(self, name, power_dict, prob, comments) self.load_data =", "Creates a daps-style Raw_Node_Data object from the scenario. Sets the parent to root", "import matplotlib.pyplot as plt import pandas as pd from prescient.gosm.structures import skeleton_point_paths as", "= InternalNode(\"root\", probability=1) for scenario in self.scenarios: # We pass through the comments", "def __str__(self): return \"Tree:\\n\" + str(self.root) class InternalNode: \"\"\" Representation for an individual", "details about how scenario was created among other things \"\"\" self.name = name", "scenarios \"\"\" return [self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios) def write_raw_scenarios(self, directory, date): \"\"\"", "(SkeletonScenario): The actual scenario expected (SkeletonScenario): The expected scenario \"\"\" self.scenarios = scenarios", "scenario values. \"\"\" # A dictionary of data with strings as keys and", "class should manage all single skeleton scenarios and have methods for exporting data", "data def to_raw_node(self): \"\"\" Creates a daps-style Raw_Node_Data object from the scenario. Sets", "and the axes. 
plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') # Name the axes.", "The scenario with power and load values \"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability, load_data,", "The axis plotted to \"\"\" if axis is None: fig, axis = plt.subplots()", "the children list Args: node (InternalNode): An InternalNode object \"\"\" self.children.append(node) def to_raw_node(self):", "self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\" This function writes out the raw data for", "self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)]) columns = [] i = 0 for source_name", "scenarios \"\"\" if not os.path.isdir(directory): os.mkdir(directory) index = ['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23),", "types comments (str): A string containing extra details about the scenario \"\"\" PowerScenario.__init__(self,", "form \"Scenario_i\". \"\"\" for i, scenario in enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def write_actual_and_expected(self,", "for i in range(24): for source, source_type in self.types.items(): if source_type in ['solar',", "property returns the list of probabilistic scenarios in addition to the actual scenario", "plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') # Name the axes. plt.xlabel('Hour') plt.ylabel('Power in", "of sources each with a proportion of the values. This will update the", "for exporting data to scenario files as well. 
Attributes: scenarios (list[SkeletonScenario]): a list", "sources to 24-vectors sources (List[ExtendedSource]): This is just used to get the source", "underscore added name = name[:-1] return PowerScenario(name, power_dict, probability, comments) # This will", "scenarios actual (SkeletonScenario): The actual scenario expected (SkeletonScenario): The expected scenario \"\"\" self.scenarios", "plotting routing which will plot all the power vectors for every source stored", "expected=None): \"\"\" Initializes an object of the SkeletonScenarioSet class. Args: scenarios (List[SkeletonScenario]): The", "all scenarios as well as the paths from all scenarios. We assume independence", "the source to disaggregate is a load source \"\"\" if is_load: disaggregate_dict(self.load_data) else:", "indent=2) def __str__(self): string = \"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability, self.data)", "objects \"\"\" return [scenario.to_raw_node() for scenario in self.scenarios] def create_tree(self): \"\"\" This creates", "\"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory)", "= source + ' ' + str(i + 1) data[load_key][key] = str(forecast) +", "\"\"\" Initializes an object of the InternalNode class. Args: name (str): the name", "in-place. 
Args: dict_ (dict): The dictionry to disaggregate aggregate_source (str): The name of", "dictionary of power values by replacing the values for the specified source by", "merging all the other scenarios \"\"\" name = \"\" power_dict = {} probability", "which will plot all the power vectors for every source stored in this", "= list(self.scenarios[0].power_dict.keys()) # Create a plot for every source and add all scenarios.", "directory, date): \"\"\" This routine should write all of the raw scenario files", "scenario names, and a probability which is a product of all probabilities as", "of 24 floats and an associated probability. Args: name (str): The name of", "object from the scenario. Sets the parent to root currently. Returns: Raw_Node_Data: The", "we drop the last underscore added name = name[:-1] return PowerScenario(name, power_dict, probability,", "the minimum and the maximum dispatch value for the forecast. Args: dispatch (float):", "def __init__(self): self.root = None def set_root(self, node): self.root = node def write_json_files(self,", "the additonal pysp information. The name of the file will be Scenario_<name>.dat where", "scenario without any of the additonal pysp information. The name of the file", "load sources to 24-vectors sources (List[ExtendedSource]): This is just used to get the", "has an associated name, probability, data, and pointers to parents and children. 
\"\"\"", "\"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in self.power_dict.items(): string += \"{}: {}\\n\".format( source_name, \", \".join(map(str,", "dictionary mapping load sources to 24-vectors sources (List[ExtendedSource]): This is just used to", "Prescient # Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC #", "output_directory): \"\"\" Writes json files for each of the scenarios in the tree", "label = 'Scenarios' for source in sources: plt.figure(source) for scenario in self.scenarios: source_scenario", "a scenario sources = list(self.scenarios[0].power_dict.keys()) # Create a plot for every source and", "The name of the aggregate source \"\"\" power_vector = [0]*24 for name in", "store these results in a dictionary called 'data'. \"\"\" def __init__(self, name, power_dict,", "Raw_Node_Data object from the scenario. Sets the parent to root currently. Returns: Raw_Node_Data:", "day part separators for each source if they are supposed to be in", "each source of interest, this will store a 24-vector of power-values produced. 
Attributes:", "= scenarios self.actual_scenario = actual self.expected_scenario = expected self.source_names = list(scenarios[0].power_dict.keys()) @property def", "f: f.write('Probability: {}\\n'.format(self.probability)) for source in self.raw_data: f.write('Source: {}\\n'.format(source)) for dt, value in", "sources = list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)]) columns = []", "probability self.parent = parent self.data = data self.children = [] self.comments = comments", "self.probability, self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for key,", "return value def _copy_power_generation(self, data): \"\"\" Copies the power generation data of the", "PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other in disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source]", "columns = [] i = 0 for source_name in sorted(sources): for scenario in", "paths import prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses as basicclasses from prescient.util.distributions.distribution_factory import distribution_factory", "(i.e, Min Dispatchable Power, Max Dispatchable Power). It will store these results in", "single skeleton scenarios and have methods for exporting data to scenario files as", "the scenarios. This will create a plot for each source with all the", "self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value for source in self.load_data: # Save the load", "software. # This software is distributed under the Revised BSD License. 
# ___________________________________________________________________________", "name of the scenario power_dict (dict[str,List[float]]): This is a dictionary mapping source names", "the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis def add_load_data(self, load_data, sources):", "scenario.name + '_' power_dict.update(scenario.power_dict) probability *= scenario.probability if scenario.comments: comments += '\\n' +", "json-files for the actual and forecast data. Args: write_directory: the directory to write", "source in self.load_data: # Save the load forecast. forecast = self.load_data[source][i] key =", "\"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability, self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def", "names, and a probability which is a product of all probabilities as we", "source power vectors. Args: source_names (list[str]): Names of the sources to aggregate aggregate_sources", "the power vectors for every source stored in this scenario onto the axis", "specified. 
Raw refers to the fact that the file will only contain the", "\"PowerScenario({})\".format(self.name) def __str__(self): string = \"\" string += \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in", "disaggregated) def aggregate_sources(self, source_names, aggregate_source): \"\"\" This method will add up all the", "for source, source_type in self.types.items(): if source_type in ['solar', 'hydro']: key = source", "forecast value Returns: string: the minimum and the maximum dispatch value, separated by", "replacing the values for the specified source by a collection of sources each", "self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\" This function", "(str): The title of the plot dps (dict): the day part separators for", "+ ' ' + str(i + 1) value = data[sources_key][key] elif source_type in", "child in self.root.children] def __str__(self): return \"Tree:\\n\" + str(self.root) class InternalNode: \"\"\" Representation", "def plot_scenarios(self, directory, title, dps=None): \"\"\" Basic plotting routine for the scenarios. This", "Writes json-files for the actual and forecast data. Args: write_directory: the directory to", "def to_raw_node(self): \"\"\" Creates a daps-style Raw_Node_Data object from the scenario. Sets the", "indicate whether the source to disaggregate is a load source \"\"\" if is_load:", "in a scenario without any of the additonal pysp information. The name of", "plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') # Name the axes. plt.xlabel('Hour') plt.ylabel('Power in Mw') #", "(SkeletonScenario): the scenario from the forecast data all_scenarios (list[SkeletonScenario]): The list of scenarios", "the scenarios to be numbered in the form \"Scenario_i\". 
\"\"\" for i, scenario", "which is the concatenation of all scenario names, and a probability which is", "about power generation and the associated probability and name. For each source of", "dictionary of data with strings as keys and the minimum and maximum #", "\"\"\" return [self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios) def write_raw_scenarios(self, directory, date): \"\"\" This", "list of all scenarios \"\"\" return [self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios) def write_raw_scenarios(self,", "this will store a 24-vector of power-values produced. Attributes: name (str): The name", "an object of the SkeletonScenario class. Args: power_dict (dict): a dictionary mapping source", "[proportion*value for value in aggregated_power] dict_[name] = source_power class SkeletonScenarioSet: \"\"\" This class", "names, these are stored # as keys in the dictionary of a scenario", "['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This will merge ScenarioWithPaths objects and return a", "dispatch_value(self, dispatch, forecast): \"\"\" Determines the minimum and the maximum dispatch value for", "This method will update the dictionary of power values by replacing the values", "a dictionary called 'data'. \"\"\" def __init__(self, name, power_dict, prob, load_data, sources, comments=''):", "name of the scenario probability (float): the probability of the scenario data: the", "is_load=False): \"\"\" This method will update the dictionary of power values by replacing", "Create a title. 
plt.title(title + source, y=1.08) plt.savefig(directory + os.sep + source, bbox_extra_artists=(lgd,),", "24-vectors of load values sources (List[ExtendedSource]): A list of the sources used in", "as f: f.write('Probability: {}\\n'.format(self.probability)) for source in self.raw_data: f.write('Source: {}\\n'.format(source)) for dt, value", "string += \"{}: {}\\n\".format(inner_key, inner_data) return string class ScenarioTree: \"\"\" Basic Tree representation", "comments def add_child(self, node): \"\"\" Adds an internal node to the children list", "zorder=2, label=label, marker='o', color='g') label = '_nolegend_' # Add forecast to the plot.", "dictionary # of the form {source_name -> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths'])", "list of scenarios actual_scenario (SkeletonScenario): the scenario from the actual data expected_scenario (SkeletonScenario):", "corresponding paths # used to create it. The paths attribute will point to", "the scenario Returns: SkeletonScenario: The scenario with power and load values \"\"\" return", "in dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--') label = '_nolegend_' # Display a", "= 0 if forecast is None else forecast min_dispatch = dispatch * forecast", "merge ScenarioWithPaths objects and return a ScenarioWithPaths objects which has the power generation", "key = source + ' ' + str(i + 1) data[load_key][key] = str(forecast)", "self.power_dict[name] self.power_dict[aggregate_source] = power_vector def plot(self, axis=None): \"\"\" Simple plotting routing which will", "of power values by replacing the values for the specified source by a", "SkeletonScenarioSet class. 
Args: scenarios (List[SkeletonScenario]): The list of scenarios actual (SkeletonScenario): The actual", "to the directory to store the files date (datetime-like): The date of the", "# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC # (NTESS).", "# We merge name, power dictionaries, probabilities, comments for scenario in scenarios: name", "Returns: string: the minimum and the maximum dispatch value, separated by a blank", "power generation vectors from all scenarios as well as the paths from all", "plt.figure(source) for scenario in self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o',", "dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch, forecast): \"\"\" Determines the", "keys to scenario values. \"\"\" # A dictionary of data with strings as", "among other things \"\"\" self.name = name self.power_dict = power_dict self.probability = prob", "the source to be disaggregated disaggregated (dict[str,float]): A dictionary mapping names of the", "forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns the corresponding", "plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g') label = '_nolegend_' # Add forecast to", "for the next 24 hours. 
for source in self.load_data: key = source +", "scenario from the actual data expected_scenario (SkeletonScenario): the scenario from the forecast data", "*= scenario.probability if scenario.comments: comments += '\\n' + scenario.comments # Here we drop", "scenario_name = source_name + ': ' + scen_name columns.append(scenario_name) values = [scenario.probability] +", "# In the case of solar power, the passed forecast will be None", "__str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for key, data in self.data.items(): string += \"{}:\\n\".format(key) for", "aggregate_source, disaggregated): \"\"\" This method will update the dictionary of power values by", "to the fact that the file will only contain the 24-vectors of the", "ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\" This class will only contain information about power", "all of the raw scenario files to the directory specified. Raw refers to", "self.name < other.name class SkeletonScenario(PowerScenario): \"\"\" This class should contain all the data", "self.root = None def set_root(self, node): self.root = node def write_json_files(self, output_directory): \"\"\"", "if axis is None: fig, axis = plt.subplots() for name, vect in self.power_dict.items():", "will only contain the 24-vectors of the power generation and the probabilities. This", "for the next 24 hours, depending on the type of the respective source.", "of the scenario power_dict (dict): A mapping from source names to lists of", "specified, assume parent is root parent_name = 'root' if self.parent is None else", "in the Scenario tree. Each node has an associated name, probability, data, and", "class. Args: power_dict (dict): a dictionary mapping source names to 24-vectors of power", "tree. Each node has an associated name, probability, data, and pointers to parents", "Args: dict_ (dict): The dictionry to disaggregate aggregate_source (str): The name of the", "# (NTESS). 
Under the terms of Contract DE-NA0003525 with NTESS, the U.S. #", "val in enumerate(self.power_dict[name]): power_vector[i] += val del self.power_dict[name] self.power_dict[aggregate_source] = power_vector def plot(self,", "in conjunction with the load data passed in. Note this will not copy", "in self.types.items(): if source_type in ['solar', 'hydro']: key = source + ' '", "of the sources to aggregate aggregate_sources (str): The name of the aggregate source", "= comments def add_child(self, node): \"\"\" Adds an internal node to the children", "def add_child(self, node): \"\"\" Adds an internal node to the children list Args:", "scenario. Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self,", "tree = ScenarioTree() tree.set_root(root) return tree def normalize_probabilities(self): \"\"\" This function will normalize", "the last underscore added name = name[:-1] return PowerScenario(name, power_dict, probability, comments) #", "PowerScenario(name, power_dict, probability, comments) # This will have a PowerScenario object and the", "will be Scenario_<name>.dat where <name> is replaced by the name of the scenario.", "' + scen_name columns.append(scenario_name) values = [scenario.probability] + \\ scenario.power_dict[source_name] data[:,i] = values", "= data[sources_key][key] else: raise RuntimeError(\"Power source '{}' has type '{}', the only \"", "\"\"\" This function will change the names of the scenarios to be numbered", "# Display a grid and the axes. 
plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k')", "Writes json file for this node to the specified directory Args: directory: the", "path to the directory to store the scenario file \"\"\" scen_file = directory", "source in sources} def scenario_data(self): \"\"\" This will construct the dictionary mapping keys", "will update the dictionry in-place. Args: dict_ (dict): The dictionry to disaggregate aggregate_source", "os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as f: f.write('Probability: {}\\n'.format(self.probability)) for source in", "'wind', \" \"and 'hydro'.\".format(source, source_type)) key = source + ' ' + str(i", "the files date (datetime-like): The date of the scenarios \"\"\" if not os.path.isdir(directory):", "to store the json file in \"\"\" # if no parent specified, assume", "skeleton scenarios and have methods for exporting data to scenario files as well.", "\"\"\" This returns a list of CommentedRawNodeData objcts instantiated from each of the", "every source stored in this scenario onto the axis passed in (it will", "the corresponding paths # used to create it. The paths attribute will point", "self.parent = parent self.data = data self.children = [] self.comments = comments def", "merge Returns: PowerScenario: A scenario which is formed by merging all the other", "little hack to get the source names, these are stored # as keys", "not copy the values, so if they are changed by some other function,", "directory specified. 
Raw refers to the fact that the file will only contain", "__str__(self): return \"Tree:\\n\" + str(self.root) class InternalNode: \"\"\" Representation for an individual node", "sources} self.dispatches = {source.name: source.frac_nondispatch for source in sources} def scenario_data(self): \"\"\" This", "data objects \"\"\" return [scenario.to_raw_node() for scenario in self.scenarios] def create_tree(self): \"\"\" This", "the case of solar power, the passed forecast will be None if the", "value = data[sources_key][key] else: raise RuntimeError(\"Power source '{}' has type '{}', the only", "the power of the original source is_load (bool): A flag to indicate whether", "data[sources_key][key] else: raise RuntimeError(\"Power source '{}' has type '{}', the only \" \"types", "sources (List[ExtendedSource]): A list of the sources used in the scenario Returns: SkeletonScenario:", "the probabilities of the scenarios so that they add up to 1. \"\"\"", "\"\"\" Simple plotting routing which will plot all the power vectors for every", "i, scenario in enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\" Writes json-files", "probability, comments) # This will have a PowerScenario object and the corresponding paths", "{source_name -> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This will", "RuntimeError(\"Power source '{}' has type '{}', the only \" \"types recognized are 'solar',", "+ os.sep + filename, 'w') as f: json.dump(self.data, f, sort_keys=True, indent=2) def __str__(self):", "have any knowledge of the date of the scenario. Args: directory (str): The", "(list[str]): Names of the sources to aggregate aggregate_sources (str): The name of the", "probabilities of the scenarios so that they add up to 1. 
\"\"\" prob_sum", "original source \"\"\" aggregated_power = dict_[aggregate_source] del dict_[aggregate_source] for name, proportion in disaggregated.items():", "return \"Tree:\\n\" + str(self.root) class InternalNode: \"\"\" Representation for an individual node in", "plot for every source and add all scenarios. label = 'Scenarios' for source", "self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b') # Add dps to the plot. if dps", "in source_names: for i, val in enumerate(self.power_dict[name]): power_vector[i] += val del self.power_dict[name] self.power_dict[aggregate_source]", "Contract DE-NA0003525 with NTESS, the U.S. # Government retains certain rights in this", "point to a dictionary # of the form {source_name -> OneDimPath} ScenarioWithPaths =", "National Technology & Engineering Solutions of Sandia, LLC # (NTESS). Under the terms", "the probabilities. This will create a file called 'scenarios.csv' in the directory specified.", "def __lt__(self, other): return self.name < other.name class SkeletonScenario(PowerScenario): \"\"\" This class should", "the load forecast. forecast = self.load_data[source][i] key = source + ' ' +", "write_directory): \"\"\" Writes json-files for the actual and forecast data. Args: write_directory: the", "of scenarios actual (SkeletonScenario): The actual scenario expected (SkeletonScenario): The expected scenario \"\"\"", "for i in range(24): for source in self.power_dict: # Translate the power generation", "names of the scenarios to be numbered in the form \"Scenario_i\". \"\"\" for", "certain rights in this software. # This software is distributed under the Revised", "contains actual data for each stage. 
\"\"\" def __init__(self): self.root = None def", "power and load values \"\"\" return SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources, self.comments) def", "of the power generation values produced in a scenario without any of the", "formed by merging all the other scenarios \"\"\" name = \"\" power_dict =", "axis to plot to Returns: axis: The axis plotted to \"\"\" if axis", "name, probability, data, and pointers to parents and children. \"\"\" def __init__(self, name,", "names of load sources to 24-vectors of load values sources (List[ExtendedSource]): A list", "under the Revised BSD License. # ___________________________________________________________________________ import datetime import json import os", "forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if self.actual_scenario is not None: actual_range", "\"\"\" This will create a SkeletonScenario object using the data in the PowerScenario", "probability of the scenario load_data (dict[str,List[float]]): a dictionary mapping load sources to 24-vectors", "This is a little hack to get the source names, these are stored", "as pyspgen import prescient.gosm.basicclasses as basicclasses from prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions import", "actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability, self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def", "called 'data'. \"\"\" def __init__(self, name, power_dict, prob, load_data, sources, comments=''): \"\"\" Initializes", "which is a product of all probabilities as we assume independence. 
Args: scenarios", "= {sources_key: OrderedDict(), load_key: OrderedDict()} for i in range(24): for source in self.power_dict:", "\"\"\" Converts the internal node into a daps-style Raw_Node_Data object. Returns: (Raw_Node_Data): raw", "key = source + ' ' + str(i + 1) raw_value = self.power_dict[source][i]", "the axis passed in (it will create one if none is passed in).", "Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title, dps=None):", "values that change from scenario to scenario (i.e, Min Dispatchable Power, Max Dispatchable", "name[:-1] return PowerScenario(name, power_dict, probability, comments) # This will have a PowerScenario object", "range(24): for source, source_type in self.types.items(): if source_type in ['solar', 'hydro']: key =", "Basic Tree representation of a set of scenarios. The root points to an", "one if none is passed in). Args: axis: The axis to plot to", "dictionary called 'data'. \"\"\" def __init__(self, name, power_dict, prob, load_data, sources, comments=''): \"\"\"", "self.load_data[source][i] key = source + ' ' + str(i + 1) data[load_key][key] =", "<name> is replaced by the name of the scenario. 
Args: directory (str): A", "to store the files date (datetime-like): The date of the scenarios \"\"\" if", "tree.set_root(root) return tree def normalize_probabilities(self): \"\"\" This function will normalize the probabilities of", "name, power_dict, prob, comments) self.load_data = load_data self.types = {source.name: source.source_type for source", "for source_name, power_vector in self.power_dict.items(): string += \"{}: {}\\n\".format( source_name, \", \".join(map(str, power_vector)))", "all scenario names, and a probability which is a product of all probabilities", "to the proportion of the power of the original source is_load (bool): A", "= '' # We merge name, power dictionaries, probabilities, comments for scenario in", "was created among other things \"\"\" self.name = name self.power_dict = power_dict self.probability", "# This software is distributed under the Revised BSD License. # ___________________________________________________________________________ import", "{source.name: source.source_type for source in sources} self.dispatches = {source.name: source.frac_nondispatch for source in", "NTESS, the U.S. # Government retains certain rights in this software. # This", "by replacing the values for the specified source by a collection of sources", "and have methods for exporting data to scenario files as well. Attributes: scenarios", "the actual and expected scenario \"\"\" def __init__(self, scenarios, actual=None, expected=None): \"\"\" Initializes", "value, separated by a blank space \"\"\" # In the case of solar", "= power_dict self.probability = prob self.comments = comments def disaggregate_source(self, aggregate_source, disaggregated): \"\"\"", "is distributed under the Revised BSD License. # ___________________________________________________________________________ import datetime import json", "vectors from all scenarios as well as the paths from all scenarios. We", "in (it will create one if none is passed in). 
Args: axis: The", "directory (str): The path to the directory to store the files date (datetime-like):", "and name. For each source of interest, this will store a 24-vector of", "the raw scenario files to the directory specified. Raw refers to the fact", "in self.scenarios) for scen in self.scenarios: scen.probability /= prob_sum def normalize_names(self): \"\"\" This", "= pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory + os.sep + 'scenarios.csv') def create_raw_nodes(self): \"\"\" This", "a product of all probabilities as we assume independence. Args: scenarios (List[PowerScenario]): The", "bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) # Display a grid and the axes. plt.grid(True, which='both')", "the PowerScenario objects power_scenarios = [scen.scenario for scen in scenarios] scenario = merge_independent_scenarios(power_scenarios)", "{} for scen in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\" This", "set of scenarios. The root points to an internal node which contains actual", "Each node has an associated name, probability, data, and pointers to parents and", "the expected scenario. Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node())", "the power generation values produced in a scenario without any of the additonal", "+ ': ' + scen_name columns.append(scenario_name) values = [scenario.probability] + \\ scenario.power_dict[source_name] data[:,i]", "name (str): the name of the scenario probability (float): the probability of the", "the Scenario Tree class using self.scenarios. Returns: ScenarioTree: the scenario tree \"\"\" root", "= self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)]) columns = [] i = 0 for", "power vectors for the sources provided and store that in a new source", "object. 
Returns: (Raw_Node_Data): raw node representing scenario \"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name,", "None else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with open(directory + os.sep", "scenario = merge_independent_scenarios(power_scenarios) # Then we merge their path dictionaries path_dict = {}", "tuple object with a merged PowerScenario and merged path dictionary \"\"\" # We", "of the scenario probability (float): the probability of the scenario data: the data", "disaggregated.items(): source_power = [proportion*value for value in aggregated_power] dict_[name] = source_power class SkeletonScenarioSet:", "merged PowerScenario and merged path dictionary \"\"\" # We first merge the PowerScenario", "add_load_data(self, load_data, sources): \"\"\" This will create a SkeletonScenario object using the data", "the passed forecast will be None if the # respective hour lies outside", "self.comments = comments def add_child(self, node): \"\"\" Adds an internal node to the", "as well as the paths from all scenarios. We assume independence across the", "of the scenarios so that they add up to 1. \"\"\" prob_sum =", "sources to the proportion of the power of the original source \"\"\" disaggregate_dict(self.power_dict,", "fraction nondispatchable forecast (float): the forecast value Returns: string: the minimum and the", "add_child(self, node): \"\"\" Adds an internal node to the children list Args: node", "{}'.format(self.name)) axis.legend() return axis def add_load_data(self, load_data, sources): \"\"\" This will create a", "' + str(i + 1) data[load_key][source+' '+str(i+25)] = \\ data[load_key][key] # Copy the", "provided and store that in a new source with the name aggregate_source. It", "supposed to be in the plot \"\"\" if not os.path.isdir(directory): os.makedirs(directory) # This", "that they add up to 1. 
\"\"\" prob_sum = sum(scen.probability for scen in", "Args: directory (str): The name of the directory to save to title (str):", "in enumerate(self.power_dict[name]): power_vector[i] += val del self.power_dict[name] self.power_dict[aggregate_source] = power_vector def plot(self, axis=None):", "dispatch values. key = source + ' ' + str(i + 1) raw_value", "mapping source names to 24-vectors of power generation values prob (float): the probability", "and add all scenarios. label = 'Scenarios' for source in sources: plt.figure(source) for", "'root', self.probability, self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for", "(dict): A mapping from source names to lists of 24 floats of power", "OrderedDict()} for i in range(24): for source in self.power_dict: # Translate the power", "sources_key = 'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This method will", "as we assume independence. Args: scenarios (List[PowerScenario]): The list of scenarios to merge", "'{}' has type '{}', the only \" \"types recognized are 'solar', 'wind', \"", "= list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data = np.zeros([25, len(sources)*len(all_scenarios)]) columns = [] i", "title, dps=None): \"\"\" Basic plotting routine for the scenarios. This will create a", "\"\"\" Determines the minimum and the maximum dispatch value for the forecast. 
Args:", "The expected scenario \"\"\" self.scenarios = scenarios self.actual_scenario = actual self.expected_scenario = expected", "the directory to store the files date (datetime-like): The date of the scenarios", "disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self,", "in self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This turns the scenarios stored in the", "is None else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with open(directory +", "source_power class SkeletonScenarioSet: \"\"\" This class should manage all single skeleton scenarios and", "of sources each with a proportion of the values. Args: aggregate_source (str): The", "path_dict = {} for scen in scenarios: path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class PowerScenario:", "of the scenario parent: the parent node comments: A string detailing information about", "pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self, directory): \"\"\" Writes json file", "mapping names of the new sources to the proportion of the power of", "source_name + ': ' + scen_name columns.append(scenario_name) values = [scenario.probability] + \\ scenario.power_dict[source_name]", "comments=''): \"\"\" Initializes an object of the SkeletonScenario class. Args: power_dict (dict): a", "source_names (list[str]): Names of the sources to aggregate aggregate_sources (str): The name of", "(str): A path to the directory to store the scenario file \"\"\" scen_file", "scenarios (List[SkeletonScenario]): The list of scenarios actual (SkeletonScenario): The actual scenario expected (SkeletonScenario):", "so that they add up to 1. 
\"\"\" prob_sum = sum(scen.probability for scen", "sort_keys=True, indent=2) def __str__(self): string = \"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability,", "ScenarioWithPaths objects which has the power generation vectors from all scenarios as well", "type '{}', the only \" \"types recognized are 'solar', 'wind', \" \"and 'hydro'.\".format(source,", "other things \"\"\" self.name = name self.power_dict = power_dict self.probability = prob self.comments", "to merge Returns: ScenarioWithPaths: The named tuple object with a merged PowerScenario and", "to lists of 24 floats and an associated probability. Args: name (str): The", "from each of the scenarios. Returns: list[CommentedRawNodeData]: The list of node data objects", "'{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\" Writes json-files for the actual and forecast data.", "disaggregate aggregate_source (str): The name of the source to be disaggregated disaggregated (dict[str,float]):", "root currently. Returns: Raw_Node_Data: The equivalent Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name,", "string += \"{}: {}\\n\".format( source_name, \", \".join(map(str, power_vector))) string += 'Probability: {}\\n'.format(self.probability) return", "some other function, they will be changed in the newly created object Args:", "probabilities, comments for scenario in scenarios: name += scenario.name + '_' power_dict.update(scenario.power_dict) probability", "to be numbered in the form \"Scenario_i\". \"\"\" for i, scenario in enumerate(self.scenarios):", "probabilities as we assume independence. 
Args: scenarios (List[PowerScenario]): The list of scenarios to", "of the source to be disaggregated disaggregated (dict[str,float]): A dictionary mapping names of", "\"\"\" return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self, directory): \"\"\" Writes", "load_data self.types = {source.name: source.source_type for source in sources} self.dispatches = {source.name: source.frac_nondispatch", "a grid and the axes. plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') # Name", "addition to the actual scenario and the expected scenario. Returns: list[SkeletonScenario]: The list", "Basic plotting routine for the scenarios. This will create a plot for each", "# Name the axes. plt.xlabel('Hour') plt.ylabel('Power in Mw') # Create a title. plt.title(title", "true into daps-style Raw_Node_Data objects. Returns: (List[Raw_Node_Data]): A list of raw scenario nodes", "load_data, sources): \"\"\" This will create a SkeletonScenario object using the data in", "directory: the directory to store the json file in \"\"\" # if no", "assume parent is root parent_name = 'root' if self.parent is None else self.parent.name", "(str): The name of the scenario power_dict (dict): A mapping from source names", "of scenarios to merge Returns: PowerScenario: A scenario which is formed by merging", "actual self.expected_scenario = expected self.source_names = list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\" This property", "software is distributed under the Revised BSD License. # ___________________________________________________________________________ import datetime import", "will point to a dictionary # of the form {source_name -> OneDimPath} ScenarioWithPaths", "load data passed in. 
Note this will not copy the values, so if", "raw_value = self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value for source in", "comments (str): A string containing extra details about the scenario \"\"\" PowerScenario.__init__(self, name,", "= {source.name: source.frac_nondispatch for source in sources} def scenario_data(self): \"\"\" This will construct", "for scen in self.scenarios: scen.probability /= prob_sum def normalize_names(self): \"\"\" This function will", "the forecast data all_scenarios (list[SkeletonScenario]): The list of scenarios including the actual and", "if self.expected_scenario is not None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if", "routine should write all of the raw scenario files to the directory specified.", "+= scenario.name + '_' power_dict.update(scenario.power_dict) probability *= scenario.probability if scenario.comments: comments += '\\n'", "produced in a scenario without any of the additonal pysp information. The name", "new source with the name aggregate_source. It will delete all the original source", "ScenarioTree: \"\"\" Basic Tree representation of a set of scenarios. The root points", "+ scenario.comments # Here we drop the last underscore added name = name[:-1]", "scenario power_dict (dict): A mapping from source names to lists of 24 floats", "in self.power_dict.items(): string += \"{}: {}\\n\".format( source_name, \", \".join(map(str, power_vector))) string += 'Probability:", "to merge Returns: PowerScenario: A scenario which is formed by merging all the", "as (str) values. 
data = {sources_key: OrderedDict(), load_key: OrderedDict()} for i in range(24):", "The fraction nondispatchable forecast (float): the forecast value Returns: string: the minimum and", "def _copy_power_generation(self, data): \"\"\" Copies the power generation data of the day for", "of CommentedRawNodeData objcts instantiated from each of the scenarios. Returns: list[CommentedRawNodeData]: The list", "dps is not None: label = 'Day Part Separators' for h in dps[source]:", "scenarios so that they add up to 1. \"\"\" prob_sum = sum(scen.probability for", "is replaced by the name of the scenario. Args: directory (str): A path", "of the scenario comments (str): Additional details about how scenario was created among", "if source_type in ['solar', 'hydro']: key = source + ' ' + str(i", "in scenarios: name += scenario.name + '_' power_dict.update(scenario.power_dict) probability *= scenario.probability if scenario.comments:", "\"\"\" if axis is None: fig, axis = plt.subplots() for name, vect in", "date (datetime-like): The date of the scenarios \"\"\" if not os.path.isdir(directory): os.mkdir(directory) index", "Separators' for h in dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--') label = '_nolegend_'", "{}\\n\".format( source_name, \", \".join(map(str, power_vector))) string += 'Probability: {}\\n'.format(self.probability) return string def __lt__(self,", "\"\"\" PowerScenario.__init__(self, name, power_dict, prob, comments) self.load_data = load_data self.types = {source.name: source.source_type", "def __init__(self, name, power_dict, prob, load_data, sources, comments=''): \"\"\" Initializes an object of", "dictionary mapping names of the new sources to the proportion of the power", "in sorted(sources): for scenario in all_scenarios: if scenario.name == 'expected': scen_name = 'forecasts'", "data passed in. 
Note this will not copy the values, so if they", "'+str(i+25)] = \\ data[load_key][key] # Copy the power generation values for the next", "scenario in enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\" Writes json-files for", "in ['solar', 'hydro']: key = source + ' ' + str(i + 1)", "data = {sources_key: OrderedDict(), load_key: OrderedDict()} for i in range(24): for source in", "other scenarios \"\"\" name = \"\" power_dict = {} probability = 1 comments", "in a dictionary called 'data'. \"\"\" def __init__(self, name, power_dict, prob, load_data, sources,", "PowerScenario: \"\"\" This class will only contain information about power generation and the", "axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis def add_load_data(self, load_data, sources): \"\"\" This", "= \"\" string += \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in self.power_dict.items(): string += \"{}:", "def write_actual_and_expected(self, write_directory): \"\"\" Writes json-files for the actual and forecast data. Args:", "sources to the proportion of the power of the original source \"\"\" aggregated_power", "load values sources (List[ExtendedSource]): A list of the sources used in the scenario", "title. plt.title(title + source, y=1.08) plt.savefig(directory + os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source)", "self.name, 'root', self.probability, self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name)", "+ filename, 'w') as f: json.dump(self.data, f, sort_keys=True, indent=2) def __str__(self): string =", "to root currently. 
Returns: Raw_Node_Data: The equivalent Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data,", "i += 1 scenario_frame = pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory + os.sep + 'scenarios.csv')", "{} probability = 1 comments = '' # We merge name, power dictionaries,", "scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root) return tree def normalize_probabilities(self):", "+ '\\n' for i in range(24): # Duplicate the load forecast for the", "as well. Attributes: scenarios (list[SkeletonScenario]): a list of scenarios actual_scenario (SkeletonScenario): the scenario", "the PowerScenario objects passed in. It will construct a name which is the", "file called 'scenarios.csv' in the directory specified. It is necessary to pass in", "source. \"\"\" for i in range(24): for source, source_type in self.types.items(): if source_type", "Args: name (str): The name of the scenario power_dict (dict[str,List[float]]): This is a", "\"\"\" Representation for an individual node in the Scenario tree. 
Each node has", "SkeletonScenario(PowerScenario): \"\"\" This class should contain all the data parameters and values that", "+ os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This creates a", "list[CommentedRawNodeData]: The list of node data objects \"\"\" return [scenario.to_raw_node() for scenario in", "def create_raw_nodes(self): \"\"\" This returns a list of CommentedRawNodeData objcts instantiated from each", "name of the aggregate source \"\"\" power_vector = [0]*24 for name in source_names:", "source_name, power_vector in self.power_dict.items(): string += \"{}: {}\\n\".format( source_name, \", \".join(map(str, power_vector))) string", "to pass in the date since this object does not have any knowledge", "InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root) return tree def", "This routine should write all of the raw scenario files to the directory", "value def _copy_power_generation(self, data): \"\"\" Copies the power generation data of the day", "well. Attributes: scenarios (list[SkeletonScenario]): a list of scenarios actual_scenario (SkeletonScenario): the scenario from", "actual and expected scenario \"\"\" def __init__(self, scenarios, actual=None, expected=None): \"\"\" Initializes an", "for source in sources} self.dispatches = {source.name: source.frac_nondispatch for source in sources} def", "\"Tree:\\n\" + str(self.root) class InternalNode: \"\"\" Representation for an individual node in the", "dictionary mapping source names to 24-vectors of power generation values prob (float): the", "in the date since this object does not have any knowledge of the", "delete all the original source power vectors. 
Args: source_names (list[str]): Names of the", "= probability self.parent = parent self.data = data self.children = [] self.comments =", "node): \"\"\" Adds an internal node to the children list Args: node (InternalNode):", "only \" \"types recognized are 'solar', 'wind', \" \"and 'hydro'.\".format(source, source_type)) key =", "self.source_names = list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\" This property returns the list of", "for source in sources: plt.figure(source) for scenario in self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario,", "is a product of all probabilities as we assume independence. Args: scenarios (List[PowerScenario]):", "i in range(24): # Duplicate the load forecast for the next 24 hours.", "Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis def add_load_data(self, load_data, sources): \"\"\"", "merged path dictionary \"\"\" # We first merge the PowerScenario objects power_scenarios =", "['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios = self.all_scenarios data", "A dictionary mapping names of load sources to 24-vectors of load values sources", "# Questionable... 
internal_node = InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node) tree = ScenarioTree()", "root parent_name = 'root' if self.parent is None else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format(", "merge_independent_scenarios(power_scenarios) # Then we merge their path dictionaries path_dict = {} for scen", "self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for key, data", "object, one must pass a scenario name, a dictionary mapping source names to", "onto the axis passed in (it will create one if none is passed", "source types comments (str): A string containing extra details about the scenario \"\"\"", "created object Args: load_data (dict[str,List[float]]): A dictionary mapping names of load sources to", "This will merge ScenarioWithPaths objects and return a ScenarioWithPaths objects which has the", "This class should contain all the data parameters and values that change from", "as keys and the minimum and maximum # dispatch values as (str) values.", "\\ sorted(self.scenarios) def write_raw_scenarios(self, directory, date): \"\"\" This routine should write all of", "comments=self.comments) def write_json(self, directory): \"\"\" Writes json file for this node to the", "and values that change from scenario to scenario (i.e, Min Dispatchable Power, Max", "i, val in enumerate(self.power_dict[name]): power_vector[i] += val del self.power_dict[name] self.power_dict[aggregate_source] = power_vector def", "of raw scenario nodes \"\"\" return [child.to_raw_node() for child in self.root.children] def __str__(self):", "how scenario was created among other things \"\"\" def __init__(self, name, power_dict, prob,", "U.S. # Government retains certain rights in this software. 
# This software is", "function, they will be changed in the newly created object Args: load_data (dict[str,List[float]]):", "= name self.probability = probability self.parent = parent self.data = data self.children =", "# Display a legend. lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) #", "scenarios to merge Returns: PowerScenario: A scenario which is formed by merging all", "self.load_data: key = source + ' ' + str(i + 1) data[load_key][source+' '+str(i+25)]", "(InternalNode): An InternalNode object \"\"\" self.children.append(node) def to_raw_node(self): \"\"\" Converts the internal node", "source to disaggregate is a load source \"\"\" if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self,", "aggregate_source, disaggregated) def aggregate_sources(self, source_names, aggregate_source): \"\"\" This method will add up all", "# respective hour lies outside the hours of sunshine. # In this case,", "\"\"\" This creates a scenario which merges all the power dictionaries of the", "among other things \"\"\" def __init__(self, name, power_dict, prob, comments=''): \"\"\" To initialize", "The name of the scenario power_dict (dict[str,List[float]]): This is a dictionary mapping source", "the plot. if dps is not None: label = 'Day Part Separators' for", "node to the children list Args: node (InternalNode): An InternalNode object \"\"\" self.children.append(node)", "a daps-style Raw_Node_Data object from the scenario. 
Sets the parent to root currently.", "A collection of ScenarioWithPaths objects to merge Returns: ScenarioWithPaths: The named tuple object", "title (str): The title of the plot dps (dict): the day part separators", "specified source by a collection of sources each with a proportion of the", "expected_scenario (SkeletonScenario): the scenario from the forecast data all_scenarios (list[SkeletonScenario]): The list of", "' + str(i + 1) data[load_key][key] = str(forecast) + '\\n' for i in", "(dict[str,List[float]]): a dictionary mapping load sources to 24-vectors sources (List[ExtendedSource]): This is just", "axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario {}'.format(self.name)) axis.legend() return", "if none is passed in). Args: axis: The axis to plot to Returns:", "the form \"Scenario_i\". \"\"\" for i, scenario in enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def", "index = ['Probability'] + list( pd.date_range(date, date+datetime.timedelta(hours=23), freq='H')) sources = list(self.scenarios[0].power_dict.keys()) all_scenarios =", "a name which is the concatenation of all scenario names, and a probability", "source + ' ' + str(i + 25) data[sources_key][key] = value return data", "__init__(self, name, power_dict, prob, comments=''): \"\"\" To initialize a PowerScenario object, one must", "load_data, sources, comments=''): \"\"\" Initializes an object of the SkeletonScenario class. 
Args: power_dict", "child in self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This turns the scenarios stored in", "manage all single skeleton scenarios and have methods for exporting data to scenario", "disaggregated (dict[str,float]): A dictionary mapping names of the new sources to the proportion", "Args: node (InternalNode): An InternalNode object \"\"\" self.children.append(node) def to_raw_node(self): \"\"\" Converts the", "the 24-vectors of the power generation and the probabilities. This will create a", "as np import matplotlib as mpl import matplotlib.pyplot as plt import pandas as", "scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g') label = '_nolegend_' # Add forecast", "the source types comments (str): A string containing extra details about the scenario", "for each stage. \"\"\" def __init__(self): self.root = None def set_root(self, node): self.root", "the power dictionaries of the PowerScenario objects passed in. It will construct a", "power vectors. Args: source_names (list[str]): Names of the sources to aggregate aggregate_sources (str):", "class using self.scenarios. Returns: ScenarioTree: the scenario tree \"\"\" root = InternalNode(\"root\", probability=1)", "1 scenario_frame = pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory + os.sep + 'scenarios.csv') def create_raw_nodes(self):", "minimum and maximum # dispatch values as (str) values. data = {sources_key: OrderedDict(),", "The name of the directory to save to title (str): The title of", "a collection of sources each with a proportion of the values. 
Args: aggregate_source", "'hydro']: key = source + ' ' + str(i + 1) value =", "if they are supposed to be in the plot \"\"\" if not os.path.isdir(directory):", "have a PowerScenario object and the corresponding paths # used to create it.", "be changed in the newly created object Args: load_data (dict[str,List[float]]): A dictionary mapping", "for child in self.root.children: child.write_json(output_directory) def create_raw_nodes(self): \"\"\" This turns the scenarios stored", "= {source.name: source.source_type for source in sources} self.dispatches = {source.name: source.frac_nondispatch for source", "the type of the respective source. \"\"\" for i in range(24): for source,", "string += 'Probability: {}\\n'.format(self.probability) return string def __lt__(self, other): return self.name < other.name", "dispatch, forecast): \"\"\" Determines the minimum and the maximum dispatch value for the", "out the raw data for this scenario. The raw data in this sense", "data[load_key][key] # Copy the power generation values for the next 24 hours. return", "pysp information. The name of the file will be Scenario_<name>.dat where <name> is", "an associated name, probability, data, and pointers to parents and children. \"\"\" def", "\"\"\" # if no parent specified, assume parent is root parent_name = 'root'", "# ___________________________________________________________________________ import datetime import json import os from collections import OrderedDict, namedtuple", "range(24): # Duplicate the load forecast for the next 24 hours. 
for source", "node (InternalNode): An InternalNode object \"\"\" self.children.append(node) def to_raw_node(self): \"\"\" Converts the internal", "is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other in disaggregated: self.types[other] = self.types[aggregate_source]", "other): return self.name < other.name class SkeletonScenario(PowerScenario): \"\"\" This class should contain all", "of the date of the scenario. Args: directory (str): The path to the", "= node def write_json_files(self, output_directory): \"\"\" Writes json files for each of the", "passed in. It will construct a name which is the concatenation of all", "This is just used to get the source types comments (str): A string", "The list of all scenarios \"\"\" return [self.actual_scenario, self.expected_scenario] + \\ sorted(self.scenarios) def", "self.types[other] = self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self, directory):", "self.name = name self.power_dict = power_dict self.probability = prob self.comments = comments def", "the scenario. Sets the parent to root currently. Returns: Raw_Node_Data: The equivalent Raw_Node_Data", "prescient.gosm.structures import skeleton_point_paths as paths import prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses as basicclasses", "contain information about power generation and the associated probability and name. For each", "and the expected scenario. Returns: list[SkeletonScenario]: The list of all scenarios \"\"\" return", "Returns the corresponding Raw_Node_Data object for the actual and the expected scenario. 
Returns:", "# This will have a PowerScenario object and the corresponding paths # used", "will only contain information about power generation and the associated probability and name.", "+ str(i + 1) data[load_key][source+' '+str(i+25)] = \\ data[load_key][key] # Copy the power", "def normalize_probabilities(self): \"\"\" This function will normalize the probabilities of the scenarios so", "path to the directory to store the files date (datetime-like): The date of", "scenario sources = list(self.scenarios[0].power_dict.keys()) # Create a plot for every source and add", "self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self): string = \"\" string += \"PowerScenario({})\\n\".format(self.name)", "self.actual_scenario is not None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b') # Add", "day probability (float): A value between 0 and 1 representing the probability of", "def set_root(self, node): self.root = node def write_json_files(self, output_directory): \"\"\" Writes json files", "xs = list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of the Day') axis.set_ylabel('Power Values') axis.set_title('Scenario", "This function will normalize the probabilities of the scenarios so that they add", "' + str(i + 25) data[sources_key][key] = value return data def to_raw_node(self): \"\"\"", "enumerate(self.power_dict[name]): power_vector[i] += val del self.power_dict[name] self.power_dict[aggregate_source] = power_vector def plot(self, axis=None): \"\"\"", "if they are changed by some other function, they will be changed in", "Args: directory (str): The path to the directory to store the files date", "__init__(self, scenarios, actual=None, expected=None): \"\"\" Initializes an object of the SkeletonScenarioSet class. 
Args:", "scenario in self.scenarios: source_scenario = scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g') label", "actual data for each stage. \"\"\" def __init__(self): self.root = None def set_root(self,", "normalize the probabilities of the scenarios so that they add up to 1.", "(list[SkeletonScenario]): The list of scenarios including the actual and expected scenario \"\"\" def", "np import matplotlib as mpl import matplotlib.pyplot as plt import pandas as pd", "dps to the plot. if dps is not None: label = 'Day Part", "directory (str): The name of the directory to save to title (str): The", "scenario.comments: comments += '\\n' + scenario.comments # Here we drop the last underscore", "a proportion of the values. Args: aggregate_source (str): The name of the source", "else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with open(directory + os.sep +", "Then we merge their path dictionaries path_dict = {} for scen in scenarios:", "source names to 24-vectors of power generation values prob (float): the probability of", "The list of scenarios to merge Returns: PowerScenario: A scenario which is formed", "source.source_type for source in sources} self.dispatches = {source.name: source.frac_nondispatch for source in sources}", "comments) # This will have a PowerScenario object and the corresponding paths #", "comments) self.load_data = load_data self.types = {source.name: source.source_type for source in sources} self.dispatches", "self.data) string += 'Children:\\n' for child in self.children: string += str(child) return string", "\"\"\" # In the case of solar power, the passed forecast will be", "Sandia, LLC # (NTESS). 
Under the terms of Contract DE-NA0003525 with NTESS, the", "in self.load_data: key = source + ' ' + str(i + 1) data[load_key][source+'", "stored # as keys in the dictionary of a scenario sources = list(self.scenarios[0].power_dict.keys())", "forecast = 0 if forecast is None else forecast min_dispatch = dispatch *", "this node to the specified directory Args: directory: the directory to store the", "which has the power generation vectors from all scenarios as well as the", "of a scenario sources = list(self.scenarios[0].power_dict.keys()) # Create a plot for every source", "scenario which merges all the power dictionaries of the PowerScenario objects passed in.", "= value for source in self.load_data: # Save the load forecast. forecast =", "pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory + os.sep + 'scenarios.csv') def create_raw_nodes(self): \"\"\" This returns", "the proportion of the power of the original source is_load (bool): A flag", "the power generation vectors from all scenarios as well as the paths from", "from prescient.gosm.structures import skeleton_point_paths as paths import prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses as", "value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch, forecast): \"\"\" Determines the minimum", "forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns the corresponding Raw_Node_Data object for the actual and", "= 'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This method will update", "write_actual_and_expected(self, write_directory): \"\"\" Writes json-files for the actual and forecast data. 
Args: write_directory:", "source by a collection of sources each with a proportion of the values.", "to get the source types comments (str): A string containing extra details about", "Args: directory (str): A path to the directory to store the scenario file", "list Args: node (InternalNode): An InternalNode object \"\"\" self.children.append(node) def to_raw_node(self): \"\"\" Converts", "+ ' ' + str(i + 1) raw_value = self.power_dict[source][i] value = self.dispatch_value(self.dispatches[source],", "a plot for each source with all the power generation data for that", "= merge_independent_scenarios(power_scenarios) # Then we merge their path dictionaries path_dict = {} for", "data[load_key][source+' '+str(i+25)] = \\ data[load_key][key] # Copy the power generation values for the", "for the forecast. Args: dispatch (float): The fraction nondispatchable forecast (float): the forecast", "def write_json(self, directory): \"\"\" Writes json file for this node to the specified", "objects passed in. It will construct a name which is the concatenation of", "directory specified. It is necessary to pass in the date since this object", "the forecast value Returns: string: the minimum and the maximum dispatch value, separated", "to store the scenario file \"\"\" scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name)", "power vectors for every source stored in this scenario onto the axis passed", "License. 
# ___________________________________________________________________________ import datetime import json import os from collections import OrderedDict,", "forecast is None else forecast min_dispatch = dispatch * forecast value = \"{}", "the day probability (float): A value between 0 and 1 representing the probability", "matplotlib as mpl import matplotlib.pyplot as plt import pandas as pd from prescient.gosm.structures", "disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other in disaggregated: self.types[other] = self.types[aggregate_source] self.dispatches[other]", "plot. if dps is not None: label = 'Day Part Separators' for h", "'' # We merge name, power dictionaries, probabilities, comments for scenario in scenarios:", "they are supposed to be in the plot \"\"\" if not os.path.isdir(directory): os.makedirs(directory)", "probability = 1 comments = '' # We merge name, power dictionaries, probabilities,", "vect in self.power_dict.items(): xs = list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of the Day')", "key = source + ' 24' value = data[sources_key][key] else: raise RuntimeError(\"Power source", "file will only contain the 24-vectors of the power generation and the probabilities.", "of load values sources (List[ExtendedSource]): A list of the sources used in the", "all the power generation data for that given source. Args: directory (str): The", "for name, vect in self.power_dict.items(): xs = list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of", "+= 1 scenario_frame = pd.DataFrame(data=data, index=index, columns=columns) scenario_frame.to_csv(directory + os.sep + 'scenarios.csv') def", "values. key = source + ' ' + str(i + 1) raw_value =", "create_tree(self): \"\"\" This creates an instance of the Scenario Tree class using self.scenarios.", "scenario to scenario (i.e, Min Dispatchable Power, Max Dispatchable Power). 
It will store", "= self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b') # Add dps to the plot. if", "self.scenarios. Returns: ScenarioTree: the scenario tree \"\"\" root = InternalNode(\"root\", probability=1) for scenario", "It is necessary to pass in the date since this object does not", "for scen in scenarios] scenario = merge_independent_scenarios(power_scenarios) # Then we merge their path", "is a little hack to get the source names, these are stored #", "collection of sources each with a proportion of the values. Args: aggregate_source (str):", "in self.data.items(): string += \"{}:\\n\".format(key) for inner_key, inner_data in data.items(): string += \"{}:", "OrderedDict(), load_key: OrderedDict()} for i in range(24): for source in self.power_dict: # Translate", "color='k') # Name the axes. plt.xlabel('Hour') plt.ylabel('Power in Mw') # Create a title.", "will construct a name which is the concatenation of all scenario names, and", "def to_raw_node(self): \"\"\" Converts the internal node into a daps-style Raw_Node_Data object. Returns:", "dictionry in-place. 
Args: dict_ (dict): The dictionry to disaggregate aggregate_source (str): The name", "concatenation of all scenario names, and a probability which is a product of", "0 if forecast is None else forecast min_dispatch = dispatch * forecast value", "scenario.probability if scenario.comments: comments += '\\n' + scenario.comments # Here we drop the", "{}\\n'.format(self.probability)) for source in self.raw_data: f.write('Source: {}\\n'.format(source)) for dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt,", "is formed by merging all the other scenarios \"\"\" name = \"\" power_dict", "class SkeletonScenario(PowerScenario): \"\"\" This class should contain all the data parameters and values", "in this scenario onto the axis passed in (it will create one if", "name, vect in self.power_dict.items(): xs = list(range(24)) axis.plot(xs, vect, label=name) axis.set_xlabel('Hours of the", "only contain information about power generation and the associated probability and name. For", "which contains actual data for each stage. \"\"\" def __init__(self): self.root = None", "\"Internal Node {}:\\nprobability: {}\\ndata: {}\\n\".format( self.name, self.probability, self.data) string += 'Children:\\n' for child", "source power vectors for the sources provided and store that in a new", "respective source. \"\"\" for i in range(24): for source, source_type in self.types.items(): if", "the hours of sunshine. # In this case, set it to 0. forecast", "of ScenarioWithPaths objects to merge Returns: ScenarioWithPaths: The named tuple object with a", "with NTESS, the U.S. # Government retains certain rights in this software. #", "scenario load_data (dict[str,List[float]]): a dictionary mapping load sources to 24-vectors sources (List[ExtendedSource]): This", "paths # used to create it. 
The paths attribute will point to a", "the fact that the file will only contain the 24-vectors of the power", "to write in \"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data) forecast_node = InternalNode(self.expected_scenario.name, self.expected_scenario.probability,", "a dictionary mapping source names to 24-vectors of power generation values prob (float):", "to the plot. if self.expected_scenario is not None: forecast_range = self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3,", "into strings of minimum # and maximum dispatch values. key = source +", "and merged path dictionary \"\"\" # We first merge the PowerScenario objects power_scenarios", "return data def to_raw_node(self): \"\"\" Creates a daps-style Raw_Node_Data object from the scenario.", "= '{}'.format(i+1) def write_actual_and_expected(self, write_directory): \"\"\" Writes json-files for the actual and forecast", "aggregate_source (str): The name of the source to be disaggregated disaggregated (dict[str,float]): A", "should contain all the data parameters and values that change from scenario to", "\"\" power_dict = {} probability = 1 comments = '' # We merge", "= directory + os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as f: f.write('Probability: {}\\n'.format(self.probability))", "'Day Part Separators' for h in dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--') label", "24-vectors of power generation values prob (float): the probability of the scenario load_data", "hours. return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\" This method will update", "A list of the sources used in the scenario Returns: SkeletonScenario: The scenario", "expected scenario. 
Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\" return (self.actual_scenario.to_raw_node(), self.expected_scenario.to_raw_node()) def", "maximum dispatch values. key = source + ' ' + str(i + 1)", "the power of the original source \"\"\" aggregated_power = dict_[aggregate_source] del dict_[aggregate_source] for", "(float): The fraction nondispatchable forecast (float): the forecast value Returns: string: the minimum", "source '{}' has type '{}', the only \" \"types recognized are 'solar', 'wind',", "list of raw scenario nodes \"\"\" return [child.to_raw_node() for child in self.root.children] def", "the actual and the expected scenario. Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\"", "self.scenario_data, self.name, 'root', self.probability, self.comments) def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self): string =", "objects which has the power generation vectors from all scenarios as well as", "path_dict.update(scen.paths) return ScenarioWithPaths(scenario, path_dict) class PowerScenario: \"\"\" This class will only contain information", "import os from collections import OrderedDict, namedtuple import numpy as np import matplotlib", "= scenario.power_dict[source] plt.plot(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g') label = '_nolegend_' # Add", "generation vectors from all scenarios as well as the paths from all scenarios.", "\"{} {}\\n\".format(min_dispatch, forecast) return value def _copy_power_generation(self, data): \"\"\" Copies the power generation", "/= prob_sum def normalize_names(self): \"\"\" This function will change the names of the", "source to be disaggregated disaggregated (dict[str,float]): A dictionary mapping names of the new", "scenarios in addition to the actual scenario and the expected scenario. 
Returns: list[SkeletonScenario]:", "scenario power_dict (dict[str,List[float]]): This is a dictionary mapping source names to a list", "to scenario values. \"\"\" # A dictionary of data with strings as keys", "values into strings of minimum # and maximum dispatch values. key = source", "specified. It is necessary to pass in the date since this object does", "the directory to store the scenario file \"\"\" scen_file = directory + os.sep", "source_names, aggregate_source): \"\"\" This method will add up all the source power vectors", "SkeletonScenario class. Args: power_dict (dict): a dictionary mapping source names to 24-vectors of", "this scenario. The raw data in this sense refers to the 24-vector of", "parameters and values that change from scenario to scenario (i.e, Min Dispatchable Power,", "__str__(self): string = \"\" string += \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in self.power_dict.items(): string", "# and maximum dispatch values. key = source + ' ' + str(i", "= \"\" power_dict = {} probability = 1 comments = '' # We", "of data with strings as keys and the minimum and maximum # dispatch", "def create_raw_nodes(self): \"\"\" This turns the scenarios stored in the true into daps-style", "'hydro'.\".format(source, source_type)) key = source + ' ' + str(i + 25) data[sources_key][key]", "the original source is_load (bool): A flag to indicate whether the source to", "interest, this will store a 24-vector of power-values produced. Attributes: name (str): The", "actual=None, expected=None): \"\"\" Initializes an object of the SkeletonScenarioSet class. 
Args: scenarios (List[SkeletonScenario]):", "prob self.comments = comments def disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This method will update", "scenario.name scenario_name = source_name + ': ' + scen_name columns.append(scenario_name) values = [scenario.probability]", "source_name, \", \".join(map(str, power_vector))) string += 'Probability: {}\\n'.format(self.probability) return string def __lt__(self, other):", "['wind']: key = source + ' 24' value = data[sources_key][key] else: raise RuntimeError(\"Power", "to the plot. if dps is not None: label = 'Day Part Separators'", "self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\" This function writes out", "def disaggregate_source(self, aggregate_source, disaggregated, is_load=False): \"\"\" This method will update the dictionary of", "\"\"\" Writes json-files for the actual and forecast data. Args: write_directory: the directory", "of the PowerScenario objects passed in. It will construct a name which is", "to lists of 24 floats of power generation over the day probability (float):", "space \"\"\" # In the case of solar power, the passed forecast will", "Args: scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths objects to merge Returns: ScenarioWithPaths: The", "write all of the raw scenario files to the directory specified. Raw refers", "the form {source_name -> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\"", "\"\"\" This method will add up all the source power vectors for the", "the power generation data of the day for the next 24 hours, depending", "as plt import pandas as pd from prescient.gosm.structures import skeleton_point_paths as paths import", "Under the terms of Contract DE-NA0003525 with NTESS, the U.S. 
# Government retains", "in self.root.children] def __str__(self): return \"Tree:\\n\" + str(self.root) class InternalNode: \"\"\" Representation for", "scenario from the forecast data all_scenarios (list[SkeletonScenario]): The list of scenarios including the", "def __str__(self): string = \"\" string += \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in self.power_dict.items():", "a dictionary mapping source names to a list of 24 values prob (float):", "Returns: ScenarioWithPaths: The named tuple object with a merged PowerScenario and merged path", "InternalNode # Questionable... internal_node = InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node) tree =", "import matplotlib as mpl import matplotlib.pyplot as plt import pandas as pd from", "aggregate source \"\"\" power_vector = [0]*24 for name in source_names: for i, val", "(List[ExtendedSource]): This is just used to get the source types comments (str): A", "scenario expected (SkeletonScenario): The expected scenario \"\"\" self.scenarios = scenarios self.actual_scenario = actual", "daps-style Raw_Node_Data object from the scenario. Sets the parent to root currently. Returns:", "datetime import json import os from collections import OrderedDict, namedtuple import numpy as", "lists of 24 floats of power generation over the day probability (float): A", "probability (float): the probability of the scenario data: the data of the scenario", "is_load (bool): A flag to indicate whether the source to disaggregate is a", "next 24 hours, depending on the type of the respective source. \"\"\" for", "plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--') label = '_nolegend_' # Display a legend. 
lgd", "self.expected_scenario.data) actual_node.write_json(write_directory) forecast_node.write_json(write_directory) def actual_and_expected_node(self): \"\"\" Returns the corresponding Raw_Node_Data object for the", "axis plotted to \"\"\" if axis is None: fig, axis = plt.subplots() for", "Args: scenarios (List[PowerScenario]): The list of scenarios to merge Returns: PowerScenario: A scenario", "string def __lt__(self, other): return self.name < other.name class SkeletonScenario(PowerScenario): \"\"\" This class", "stored in this scenario onto the axis passed in (it will create one", "scenarios and have methods for exporting data to scenario files as well. Attributes:", "data=None, parent=None, comments=''): \"\"\" Initializes an object of the InternalNode class. Args: name", "the # respective hour lies outside the hours of sunshine. # In this", "def actual_and_expected_node(self): \"\"\" Returns the corresponding Raw_Node_Data object for the actual and the", "name of the directory to save to title (str): The title of the", "for source in self.power_dict: # Translate the power generation values into strings of", "Tree class using self.scenarios. Returns: ScenarioTree: the scenario tree \"\"\" root = InternalNode(\"root\",", "24 values prob (float): The associated probability of the scenario comments (str): Additional", "InternalNode(\"root\", probability=1) for scenario in self.scenarios: # We pass through the comments as", "def __repr__(self): return \"SkeletonScenario({})\".format(self.name) def __str__(self): string = \"SkeletonScenario({}):\\n\".format(self.name) for key, data in", "the scenarios stored in the true into daps-style Raw_Node_Data objects. 
Returns: (List[Raw_Node_Data]): A", "[0]*24 for name in source_names: for i, val in enumerate(self.power_dict[name]): power_vector[i] += val", "actual_and_expected_node(self): \"\"\" Returns the corresponding Raw_Node_Data object for the actual and the expected", "to aggregate aggregate_sources (str): The name of the aggregate source \"\"\" power_vector =", "(dict): The dictionry to disaggregate aggregate_source (str): The name of the source to", "probabilistic scenarios in addition to the actual scenario and the expected scenario. Returns:", "Dispatchable Power, Max Dispatchable Power). It will store these results in a dictionary", "name of the source to be disaggregated disaggregated (dict[str,float]): A dictionary mapping names", "\"\"\" Basic plotting routine for the scenarios. This will create a plot for", "create a file called 'scenarios.csv' in the directory specified. It is necessary to", "distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key = 'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower '", "into daps-style Raw_Node_Data objects. Returns: (List[Raw_Node_Data]): A list of raw scenario nodes \"\"\"", "Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC # (NTESS). Under", "changed by some other function, they will be changed in the newly created", "by merging all the other scenarios \"\"\" name = \"\" power_dict = {}", "return SkeletonScenario(self.name, self.power_dict, self.probability, load_data, sources, self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self):", "+ str(i + 25) data[sources_key][key] = value return data def to_raw_node(self): \"\"\" Creates", "24-vectors of the power generation and the probabilities. 
This will create a file", "disaggregate_source(self, aggregate_source, disaggregated): \"\"\" This method will update the dictionary of power values", "return \"PowerScenario({})\".format(self.name) def __str__(self): string = \"\" string += \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector", "between 0 and 1 representing the probability of the scenario comments (str): Additional", "-0.25), ncol=3, shadow=True) # Display a grid and the axes. plt.grid(True, which='both') plt.axhline(y=0,", "a title. plt.title(title + source, y=1.08) plt.savefig(directory + os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight')", "the paths from all scenarios. We assume independence across the scenarios. Args: scenarios", "scen_name = scenario.name scenario_name = source_name + ': ' + scen_name columns.append(scenario_name) values", "+= \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in self.power_dict.items(): string += \"{}: {}\\n\".format( source_name, \",", "Args: scenarios (List[SkeletonScenario]): The list of scenarios actual (SkeletonScenario): The actual scenario expected", "Questionable... internal_node = InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root)", "to disaggregate is a load source \"\"\" if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source,", "a daps-style Raw_Node_Data object. 
Returns: (Raw_Node_Data): raw node representing scenario \"\"\" return pyspgen.CommentedRawNodeData(", "It will construct a name which is the concatenation of all scenario names,", "forecast value = \"{} {}\\n\".format(min_dispatch, forecast) return value def _copy_power_generation(self, data): \"\"\" Copies", "of the scenarios in the tree \"\"\" for child in self.root.children: child.write_json(output_directory) def", "if self.actual_scenario is not None: actual_range = self.actual_scenario.power_dict[source] plt.plot(actual_range, zorder=3, label='Actual', color='b') #", "{source.name: source.frac_nondispatch for source in sources} def scenario_data(self): \"\"\" This will construct the", "each stage. \"\"\" def __init__(self): self.root = None def set_root(self, node): self.root =", "power_dict.update(scenario.power_dict) probability *= scenario.probability if scenario.comments: comments += '\\n' + scenario.comments # Here", "the proportion of the power of the original source \"\"\" aggregated_power = dict_[aggregate_source]", "actual and the expected scenario. Returns: (Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data \"\"\" return", "+ scen_name columns.append(scenario_name) values = [scenario.probability] + \\ scenario.power_dict[source_name] data[:,i] = values i", "probability (float): A value between 0 and 1 representing the probability of the", "in \"\"\" # if no parent specified, assume parent is root parent_name =", "in disaggregated.items(): source_power = [proportion*value for value in aggregated_power] dict_[name] = source_power class", "We merge name, power dictionaries, probabilities, comments for scenario in scenarios: name +=", "sum(scen.probability for scen in self.scenarios) for scen in self.scenarios: scen.probability /= prob_sum def", "if forecast is None else forecast min_dispatch = dispatch * forecast value =", "we assume independence. 
Args: scenarios (List[PowerScenario]): The list of scenarios to merge Returns:", "for h in dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--') label = '_nolegend_' #", "create_raw_nodes(self): \"\"\" This turns the scenarios stored in the true into daps-style Raw_Node_Data", "containing extra details about the scenario \"\"\" PowerScenario.__init__(self, name, power_dict, prob, comments) self.load_data", "strings as keys and the minimum and maximum # dispatch values as (str)", "\"Scenario_i\". \"\"\" for i, scenario in enumerate(self.scenarios): scenario.name = '{}'.format(i+1) def write_actual_and_expected(self, write_directory):", "scenarios. label = 'Scenarios' for source in sources: plt.figure(source) for scenario in self.scenarios:", "f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch, forecast): \"\"\" Determines the minimum and the maximum", "the scenario power_dict (dict[str,List[float]]): This is a dictionary mapping source names to a", "list of scenarios actual (SkeletonScenario): The actual scenario expected (SkeletonScenario): The expected scenario", "data for this scenario. The raw data in this sense refers to the", "for scenario in self.scenarios] def create_tree(self): \"\"\" This creates an instance of the", "linestyle='--') label = '_nolegend_' # Display a legend. 
lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5,", "return string def __lt__(self, other): return self.name < other.name class SkeletonScenario(PowerScenario): \"\"\" This", "if self.parent is None else self.parent.name filename = \"NODE-{}-PARENT-{}-PROB-{}.json\".format( self.name, parent_name, self.probability) with", "= sum(scen.probability for scen in self.scenarios) for scen in self.scenarios: scen.probability /= prob_sum", "self.types[aggregate_source] self.dispatches[other] = self.dispatches[aggregate_source] del self.types[aggregate_source] del self.dispatches[aggregate_source] def write_raw_data(self, directory): \"\"\" This", "+ os.sep + 'Scenario_{}.dat'.format(self.name) with open(scen_file, 'w') as f: f.write('Probability: {}\\n'.format(self.probability)) for source", "scenarios: name += scenario.name + '_' power_dict.update(scenario.power_dict) probability *= scenario.probability if scenario.comments: comments", "\"\"\" if not os.path.isdir(directory): os.makedirs(directory) # This is a little hack to get", "scenarios actual_scenario (SkeletonScenario): the scenario from the actual data expected_scenario (SkeletonScenario): the scenario", "Create a plot for every source and add all scenarios. label = 'Scenarios'", "the name of the scenario. 
Args: directory (str): A path to the directory", "internal_node = InternalNode(scenario.name, scenario.probability, scenario.data, root, scenario.comments) root.add_child(internal_node) tree = ScenarioTree() tree.set_root(root) return", "the dictionary of a scenario sources = list(self.scenarios[0].power_dict.keys()) # Create a plot for", "original source is_load (bool): A flag to indicate whether the source to disaggregate", "axis.set_title('Scenario {}'.format(self.name)) axis.legend() return axis def add_load_data(self, load_data, sources): \"\"\" This will create", "'Scenarios' for source in sources: plt.figure(source) for scenario in self.scenarios: source_scenario = scenario.power_dict[source]", "parent specified, assume parent is root parent_name = 'root' if self.parent is None", "the scenario parent: the parent node comments: A string detailing information about the", "string = \"\" string += \"PowerScenario({})\\n\".format(self.name) for source_name, power_vector in self.power_dict.items(): string +=", "= self.expected_scenario.power_dict[source] plt.plot(forecast_range, zorder=3, label='Forecast', color='r') if self.actual_scenario is not None: actual_range =", "self.load_data: # Save the load forecast. forecast = self.load_data[source][i] key = source +", "Returns: list[SkeletonScenario]: The list of all scenarios \"\"\" return [self.actual_scenario, self.expected_scenario] + \\", "\"\"\" def __init__(self, name, power_dict, prob, comments=''): \"\"\" To initialize a PowerScenario object,", "sources): \"\"\" This will create a SkeletonScenario object using the data in the", "dictionary mapping keys to scenario values. 
\"\"\" # A dictionary of data with", "is None: fig, axis = plt.subplots() for name, vect in self.power_dict.items(): xs =", "bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\" This creates a scenario which merges all the", "scenario \"\"\" self.name = name self.probability = probability self.parent = parent self.data =", "add up to 1. \"\"\" prob_sum = sum(scen.probability for scen in self.scenarios) for", "the power generation values for the next 24 hours. return self._copy_power_generation(data) def disaggregate_source(self,", "an individual node in the Scenario tree. Each node has an associated name,", "values. data = {sources_key: OrderedDict(), load_key: OrderedDict()} for i in range(24): for source", "these are stored # as keys in the dictionary of a scenario sources", "aggregated_power] dict_[name] = source_power class SkeletonScenarioSet: \"\"\" This class should manage all single", "= \"SkeletonScenario({}):\\n\".format(self.name) for key, data in self.data.items(): string += \"{}:\\n\".format(key) for inner_key, inner_data", "Here we drop the last underscore added name = name[:-1] return PowerScenario(name, power_dict,", "'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source, disaggregated): \"\"\" This method will update the", "in this software. # This software is distributed under the Revised BSD License.", "probability and name. For each source of interest, this will store a 24-vector", "disaggregate_dict(self.power_dict, aggregate_source, disaggregated) def aggregate_sources(self, source_names, aggregate_source): \"\"\" This method will add up", "(it will create one if none is passed in). Args: axis: The axis", "forecast. 
Args: dispatch (float): The fraction nondispatchable forecast (float): the forecast value Returns:", "to get the source names, these are stored # as keys in the", "\"types recognized are 'solar', 'wind', \" \"and 'hydro'.\".format(source, source_type)) key = source +", "power generation and the associated probability and name. For each source of interest,", "self.name, self.probability, self.data) string += 'Children:\\n' for child in self.children: string += str(child)", "collection of sources each with a proportion of the values. This will update", "write_raw_scenarios(self, directory, date): \"\"\" This routine should write all of the raw scenario", "Sets the parent to root currently. Returns: Raw_Node_Data: The equivalent Raw_Node_Data object \"\"\"", "(list[SkeletonScenario]): a list of scenarios actual_scenario (SkeletonScenario): the scenario from the actual data", "of the original source is_load (bool): A flag to indicate whether the source", "the parent to root currently. Returns: Raw_Node_Data: The equivalent Raw_Node_Data object \"\"\" return", "name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self, directory): \"\"\" Writes json file for this", "def aggregate_sources(self, source_names, aggregate_source): \"\"\" This method will add up all the source", "the associated probability and name. For each source of interest, this will store", "scenarios. 
Returns: list[CommentedRawNodeData]: The list of node data objects \"\"\" return [scenario.to_raw_node() for", "creates a scenario which merges all the power dictionaries of the PowerScenario objects", "forecast data all_scenarios (list[SkeletonScenario]): The list of scenarios including the actual and expected", "value = \"{} {}\\n\".format(min_dispatch, forecast) return value def _copy_power_generation(self, data): \"\"\" Copies the", "self.expected_scenario.to_raw_node()) def plot_scenarios(self, directory, title, dps=None): \"\"\" Basic plotting routine for the scenarios.", "source names, these are stored # as keys in the dictionary of a", "of the file will be Scenario_<name>.dat where <name> is replaced by the name", "scenarios to be numbered in the form \"Scenario_i\". \"\"\" for i, scenario in", "\"\"\" This property returns the list of probabilistic scenarios in addition to the", "list of scenarios to merge Returns: PowerScenario: A scenario which is formed by", "\"\"\" name = \"\" power_dict = {} probability = 1 comments = ''", "of the InternalNode class. Args: name (str): the name of the scenario probability", "Raw_Node_Data object for the actual and the expected scenario. Returns: (Raw_Node_Data, Raw_Node_Data): Actual,", "source is_load (bool): A flag to indicate whether the source to disaggregate is", "None: label = 'Day Part Separators' for h in dps[source]: plt.axvline(x=h, zorder=1, label=label,", "json files for each of the scenarios in the tree \"\"\" for child", "the InternalNode class. Args: name (str): the name of the scenario probability (float):", "self.power_dict: # Translate the power generation values into strings of minimum # and", "function writes out the raw data for this scenario. The raw data in", "values for the next 24 hours. 
return self._copy_power_generation(data) def disaggregate_source(self, aggregate_source, disaggregated, is_load=False):", "Returns: (List[Raw_Node_Data]): A list of raw scenario nodes \"\"\" return [child.to_raw_node() for child", "{}\\n\".format(min_dispatch, forecast) return value def _copy_power_generation(self, data): \"\"\" Copies the power generation data", "to the specified directory Args: directory: the directory to store the json file", "2020 National Technology & Engineering Solutions of Sandia, LLC # (NTESS). Under the", "data[sources_key][key] elif source_type in ['wind']: key = source + ' 24' value =", "The title of the plot dps (dict): the day part separators for each", "import prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses as basicclasses from prescient.util.distributions.distribution_factory import distribution_factory from", "to the 24-vector of the power generation values produced in a scenario without", "scenario data: the data of the scenario parent: the parent node comments: A", "points to an internal node which contains actual data for each stage. \"\"\"", "get the source types comments (str): A string containing extra details about the", "return pyspgen.CommentedRawNodeData( dictin=self.data, name=self.name, parentname=self.parent.name, prob=self.probability, comments=self.comments) def write_json(self, directory): \"\"\" Writes json", "source + ' ' + str(i + 1) data[load_key][key] = str(forecast) + '\\n'", "write_json(self, directory): \"\"\" Writes json file for this node to the specified directory", "with all the power generation data for that given source. 
Args: directory (str):", "return self.name < other.name class SkeletonScenario(PowerScenario): \"\"\" This class should contain all the", "['solar', 'hydro']: key = source + ' ' + str(i + 1) value", "\"\"\" def __init__(self, scenarios, actual=None, expected=None): \"\"\" Initializes an object of the SkeletonScenarioSet", "source, y=1.08) plt.savefig(directory + os.sep + source, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(source) def merge_independent_scenarios(scenarios): \"\"\"", "the internal node into a daps-style Raw_Node_Data object. Returns: (Raw_Node_Data): raw node representing", "generation over the day probability (float): A value between 0 and 1 representing", "self.scenarios: scen.probability /= prob_sum def normalize_names(self): \"\"\" This function will change the names", "scenario.comments # Here we drop the last underscore added name = name[:-1] return", "self.power_dict, self.probability, load_data, sources, self.comments) def __repr__(self): return \"PowerScenario({})\".format(self.name) def __str__(self): string =", "in self.power_dict: # Translate the power generation values into strings of minimum #", "the directory to save to title (str): The title of the plot dps", "+ ' ' + str(i + 25) data[sources_key][key] = value return data def", "they are changed by some other function, they will be changed in the", "scenario. The raw data in this sense refers to the 24-vector of the", "f.write('Source: {}\\n'.format(source)) for dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch, forecast):", "hours of sunshine. # In this case, set it to 0. forecast =", "the probability of the scenario comments (str): Additional details about how scenario was", "creates an instance of the Scenario Tree class using self.scenarios. Returns: ScenarioTree: the", "for source in self.load_data: # Save the load forecast. 
forecast = self.load_data[source][i] key", "The dictionry to disaggregate aggregate_source (str): The name of the source to be", "(str): The name of the aggregate source \"\"\" power_vector = [0]*24 for name", "from source names to lists of 24 floats of power generation over the", "return [child.to_raw_node() for child in self.root.children] def __str__(self): return \"Tree:\\n\" + str(self.root) class", "to 24-vectors of load values sources (List[ExtendedSource]): A list of the sources used", "value = self.dispatch_value(self.dispatches[source], raw_value) data[sources_key][key] = value for source in self.load_data: # Save", "'scenarios.csv') def create_raw_nodes(self): \"\"\" This returns a list of CommentedRawNodeData objcts instantiated from", "directory to store the scenario file \"\"\" scen_file = directory + os.sep +", "self.power_dict[aggregate_source] = power_vector def plot(self, axis=None): \"\"\" Simple plotting routing which will plot", "A dictionary mapping names of the new sources to the proportion of the", "of scenarios. The root points to an internal node which contains actual data", "a ScenarioWithPaths objects which has the power generation vectors from all scenarios as", "from the forecast data all_scenarios (list[SkeletonScenario]): The list of scenarios including the actual", "Returns: SkeletonScenario: The scenario with power and load values \"\"\" return SkeletonScenario(self.name, self.power_dict,", "source with the name aggregate_source. It will delete all the original source power", "Tree representation of a set of scenarios. The root points to an internal", "class InternalNode: \"\"\" Representation for an individual node in the Scenario tree. 
Each", "for dt, value in self.raw_data[source].items(): f.write('{},{}\\n'.format(dt, value)) def dispatch_value(self, dispatch, forecast): \"\"\" Determines", "a blank space \"\"\" # In the case of solar power, the passed", "import UnivariateEpiSplineDistribution load_key = 'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source,", "json import os from collections import OrderedDict, namedtuple import numpy as np import", "generation and the associated probability and name. For each source of interest, this", "the corresponding Raw_Node_Data object for the actual and the expected scenario. Returns: (Raw_Node_Data,", "plot all the power vectors for every source stored in this scenario onto", "self.name = name self.probability = probability self.parent = parent self.data = data self.children", "power_dict (dict): a dictionary mapping source names to 24-vectors of power generation values", "of Contract DE-NA0003525 with NTESS, the U.S. # Government retains certain rights in", "date of the scenarios \"\"\" if not os.path.isdir(directory): os.mkdir(directory) index = ['Probability'] +", "will update the dictionary of power values by replacing the values for the", "if is_load: disaggregate_dict(self.load_data) else: PowerScenario.disaggregate_source(self, aggregate_source, disaggregated) for other in disaggregated: self.types[other] =", "the proportion of the power of the original source \"\"\" disaggregate_dict(self.power_dict, aggregate_source, disaggregated)", "center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True) # Display a grid and the axes. 
plt.grid(True,", "will create a SkeletonScenario object using the data in the PowerScenario in conjunction", "for child in self.root.children] def __str__(self): return \"Tree:\\n\" + str(self.root) class InternalNode: \"\"\"", "source_type in ['wind']: key = source + ' 24' value = data[sources_key][key] else:", "parent node comments: A string detailing information about the scenario \"\"\" self.name =", "Attributes: scenarios (list[SkeletonScenario]): a list of scenarios actual_scenario (SkeletonScenario): the scenario from the", "add up all the source power vectors for the sources provided and store", "changed in the newly created object Args: load_data (dict[str,List[float]]): A dictionary mapping names", "25) data[sources_key][key] = value return data def to_raw_node(self): \"\"\" Creates a daps-style Raw_Node_Data", "+= 'Children:\\n' for child in self.children: string += str(child) return string + '\\n\\n'", "of the sources used in the scenario Returns: SkeletonScenario: The scenario with power", "directory, title, dps=None): \"\"\" Basic plotting routine for the scenarios. This will create", "elif source_type in ['wind']: key = source + ' 24' value = data[sources_key][key]", "in ['wind']: key = source + ' 24' value = data[sources_key][key] else: raise", "class PowerScenario: \"\"\" This class will only contain information about power generation and", "= name[:-1] return PowerScenario(name, power_dict, probability, comments) # This will have a PowerScenario", "scenarios (List[PowerScenario]): The list of scenarios to merge Returns: PowerScenario: A scenario which", "the probability of the scenario data: the data of the scenario parent: the", "= actual self.expected_scenario = expected self.source_names = list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\" This", "from collections import OrderedDict, namedtuple import numpy as np import matplotlib as mpl", "the expected scenario. 
Returns: list[SkeletonScenario]: The list of all scenarios \"\"\" return [self.actual_scenario,", "fact that the file will only contain the 24-vectors of the power generation", "'{}', the only \" \"types recognized are 'solar', 'wind', \" \"and 'hydro'.\".format(source, source_type))", "source_type in self.types.items(): if source_type in ['solar', 'hydro']: key = source + '", "for source in sources} def scenario_data(self): \"\"\" This will construct the dictionary mapping", "\"\"\" Initializes an object of the SkeletonScenarioSet class. Args: scenarios (List[SkeletonScenario]): The list", "def create_tree(self): \"\"\" This creates an instance of the Scenario Tree class using", "is necessary to pass in the date since this object does not have", "# if no parent specified, assume parent is root parent_name = 'root' if", "': ' + scen_name columns.append(scenario_name) values = [scenario.probability] + \\ scenario.power_dict[source_name] data[:,i] =", "should manage all single skeleton scenarios and have methods for exporting data to", "should write all of the raw scenario files to the directory specified. Raw", "ScenarioTree() tree.set_root(root) return tree def normalize_probabilities(self): \"\"\" This function will normalize the probabilities", "the names of the scenarios to be numbered in the form \"Scenario_i\". \"\"\"", "'forecasts' else: scen_name = scenario.name scenario_name = source_name + ': ' + scen_name", "val del self.power_dict[name] self.power_dict[aggregate_source] = power_vector def plot(self, axis=None): \"\"\" Simple plotting routing", "scenarios. 
The root points to an internal node which contains actual data for", "namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This will merge ScenarioWithPaths objects and return", "data all_scenarios (list[SkeletonScenario]): The list of scenarios including the actual and expected scenario", "will be changed in the newly created object Args: load_data (dict[str,List[float]]): A dictionary", "# of the form {source_name -> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def", "OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths']) def merge_scenarios_with_paths(scenarios): \"\"\" This will merge ScenarioWithPaths", "power values by replacing the values for the specified source by a collection", "for each source with all the power generation data for that given source.", "(List[ExtendedSource]): A list of the sources used in the scenario Returns: SkeletonScenario: The", "a dictionary # of the form {source_name -> OneDimPath} ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario',", "name in source_names: for i, val in enumerate(self.power_dict[name]): power_vector[i] += val del self.power_dict[name]", "the 24-vector of the power generation values produced in a scenario without any", "scenarios, actual=None, expected=None): \"\"\" Initializes an object of the SkeletonScenarioSet class. Args: scenarios", "an object of the InternalNode class. Args: name (str): the name of the", "\"\"\" self.scenarios = scenarios self.actual_scenario = actual self.expected_scenario = expected self.source_names = list(scenarios[0].power_dict.keys())", "mapping source names to a list of 24 values prob (float): The associated", "all the power dictionaries of the PowerScenario objects passed in. 
It will construct", "\"\"\" Copies the power generation data of the day for the next 24", "UnivariateEpiSplineDistribution load_key = 'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower ' def disaggregate_dict(dict_, aggregate_source, disaggregated):", "with a proportion of the values. This will update the dictionry in-place. Args:", "the json file in \"\"\" # if no parent specified, assume parent is", "in aggregated_power] dict_[name] = source_power class SkeletonScenarioSet: \"\"\" This class should manage all", "Args: write_directory: the directory to write in \"\"\" actual_node = InternalNode(self.actual_scenario.name, self.actual_scenario.probability, self.actual_scenario.data)", "corresponding Raw_Node_Data object for the actual and the expected scenario. Returns: (Raw_Node_Data, Raw_Node_Data):", "# Add dps to the plot. if dps is not None: label =", "dps[source]: plt.axvline(x=h, zorder=1, label=label, color='grey', linestyle='--') label = '_nolegend_' # Display a legend.", "list of node data objects \"\"\" return [scenario.to_raw_node() for scenario in self.scenarios] def", "min_dispatch = dispatch * forecast value = \"{} {}\\n\".format(min_dispatch, forecast) return value def", "internal node into a daps-style Raw_Node_Data object. Returns: (Raw_Node_Data): raw node representing scenario", "The equivalent Raw_Node_Data object \"\"\" return pyspgen.CommentedRawNodeData( self.scenario_data, self.name, 'root', self.probability, self.comments) def", "the scenario. Args: directory (str): The path to the directory to store the", "node data objects \"\"\" return [scenario.to_raw_node() for scenario in self.scenarios] def create_tree(self): \"\"\"", "and the maximum dispatch value for the forecast. 
Args: dispatch (float): The fraction", "from prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key = 'Demand' sources_key =", "names to lists of 24 floats of power generation over the day probability", "scenario in self.scenarios: # We pass through the comments as well to the", "\"\"\" root = InternalNode(\"root\", probability=1) for scenario in self.scenarios: # We pass through", "outside the hours of sunshine. # In this case, set it to 0.", "the minimum and maximum # dispatch values as (str) values. data = {sources_key:", "drop the last underscore added name = name[:-1] return PowerScenario(name, power_dict, probability, comments)", "will plot all the power vectors for every source stored in this scenario", "will be None if the # respective hour lies outside the hours of", "the sources to aggregate aggregate_sources (str): The name of the aggregate source \"\"\"", "Scenario Tree class using self.scenarios. Returns: ScenarioTree: the scenario tree \"\"\" root =", "The name of the scenario power_dict (dict): A mapping from source names to", "A path to the directory to store the scenario file \"\"\" scen_file =", "proportion in disaggregated.items(): source_power = [proportion*value for value in aggregated_power] dict_[name] = source_power", "load sources to 24-vectors of load values sources (List[ExtendedSource]): A list of the", "solar power, the passed forecast will be None if the # respective hour", "import distribution_factory from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution load_key = 'Demand' sources_key = 'MinNondispatchablePower MaxNondispatchablePower", "value for source in self.load_data: # Save the load forecast. 
forecast = self.load_data[source][i]", "of all scenario names, and a probability which is a product of all", "prescient.gosm.pyspgen as pyspgen import prescient.gosm.basicclasses as basicclasses from prescient.util.distributions.distribution_factory import distribution_factory from prescient.util.distributions.distributions", "Note this will not copy the values, so if they are changed by", "& Engineering Solutions of Sandia, LLC # (NTESS). Under the terms of Contract", "mapping from source names to lists of 24 floats of power generation over", "= expected self.source_names = list(scenarios[0].power_dict.keys()) @property def all_scenarios(self): \"\"\" This property returns the", "PowerScenario objects passed in. It will construct a name which is the concatenation", "the aggregate source \"\"\" power_vector = [0]*24 for name in source_names: for i,", "only contain the 24-vectors of the power generation and the probabilities. This will", "def merge_independent_scenarios(scenarios): \"\"\" This creates a scenario which merges all the power dictionaries" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE try: import", "psfw = 1.0 stars = zeros((h,w)).astype(float) for (x,y) in zip(X,Y): ix = int(round(x))", "2 * math.pi #origfrac = 0.5 #maxorig = I.max() #starfrac = (1.0 -", "range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI = (I.ravel() * 255.0).round().astype(int) f.write(\"\".join([chr(min(i,255)) for i", "astropy.io.fits\") import math from math import exp from matplotlib.pylab import imread from numpy.oldnumeric.functions", "for p in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI = (I.ravel() * 255.0).round().astype(int)", "0.8 f=open('out.ppm', 'wb') f.write('P6 %i %i %i\\n' % (w, h, 255)) #for j", "X = XY.field('X') Y = XY.field('Y') psfw = 1.0 stars = zeros((h,w)).astype(float) for", "= XY.field('Y') psfw = 1.0 stars = zeros((h,w)).astype(float) for (x,y) in zip(X,Y): ix", "import either pyfits or astropy.io.fits\") import math from math import exp from matplotlib.pylab", "from astropy.io import fits as pyfits except ImportError: raise ImportError(\"Cannot import either pyfits", "either pyfits or astropy.io.fits\") import math from math import exp from matplotlib.pylab import", "+ dx if xx < 0 or xx >= w: continue dd =", "BSD style license - see LICENSE try: import pyfits except ImportError: try: from", "stars[yy,xx] += exp(-dd / (2 * psfw**2)) #1./(psfw**2 * 2 * math.pi #origfrac", "= 1.0 stars = zeros((h,w)).astype(float) for (x,y) in zip(X,Y): ix = int(round(x)) iy", "in range(h): # for i in range(w): # for p in range(planes): #", "(h,w,planes) = I.shape XY = pyfits.open('16b.fits')[1].data X = XY.field('X') Y = XY.field('Y') psfw", "math import exp from matplotlib.pylab import imread from numpy.oldnumeric.functions import zeros, ravel I=imread('3.png')", "fits as pyfits except ImportError: raise ImportError(\"Cannot import either pyfits or astropy.io.fits\") import", "1.0 stars = zeros((h,w)).astype(float) for (x,y) in zip(X,Y): ix = int(round(x)) iy =", 
"ix = int(round(x)) iy = int(round(y)) for dy in range(-5, 6): yy =", "in range(-5, 6): xx = ix + dx if xx < 0 or", "is part of the Astrometry.net suite. # Licensed under a 3-clause BSD style", "- maxorig) #for p in range(planes): # I[:,:,p] = I[:,:,p] * origfrac +", "#origfrac = 0.5 #maxorig = I.max() #starfrac = (1.0 - origfrac) + (1.0", "f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI = (I.ravel() * 255.0).round().astype(int) f.write(\"\".join([chr(min(i,255)) for i in flatI]))", "0.7 + stars/stars.max() * 0.8 f=open('out.ppm', 'wb') f.write('P6 %i %i %i\\n' % (w,", "I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() * starfrac for p in range(planes):", "dy in range(-5, 6): yy = iy + dy if yy < 0", "* math.pi #origfrac = 0.5 #maxorig = I.max() #starfrac = (1.0 - origfrac)", "for p in range(planes): I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max() * 0.8", "(x,y) in zip(X,Y): ix = int(round(x)) iy = int(round(y)) for dy in range(-5,", "f.write('P6 %i %i %i\\n' % (w, h, 255)) #for j in range(h): #", "in range(planes): I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max() * 0.8 f=open('out.ppm', 'wb')", "Astrometry.net suite. 
# Licensed under a 3-clause BSD style license - see LICENSE", "'wb') f.write('P6 %i %i %i\\n' % (w, h, 255)) #for j in range(h):", "from matplotlib.pylab import imread from numpy.oldnumeric.functions import zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) =", "ImportError: raise ImportError(\"Cannot import either pyfits or astropy.io.fits\") import math from math import", "* starfrac for p in range(planes): I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max()", "6): yy = iy + dy if yy < 0 or yy >=", "p in range(planes): # I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() * starfrac", "pyfits or astropy.io.fits\") import math from math import exp from matplotlib.pylab import imread", "= iy + dy if yy < 0 or yy >= h: continue", "in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI = (I.ravel() * 255.0).round().astype(int) f.write(\"\".join([chr(min(i,255)) for", "math from math import exp from matplotlib.pylab import imread from numpy.oldnumeric.functions import zeros,", "math.pi #origfrac = 0.5 #maxorig = I.max() #starfrac = (1.0 - origfrac) +", "+ stars/stars.max() * starfrac for p in range(planes): I[:,:,p] = I[:,:,p] * 0.7", "(2 * psfw**2)) #1./(psfw**2 * 2 * math.pi #origfrac = 0.5 #maxorig =", "#for p in range(planes): # I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() *", "= pyfits.open('16b.fits')[1].data X = XY.field('X') Y = XY.field('Y') psfw = 1.0 stars =", "import zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape XY = pyfits.open('16b.fits')[1].data X =", "ix + dx if xx < 0 or xx >= w: continue dd", "(yy - y)**2 stars[yy,xx] += exp(-dd / (2 * psfw**2)) #1./(psfw**2 * 2", "= XY.field('X') Y = XY.field('Y') psfw = 1.0 stars = zeros((h,w)).astype(float) for (x,y)", "for (x,y) in zip(X,Y): ix = int(round(x)) iy = int(round(y)) for dy in", "f=open('out.ppm', 'wb') f.write('P6 %i %i %i\\n' % (w, h, 255)) #for j in", "int(round(y)) for dy in range(-5, 6): yy = iy + dy if yy", "= zeros((h,w)).astype(float) for (x,y) in 
zip(X,Y): ix = int(round(x)) iy = int(round(y)) for", "origfrac + stars/stars.max() * starfrac for p in range(planes): I[:,:,p] = I[:,:,p] *", "range(planes): I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max() * 0.8 f=open('out.ppm', 'wb') f.write('P6", "iy + dy if yy < 0 or yy >= h: continue for", "#for j in range(h): # for i in range(w): # for p in", "= (1.0 - origfrac) + (1.0 - maxorig) #for p in range(planes): #", "= int(round(y)) for dy in range(-5, 6): yy = iy + dy if", "x)**2 + (yy - y)**2 stars[yy,xx] += exp(-dd / (2 * psfw**2)) #1./(psfw**2", "= I[:,:,p] * origfrac + stars/stars.max() * starfrac for p in range(planes): I[:,:,p]", "or yy >= h: continue for dx in range(-5, 6): xx = ix", "(xx - x)**2 + (yy - y)**2 stars[yy,xx] += exp(-dd / (2 *", "stars/stars.max() * starfrac for p in range(planes): I[:,:,p] = I[:,:,p] * 0.7 +", "0 or xx >= w: continue dd = (xx - x)**2 + (yy", "I[:,:,p] * origfrac + stars/stars.max() * starfrac for p in range(planes): I[:,:,p] =", "0.5 #maxorig = I.max() #starfrac = (1.0 - origfrac) + (1.0 - maxorig)", "in zip(X,Y): ix = int(round(x)) iy = int(round(y)) for dy in range(-5, 6):", "part of the Astrometry.net suite. 
# Licensed under a 3-clause BSD style license", "astropy.io import fits as pyfits except ImportError: raise ImportError(\"Cannot import either pyfits or", "import exp from matplotlib.pylab import imread from numpy.oldnumeric.functions import zeros, ravel I=imread('3.png') I=I[:,:,:3]", "I=I[:,:,:3] (h,w,planes) = I.shape XY = pyfits.open('16b.fits')[1].data X = XY.field('X') Y = XY.field('Y')", "* 0.8 f=open('out.ppm', 'wb') f.write('P6 %i %i %i\\n' % (w, h, 255)) #for", "under a 3-clause BSD style license - see LICENSE try: import pyfits except", "xx = ix + dx if xx < 0 or xx >= w:", "yy < 0 or yy >= h: continue for dx in range(-5, 6):", "XY = pyfits.open('16b.fits')[1].data X = XY.field('X') Y = XY.field('Y') psfw = 1.0 stars", "range(-5, 6): yy = iy + dy if yy < 0 or yy", "if xx < 0 or xx >= w: continue dd = (xx -", "psfw**2)) #1./(psfw**2 * 2 * math.pi #origfrac = 0.5 #maxorig = I.max() #starfrac", "This file is part of the Astrometry.net suite. # Licensed under a 3-clause", "as pyfits except ImportError: raise ImportError(\"Cannot import either pyfits or astropy.io.fits\") import math", "I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max() * 0.8 f=open('out.ppm', 'wb') f.write('P6 %i", "#maxorig = I.max() #starfrac = (1.0 - origfrac) + (1.0 - maxorig) #for", "= I.shape XY = pyfits.open('16b.fits')[1].data X = XY.field('X') Y = XY.field('Y') psfw =", "exp(-dd / (2 * psfw**2)) #1./(psfw**2 * 2 * math.pi #origfrac = 0.5", "import math from math import exp from matplotlib.pylab import imread from numpy.oldnumeric.functions import", "range(w): # for p in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI = (I.ravel()", "stars/stars.max() * 0.8 f=open('out.ppm', 'wb') f.write('P6 %i %i %i\\n' % (w, h, 255))", "ImportError: try: from astropy.io import fits as pyfits except ImportError: raise ImportError(\"Cannot import", "# This file is part of the Astrometry.net suite. 
# Licensed under a", "= int(round(x)) iy = int(round(y)) for dy in range(-5, 6): yy = iy", "zeros((h,w)).astype(float) for (x,y) in zip(X,Y): ix = int(round(x)) iy = int(round(y)) for dy", "i in range(w): # for p in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI", "int(round(x)) iy = int(round(y)) for dy in range(-5, 6): yy = iy +", "= 0.5 #maxorig = I.max() #starfrac = (1.0 - origfrac) + (1.0 -", "/ (2 * psfw**2)) #1./(psfw**2 * 2 * math.pi #origfrac = 0.5 #maxorig", "Y = XY.field('Y') psfw = 1.0 stars = zeros((h,w)).astype(float) for (x,y) in zip(X,Y):", "for dx in range(-5, 6): xx = ix + dx if xx <", "j in range(h): # for i in range(w): # for p in range(planes):", "h: continue for dx in range(-5, 6): xx = ix + dx if", "# for p in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI = (I.ravel() *", "except ImportError: try: from astropy.io import fits as pyfits except ImportError: raise ImportError(\"Cannot", "license - see LICENSE try: import pyfits except ImportError: try: from astropy.io import", "< 0 or xx >= w: continue dd = (xx - x)**2 +", "suite. # Licensed under a 3-clause BSD style license - see LICENSE try:", "3-clause BSD style license - see LICENSE try: import pyfits except ImportError: try:", "range(planes): # I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() * starfrac for p", "yy = iy + dy if yy < 0 or yy >= h:", "import pyfits except ImportError: try: from astropy.io import fits as pyfits except ImportError:", "pyfits.open('16b.fits')[1].data X = XY.field('X') Y = XY.field('Y') psfw = 1.0 stars = zeros((h,w)).astype(float)", "<reponame>juandesant/astrometry.net # This file is part of the Astrometry.net suite. 
# Licensed under", "(1.0 - origfrac) + (1.0 - maxorig) #for p in range(planes): # I[:,:,p]", "* 2 * math.pi #origfrac = 0.5 #maxorig = I.max() #starfrac = (1.0", "* 0.7 + stars/stars.max() * 0.8 f=open('out.ppm', 'wb') f.write('P6 %i %i %i\\n' %", "XY.field('Y') psfw = 1.0 stars = zeros((h,w)).astype(float) for (x,y) in zip(X,Y): ix =", "p in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI = (I.ravel() * 255.0).round().astype(int) f.write(\"\".join([chr(min(i,255))", "from numpy.oldnumeric.functions import zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape XY = pyfits.open('16b.fits')[1].data", "imread from numpy.oldnumeric.functions import zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape XY =", "dx in range(-5, 6): xx = ix + dx if xx < 0", "# f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI = (I.ravel() * 255.0).round().astype(int) f.write(\"\".join([chr(min(i,255)) for i in", "the Astrometry.net suite. # Licensed under a 3-clause BSD style license - see", "maxorig) #for p in range(planes): # I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max()", ">= h: continue for dx in range(-5, 6): xx = ix + dx", "dd = (xx - x)**2 + (yy - y)**2 stars[yy,xx] += exp(-dd /", "if yy < 0 or yy >= h: continue for dx in range(-5,", "* psfw**2)) #1./(psfw**2 * 2 * math.pi #origfrac = 0.5 #maxorig = I.max()", "origfrac) + (1.0 - maxorig) #for p in range(planes): # I[:,:,p] = I[:,:,p]", "LICENSE try: import pyfits except ImportError: try: from astropy.io import fits as pyfits", "I.max() #starfrac = (1.0 - origfrac) + (1.0 - maxorig) #for p in", "in range(planes): # I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() * starfrac for", "see LICENSE try: import pyfits except ImportError: try: from astropy.io import fits as", "pyfits except ImportError: try: from astropy.io import fits as pyfits except ImportError: raise", "continue dd = (xx - x)**2 + (yy - y)**2 stars[yy,xx] += exp(-dd", "# for i in range(w): # for p in range(planes): # 
f.write(chr(int(round(I[j,i,p] *", ">= w: continue dd = (xx - x)**2 + (yy - y)**2 stars[yy,xx]", "(1.0 - maxorig) #for p in range(planes): # I[:,:,p] = I[:,:,p] * origfrac", "h, 255)) #for j in range(h): # for i in range(w): # for", "range(h): # for i in range(w): # for p in range(planes): # f.write(chr(int(round(I[j,i,p]", "for dy in range(-5, 6): yy = iy + dy if yy <", "except ImportError: raise ImportError(\"Cannot import either pyfits or astropy.io.fits\") import math from math", "or xx >= w: continue dd = (xx - x)**2 + (yy -", "* 255.0)))) flatI = (I.ravel() * 255.0).round().astype(int) f.write(\"\".join([chr(min(i,255)) for i in flatI])) f.close()", "or astropy.io.fits\") import math from math import exp from matplotlib.pylab import imread from", "6): xx = ix + dx if xx < 0 or xx >=", "Licensed under a 3-clause BSD style license - see LICENSE try: import pyfits", "ImportError(\"Cannot import either pyfits or astropy.io.fits\") import math from math import exp from", "xx >= w: continue dd = (xx - x)**2 + (yy - y)**2", "in range(-5, 6): yy = iy + dy if yy < 0 or", "zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape XY = pyfits.open('16b.fits')[1].data X = XY.field('X')", "0 or yy >= h: continue for dx in range(-5, 6): xx =", "#starfrac = (1.0 - origfrac) + (1.0 - maxorig) #for p in range(planes):", "matplotlib.pylab import imread from numpy.oldnumeric.functions import zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape", "(w, h, 255)) #for j in range(h): # for i in range(w): #", "try: from astropy.io import fits as pyfits except ImportError: raise ImportError(\"Cannot import either", "dy if yy < 0 or yy >= h: continue for dx in", "dx if xx < 0 or xx >= w: continue dd = (xx", "w: continue dd = (xx - x)**2 + (yy - y)**2 stars[yy,xx] +=", "numpy.oldnumeric.functions import zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape XY = pyfits.open('16b.fits')[1].data X", "pyfits except ImportError: raise ImportError(\"Cannot 
import either pyfits or astropy.io.fits\") import math from", "I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape XY = pyfits.open('16b.fits')[1].data X = XY.field('X') Y =", "255)) #for j in range(h): # for i in range(w): # for p", "iy = int(round(y)) for dy in range(-5, 6): yy = iy + dy", "+ stars/stars.max() * 0.8 f=open('out.ppm', 'wb') f.write('P6 %i %i %i\\n' % (w, h,", "+ (1.0 - maxorig) #for p in range(planes): # I[:,:,p] = I[:,:,p] *", "% (w, h, 255)) #for j in range(h): # for i in range(w):", "from math import exp from matplotlib.pylab import imread from numpy.oldnumeric.functions import zeros, ravel", "= I[:,:,p] * 0.7 + stars/stars.max() * 0.8 f=open('out.ppm', 'wb') f.write('P6 %i %i", "file is part of the Astrometry.net suite. # Licensed under a 3-clause BSD", "of the Astrometry.net suite. # Licensed under a 3-clause BSD style license -", "= I.max() #starfrac = (1.0 - origfrac) + (1.0 - maxorig) #for p", "yy >= h: continue for dx in range(-5, 6): xx = ix +", "exp from matplotlib.pylab import imread from numpy.oldnumeric.functions import zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes)", "raise ImportError(\"Cannot import either pyfits or astropy.io.fits\") import math from math import exp", "ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape XY = pyfits.open('16b.fits')[1].data X = XY.field('X') Y", "I[:,:,p] * 0.7 + stars/stars.max() * 0.8 f=open('out.ppm', 'wb') f.write('P6 %i %i %i\\n'", "= ix + dx if xx < 0 or xx >= w: continue", "= (xx - x)**2 + (yy - y)**2 stars[yy,xx] += exp(-dd / (2", "#1./(psfw**2 * 2 * math.pi #origfrac = 0.5 #maxorig = I.max() #starfrac =", "import fits as pyfits except ImportError: raise ImportError(\"Cannot import either pyfits or astropy.io.fits\")", "+= exp(-dd / (2 * psfw**2)) #1./(psfw**2 * 2 * math.pi #origfrac =", "continue for dx in range(-5, 6): xx = ix + dx if xx", "range(-5, 6): xx = ix + dx if xx < 0 or xx", "- origfrac) + (1.0 - maxorig) #for p in range(planes): # I[:,:,p] =", "import imread 
from numpy.oldnumeric.functions import zeros, ravel I=imread('3.png') I=I[:,:,:3] (h,w,planes) = I.shape XY", "starfrac for p in range(planes): I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max() *", "y)**2 stars[yy,xx] += exp(-dd / (2 * psfw**2)) #1./(psfw**2 * 2 * math.pi", "p in range(planes): I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max() * 0.8 f=open('out.ppm',", "%i %i\\n' % (w, h, 255)) #for j in range(h): # for i", "%i\\n' % (w, h, 255)) #for j in range(h): # for i in", "try: import pyfits except ImportError: try: from astropy.io import fits as pyfits except", "- x)**2 + (yy - y)**2 stars[yy,xx] += exp(-dd / (2 * psfw**2))", "%i %i %i\\n' % (w, h, 255)) #for j in range(h): # for", "style license - see LICENSE try: import pyfits except ImportError: try: from astropy.io", "I.shape XY = pyfits.open('16b.fits')[1].data X = XY.field('X') Y = XY.field('Y') psfw = 1.0", "+ dy if yy < 0 or yy >= h: continue for dx", "for i in range(w): # for p in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0))))", "+ (yy - y)**2 stars[yy,xx] += exp(-dd / (2 * psfw**2)) #1./(psfw**2 *", "zip(X,Y): ix = int(round(x)) iy = int(round(y)) for dy in range(-5, 6): yy", "in range(w): # for p in range(planes): # f.write(chr(int(round(I[j,i,p] * 255.0)))) flatI =", "stars = zeros((h,w)).astype(float) for (x,y) in zip(X,Y): ix = int(round(x)) iy = int(round(y))", "XY.field('X') Y = XY.field('Y') psfw = 1.0 stars = zeros((h,w)).astype(float) for (x,y) in", "# I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() * starfrac for p in", "xx < 0 or xx >= w: continue dd = (xx - x)**2", "- y)**2 stars[yy,xx] += exp(-dd / (2 * psfw**2)) #1./(psfw**2 * 2 *", "a 3-clause BSD style license - see LICENSE try: import pyfits except ImportError:", "< 0 or yy >= h: continue for dx in range(-5, 6): xx", "- see LICENSE try: import pyfits except ImportError: try: from astropy.io import fits", "* origfrac + stars/stars.max() * starfrac for p in range(planes): I[:,:,p] = I[:,:,p]" ]
[ "import tsinfer import tskit if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A", "times = sd.sites_time[:] for j, variant in enumerate(sd.variants(inference_sites=True)): time = variant.site.time if time", "parallelising the inference. This script takes a sampledata file (usually containing missing data),", "ending in '.samples\") args = parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:] for", "sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived / counts.known sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples print(", "missing data), calculates the times-as-freq values, then bins them into frequency bands. \"\"\"", "bins them into frequency bands. \"\"\" import argparse import numpy as np import", "counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have no obvious freq-as-time values assert counts.known", "if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample file ending", "https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived / counts.known sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples print( \"Number", "counts.derived / counts.known sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples print( \"Number of samples:\", sd.num_samples,", "each time point, which can cause difficulties parallelising the inference. 
This script takes", "script takes a sampledata file (usually containing missing data), calculates the times-as-freq values,", "different time points, often only one ancestor in each time point, which can", "'.samples\") args = parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:] for j, variant", "no obvious freq-as-time values assert counts.known != counts.derived assert counts.known != counts.ancestral assert", "bands. \"\"\" import argparse import numpy as np import tsinfer import tskit if", "j, variant in enumerate(sd.variants(inference_sites=True)): time = variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED: counts =", "= parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:] for j, variant in enumerate(sd.variants(inference_sites=True)):", "\"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample file ending in '.samples\") parser.add_argument(\"output_file\",", "missing data create ancestors at many different time points, often only one ancestor", "one ancestor in each time point, which can cause difficulties parallelising the inference.", "a sampledata file (usually containing missing data), calculates the times-as-freq values, then bins", "times-as-freq values, then bins them into frequency bands. \"\"\" import argparse import numpy", "them into frequency bands. 
\"\"\" import argparse import numpy as np import tsinfer", "values assert counts.known != counts.derived assert counts.known != counts.ancestral assert counts.known > 0", "sites have no obvious freq-as-time values assert counts.known != counts.derived assert counts.known !=", "that if n_alleles > 2 this # may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id]", "variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have no", "!= counts.derived assert counts.known != counts.ancestral assert counts.known > 0 # Time =", "tskit if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample file", "freq of *all* derived alleles. Note that if n_alleles > 2 this #", "# may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived / counts.known sd.sites_time[:] =", "== \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample file ending in '.samples\")", "assert counts.known != counts.ancestral assert counts.known > 0 # Time = freq of", "sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples print( \"Number of samples:\", sd.num_samples, \". Number of", "Time = freq of *all* derived alleles. Note that if n_alleles > 2", "be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived / counts.known sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples", "numpy as np import tsinfer import tskit if __name__ == \"__main__\": parser =", "*all* derived alleles. Note that if n_alleles > 2 this # may not", "data), calculates the times-as-freq values, then bins them into frequency bands. 
\"\"\" import", "counts.known != counts.derived assert counts.known != counts.ancestral assert counts.known > 0 # Time", "sd.num_samples)/sd.num_samples print( \"Number of samples:\", sd.num_samples, \". Number of discrete times:\", len(np.unique(sd.sites_time[:]))) sd.finalise()", "the inference. This script takes a sampledata file (usually containing missing data), calculates", "# Non-variable sites have no obvious freq-as-time values assert counts.known != counts.derived assert", "parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample file ending in '.samples\") parser.add_argument(\"output_file\", help=\"A", "This script takes a sampledata file (usually containing missing data), calculates the times-as-freq", "if n_alleles > 2 this # may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] =", "\"\"\" import argparse import numpy as np import tsinfer import tskit if __name__", "sd.sites_time[:] for j, variant in enumerate(sd.variants(inference_sites=True)): time = variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED:", "not be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived / counts.known sd.sites_time[:] = np.around(times *", "tsinfer sample file ending in '.samples\") args = parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times", "alleles. Note that if n_alleles > 2 this # may not be sensible:", "freq-as-time values assert counts.known != counts.derived assert counts.known != counts.ancestral assert counts.known >", "np.around(times * sd.num_samples)/sd.num_samples print( \"Number of samples:\", sd.num_samples, \". Number of discrete times:\",", "file ending in '.samples\") args = parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:]", "difficulties parallelising the inference. 
This script takes a sampledata file (usually containing missing", "the times-as-freq values, then bins them into frequency bands. \"\"\" import argparse import", "takes a sampledata file (usually containing missing data), calculates the times-as-freq values, then", "parser.add_argument(\"output_file\", help=\"A tsinfer sample file ending in '.samples\") args = parser.parse_args() sd =", "this # may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived / counts.known sd.sites_time[:]", "can cause difficulties parallelising the inference. This script takes a sampledata file (usually", "sample file ending in '.samples\") args = parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times =", "import argparse import numpy as np import tsinfer import tskit if __name__ ==", "* sd.num_samples)/sd.num_samples print( \"Number of samples:\", sd.num_samples, \". Number of discrete times:\", len(np.unique(sd.sites_time[:])))", "tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:] for j, variant in enumerate(sd.variants(inference_sites=True)): time = variant.site.time if", "# Time = freq of *all* derived alleles. Note that if n_alleles >", "Non-variable sites have no obvious freq-as-time values assert counts.known != counts.derived assert counts.known", "= variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have", "file ending in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample file ending in '.samples\") args", "counts.known sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples print( \"Number of samples:\", sd.num_samples, \". Number", "counts.known > 0 # Time = freq of *all* derived alleles. 
Note that", "__name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample file ending in", "create ancestors at many different time points, often only one ancestor in each", "np import tsinfer import tskit if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\",", "which can cause difficulties parallelising the inference. This script takes a sampledata file", "tsinfer sample file ending in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample file ending in", "in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample file ending in '.samples\") args = parser.parse_args()", "argparse import numpy as np import tsinfer import tskit if __name__ == \"__main__\":", "have no obvious freq-as-time values assert counts.known != counts.derived assert counts.known != counts.ancestral", "with missing data create ancestors at many different time points, often only one", "file (usually containing missing data), calculates the times-as-freq values, then bins them into", "help=\"A tsinfer sample file ending in '.samples\") args = parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file)", "assert counts.known != counts.derived assert counts.known != counts.ancestral assert counts.known > 0 #", "if time == tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have no obvious", "files with missing data create ancestors at many different time points, often only", "help=\"A tsinfer sample file ending in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample file ending", "\"\"\" Sample data files with missing data create ancestors at many different time", "Sample data files with missing data create ancestors at many different time points,", "2 this # may not be sensible: 
https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived / counts.known", "data create ancestors at many different time points, often only one ancestor in", "args = parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:] for j, variant in", "data files with missing data create ancestors at many different time points, often", "as np import tsinfer import tskit if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__)", "in enumerate(sd.variants(inference_sites=True)): time = variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) #", "counts.known != counts.ancestral assert counts.known > 0 # Time = freq of *all*", "ancestor in each time point, which can cause difficulties parallelising the inference. This", "tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have no obvious freq-as-time values assert counts.known != counts.derived", "= freq of *all* derived alleles. Note that if n_alleles > 2 this", "import numpy as np import tsinfer import tskit if __name__ == \"__main__\": parser", "obvious freq-as-time values assert counts.known != counts.derived assert counts.known != counts.ancestral assert counts.known", "== tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have no obvious freq-as-time values", "many different time points, often only one ancestor in each time point, which", "parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:] for j, variant in enumerate(sd.variants(inference_sites=True)): time", "then bins them into frequency bands. \"\"\" import argparse import numpy as np", "> 0 # Time = freq of *all* derived alleles. Note that if", "0 # Time = freq of *all* derived alleles. 
Note that if n_alleles", "Note that if n_alleles > 2 this # may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228", "/ counts.known sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples print( \"Number of samples:\", sd.num_samples, \".", "calculates the times-as-freq values, then bins them into frequency bands. \"\"\" import argparse", "points, often only one ancestor in each time point, which can cause difficulties", "values, then bins them into frequency bands. \"\"\" import argparse import numpy as", "point, which can cause difficulties parallelising the inference. This script takes a sampledata", "ancestors at many different time points, often only one ancestor in each time", "import tskit if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample", "containing missing data), calculates the times-as-freq values, then bins them into frequency bands.", "often only one ancestor in each time point, which can cause difficulties parallelising", "inference. This script takes a sampledata file (usually containing missing data), calculates the", "in '.samples\") args = parser.parse_args() sd = tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:] for j,", "in each time point, which can cause difficulties parallelising the inference. This script", "sd = tsinfer.load(args.input_file).copy(path=args.output_file) times = sd.sites_time[:] for j, variant in enumerate(sd.variants(inference_sites=True)): time =", "assert counts.known > 0 # Time = freq of *all* derived alleles. Note", "time points, often only one ancestor in each time point, which can cause", "cause difficulties parallelising the inference. 
This script takes a sampledata file (usually containing", "argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample file ending in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample", "sample file ending in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample file ending in '.samples\")", "time == tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have no obvious freq-as-time", "(usually containing missing data), calculates the times-as-freq values, then bins them into frequency", "parser.add_argument(\"input_file\", help=\"A tsinfer sample file ending in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample file", "'.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample file ending in '.samples\") args = parser.parse_args() sd", "tsinfer import tskit if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer", "= sd.sites_time[:] for j, variant in enumerate(sd.variants(inference_sites=True)): time = variant.site.time if time ==", "sampledata file (usually containing missing data), calculates the times-as-freq values, then bins them", "times[variant.site.id] = counts.derived / counts.known sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples print( \"Number of", "!= counts.ancestral assert counts.known > 0 # Time = freq of *all* derived", "ending in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer sample file ending in '.samples\") args =", "at many different time points, often only one ancestor in each time point,", "= argparse.ArgumentParser(description=__doc__) parser.add_argument(\"input_file\", help=\"A tsinfer sample file ending in '.samples\") parser.add_argument(\"output_file\", help=\"A tsinfer", "= tsinfer.load(args.input_file).copy(path=args.output_file) 
times = sd.sites_time[:] for j, variant in enumerate(sd.variants(inference_sites=True)): time = variant.site.time", "tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have no obvious freq-as-time values assert", "n_alleles > 2 this # may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived", "= counts.derived / counts.known sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples print( \"Number of samples:\",", "= np.around(times * sd.num_samples)/sd.num_samples print( \"Number of samples:\", sd.num_samples, \". Number of discrete", "= tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites have no obvious freq-as-time values assert counts.known !=", "frequency bands. \"\"\" import argparse import numpy as np import tsinfer import tskit", "time = variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable sites", "derived alleles. Note that if n_alleles > 2 this # may not be", "> 2 this # may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived /", "of *all* derived alleles. 
Note that if n_alleles > 2 this # may", "only one ancestor in each time point, which can cause difficulties parallelising the", "counts.derived assert counts.known != counts.ancestral assert counts.known > 0 # Time = freq", "counts.ancestral assert counts.known > 0 # Time = freq of *all* derived alleles.", "for j, variant in enumerate(sd.variants(inference_sites=True)): time = variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED: counts", "variant in enumerate(sd.variants(inference_sites=True)): time = variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes)", "enumerate(sd.variants(inference_sites=True)): time = variant.site.time if time == tsinfer.constants.TIME_UNSPECIFIED: counts = tsinfer.formats.allele_counts(variant.genotypes) # Non-variable", "may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228 times[variant.site.id] = counts.derived / counts.known sd.sites_time[:] = np.around(times", "time point, which can cause difficulties parallelising the inference. This script takes a", "into frequency bands. \"\"\" import argparse import numpy as np import tsinfer import" ]
[ "on 2021-05-10 00:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "Generated by Django 3.2 on 2021-05-10 00:54 from django.db import migrations, models class", "Django 3.2 on 2021-05-10 00:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "00:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('library_api', '0038_auto_20210510_0054'),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('library_api', '0038_auto_20210510_0054'), ] operations =", "[ ('library_api', '0038_auto_20210510_0054'), ] operations = [ migrations.AlterField( model_name='denda', name='jumlah_hari_telat', field=models.IntegerField(null=True), ), ]", "class Migration(migrations.Migration): dependencies = [ ('library_api', '0038_auto_20210510_0054'), ] operations = [ migrations.AlterField( model_name='denda',", "dependencies = [ ('library_api', '0038_auto_20210510_0054'), ] operations = [ migrations.AlterField( model_name='denda', name='jumlah_hari_telat', field=models.IntegerField(null=True),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('library_api', '0038_auto_20210510_0054'), ] operations = [", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('library_api', '0038_auto_20210510_0054'), ] operations", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('library_api', '0038_auto_20210510_0054'), ]", "= [ ('library_api', '0038_auto_20210510_0054'), ] operations = [ migrations.AlterField( model_name='denda', name='jumlah_hari_telat', field=models.IntegerField(null=True), ),", "models class Migration(migrations.Migration): dependencies = [ ('library_api', '0038_auto_20210510_0054'), ] operations = [ migrations.AlterField(", "3.2 on 2021-05-10 00:54 from django.db import migrations, models 
class Migration(migrations.Migration): dependencies =", "by Django 3.2 on 2021-05-10 00:54 from django.db import migrations, models class Migration(migrations.Migration):", "2021-05-10 00:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('library_api',", "Migration(migrations.Migration): dependencies = [ ('library_api', '0038_auto_20210510_0054'), ] operations = [ migrations.AlterField( model_name='denda', name='jumlah_hari_telat',", "# Generated by Django 3.2 on 2021-05-10 00:54 from django.db import migrations, models" ]
[ "locale (str): locale used when building package :param dmg_file (str): DMG file to", "app description from :param sap_code (str): application SAP code :param locale (str): locale", "\"\"\"List supported locale codes\"\"\" print(\"Supported locales:\") for locale in SUPPORTED_LOCALES: print(f\" - {locale!r}\")", "list = field(compare=False) app_icon: Union[Path, None] = field(compare=False) icon_dir: Path = field(compare=False, repr=False)", "Any]]) -> Dict[Any, Any]: \"\"\"Pull out the relevant HDMedia dictionary based on SAP", "Dict[Any, Any]) -> Dict[Any, Any]: \"\"\"Process specific components of the OptionXML dict :param", "json_file = application.find_application_json(install_pkg, sap_code) app_json = application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError:", "pull values from :param acrobat (bool): process different values from the XML\"\"\" #", "if len(descriptions) > 1 else \"\".join(descriptions) return result def guess_pkginfo_file(pkg_name: Path, version: str,", "locale: str) -> str: \"\"\"Process the Application.json file to get a description to", "\"Safari\"]} # Current SAP codes for Adobe products. 
# https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\":", "uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str = \"en_GB\", dmg_file: Optional[Path] = None) ->", "a dict, depending on whether # the value is being passed in from", "\"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe Animate and Mobile Device Packaging\",", ":param dmg_file (str): DMG file to mount (currently only applies to Acrobat)\"\"\" opt_xml", "Any, Dict, List, Optional, Union, TYPE_CHECKING from urllib.parse import urlparse from .appicon import", "Path, version: str, pkginfo_ext: str) -> str: \"\"\"Guess the resulting pkginfo file based", "descriptions = list() # Adobe does weird stuff, like duplicate strings... for desc", "= install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"] =", "BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if", "= install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"] = install_pkg package[\"uninstaller\"] =", "as other packages, so handle # this through the try/except catcher try: hdmedia", "read_xml from . import acrobat from . 
import application if TYPE_CHECKING: from .munkirepo", "pkg_name, arch, and sap_code only arch: str = field(compare=True) sap_code: str = field(compare=True)", "sap_code, prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit() def list_locales() -> None:", "to pull values from :param acrobat (bool): process different values from the XML\"\"\"", "not appear to have the # same HDMedias key structure as other packages,", "arch: str = field(compare=True) sap_code: str = field(compare=True) display_name: str = field(compare=False) version:", "in SAP_CODES.items()], key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for sap_code, prod_name", "open(f, \"rb\") as plist_file: plist = plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return result def", "uninstall_pkg (Path): path to uninstall package :param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences :param", "\"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class AdobePackage: pkg_name: str =", "= display_name result[\"arch\"] = \"x86_64\" if arch and arch == \"x64\" else arch", "list()) package[\"receipts\"] = list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"]", "str) -> str: \"\"\"Process the Application.json file to get a description to use", "class AdobePackage: pkg_name: str = field(compare=True) # Compare on pkg_name, arch, and sap_code", "package[\"sap_code\"], locale) if package[\"sap_code\"] == \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"]", "package = process_opt_xml(install_info) package[\"installer\"] = install_pkg package[\"uninstaller\"] = 
uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"]", "the package based on information in the media dict :param sap_code (str): SAP", "= None with open(f, \"rb\") as plist_file: plist = plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\")", "self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None: \"\"\"List SAP codes with human friendly names\"\"\" padding", "None with open(f, \"rb\") as plist_file: plist = plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return", "OptionXML dict :param xml (dict): dictionary to pull values from :param acrobat (bool):", "the package\"\"\" json_file = application.find_application_json(install_pkg, sap_code) app_json = application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"]", "ignore[arg-type] package[\"description\"] = \"Adobe Acrobat Pro DC makes your job easier every day", "from dataclasses import dataclass, field from pathlib import Path from sys import exit", "_locale in SUPPORTED_LOCALES and desc[\"value\"] not in descriptions: descriptions.append(desc[\"value\"]) result = \" \".join(descriptions)", "of MunkiImportPreferences :param locale (str): locale used when building package :param dmg_file (str):", "exit() def get_min_os_ver(f: Path) -> str: \"\"\"Get the minium OS version required :param", "sap_code in SAP_CODES: result = media break except AttributeError: result = hdmedia return", "from .munkirepo import MunkiImportPreferences # Blocking apps BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\", \"Safari\"]}", "display_name: str = field(compare=False) version: str = field(compare=False) min_os: str = field(compare=False) installer:", "[\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\",", "to have the # same HDMedias key structure as other 
packages, so handle", "dataclasses import dataclass, field from pathlib import Path from sys import exit from", "import plistlib from dataclasses import dataclass, field from pathlib import Path from sys", "-> str: \"\"\"Process the Application.json file to get a description to use in", "version: str, pkginfo_ext: str) -> str: \"\"\"Guess the resulting pkginfo file based on", "observed munkiimport behaviour :param pkg_name (str): the package name :param version (str): the", "list_locales() -> None: \"\"\"List supported locale codes\"\"\" print(\"Supported locales:\") for locale in SUPPORTED_LOCALES:", "Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe Animate and Mobile Device Packaging\", \"FRSC\": \"Adobe", "= app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list() # Adobe does weird stuff, like duplicate strings...", "Optional[Path] = None) -> AdobePackage: \"\"\"Process an installer package for product information :param", "# Current SAP codes for Adobe products. 
# https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe", "relevant HDMedia dictionary based on SAP code values :param hdmedia (list): list of", "\"x64\" else arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return result def process_app_description(install_pkg:", "= list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\":", "Designer\", \"SBSTP\": \"Adobe Substance Painter\", \"SPRK\": \"Adobe XD\"} # Supported locales SUPPORTED_LOCALES =", "= field(compare=False) app_icon: Union[Path, None] = field(compare=False) icon_dir: Path = field(compare=False, repr=False) description:", "pull OS requirements from\"\"\" result = None with open(f, \"rb\") as plist_file: plist", "\"\"\"Process specific components of the OptionXML dict :param xml (dict): dictionary to pull", "locale) if package[\"sap_code\"] == \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"] =", "instance of MunkiImportPreferences :param locale (str): locale used when building package :param dmg_file", "\"x86_64\" if arch and arch == \"x64\" else arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"]", "Alchemist\", \"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\": \"Adobe Substance Painter\", \"SPRK\": \"Adobe XD\"} #", "Any]: \"\"\"Process specific components of the OptionXML dict :param xml (dict): dictionary to", "if TYPE_CHECKING: from .munkirepo import MunkiImportPreferences # Blocking apps BLOCKING_APPS = {\"APRO\": [\"Microsoft", "get_min_os_ver(f: Path) -> str: \"\"\"Get the minium OS version required :param f (Path):", "to uninstall package :param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences :param locale 
(str): locale", "hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() sap_code = hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name", "\"Adobe Dimension\", \"FLPR\": \"Adobe Animate and Mobile Device Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\":", "media dict :param sap_code (str): SAP Code for the product\"\"\" return SAP_CODES[sap_code] def", "(dict): dictionary to pull values from :param acrobat (bool): process different values from", "Application.json file to get a description to use in munki :param install_pkg (Path):", "= field(compare=False) blocking_apps: list = field(compare=False) app_icon: Union[Path, None] = field(compare=False) icon_dir: Path", "in descriptions: descriptions.append(desc[\"value\"]) result = \" \".join(descriptions) if len(descriptions) > 1 else \"\".join(descriptions)", "get a description to use in munki :param install_pkg (Path): install package to", "= media break except AttributeError: result = hdmedia return result def process_display_name(sap_code: str)", "HDMedia can be either a list or a dict, depending on whether #", "\"\"\"Process an installer package for product information :param install_pkg (Path): path to install", "SAP code :param locale (str): locale value used when building the package\"\"\" json_file", "DC makes your job easier every day with the trusted PDF converter.\" package.update(acrobat_patches)", "KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list() # Adobe does weird stuff, like", "str = field(compare=True) sap_code: str = field(compare=True) display_name: str = field(compare=False) version: str", "can be either a list or a dict, depending on whether # the", "is being passed in from Adobe Acrobat or a # optionXML.xml file from", "str: \"\"\"Get the minium OS version required :param f (Path): Info.plist file to", "InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\", 
\"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom", "__post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None: \"\"\"List SAP codes with human", "\"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe Character Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\",", "= install_pkg package[\"uninstaller\"] = uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"]", "field(compare=False, repr=False) imported: bool = field(default=False, compare=False) def __post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def", ":param hdmedia (list): list of HDMedia dictionaries\"\"\" # Note: HDMedia can be either", "file based on observed munkiimport behaviour :param pkg_name (str): the package name :param", "in hdmedia: sap_code = media.get(\"SAPCode\") if sap_code and sap_code in SAP_CODES: result =", "optionXML.xml file from other Adobe apps try: for media in hdmedia: sap_code =", "catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict()", "-> AdobePackage: \"\"\"Process an installer package for product information :param install_pkg (Path): path", "TYPE_CHECKING: from .munkirepo import MunkiImportPreferences # Blocking apps BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\",", "\"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class AdobePackage: pkg_name: str = field(compare=True) # Compare", "munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs:", "apps BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\", \"Safari\"]} # Current SAP codes for Adobe", "for product information :param install_pkg 
(Path): path to install package :param uninstall_pkg (Path):", "str = field(compare=False) version: str = field(compare=False) min_os: str = field(compare=False) installer: Path", "desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list() # Adobe does weird stuff, like duplicate", "\"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\": \"Adobe Substance Painter\", \"SPRK\":", "= hdmedia return result def process_display_name(sap_code: str) -> str: \"\"\"Parse out a display", "path to uninstall package :param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences :param locale (str):", "repr=False) description: str = field(compare=False) pkginfo_file: str = field(compare=False, repr=False) imported: bool =", "result = plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any, Any]:", "= acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"] = \"Adobe Acrobat Pro DC makes your", "for the product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]: \"\"\"Process", "based on SAP code values :param hdmedia (list): list of HDMedia dictionaries\"\"\" #", "result = hdmedia return result def process_display_name(sap_code: str) -> str: \"\"\"Parse out a", "\"Adobe XD\"} # Supported locales SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\",", "a display name for the package based on information in the media dict", ":param xml (dict): dictionary to pull values from :param acrobat (bool): process different", "components of the OptionXML dict :param xml (dict): dictionary to pull values from", "the application verision :param pkginfo_ext (str): the pkginfo extension per munkirepo configuration\"\"\" result", "stuff, like duplicate strings... 
for desc in desc_locales: _locale = desc[\"locale\"] if _locale", "from sys import exit from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING", "# this through the try/except catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia", "plistlib from dataclasses import dataclass, field from pathlib import Path from sys import", "on pkg_name, arch, and sap_code only arch: str = field(compare=True) sap_code: str =", "field(compare=False) icon_dir: Path = field(compare=False, repr=False) description: str = field(compare=False) pkginfo_file: str =", "arch == \"x64\" else arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return result", "verision :param pkginfo_ext (str): the pkginfo extension per munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\"", "{source}\") for sap_code, prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit() def list_locales()", "hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() sap_code =", "depending on whether # the value is being passed in from Adobe Acrobat", "based on information in the media dict :param sap_code (str): SAP Code for", "= (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for sap_code, prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)}", "Photoshop\", \"PPRO\": \"Adobe Premiere Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe Premiere Rush\", \"SBSTA\":", "for locale in SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit() def get_min_os_ver(f: Path) -> str:", "result[\"arch\"] = \"x86_64\" if arch and arch == \"x64\" else arch result[\"version\"] =", "xml (dict): dictionary to pull values from :param acrobat (bool): process 
different values", "= \"en_GB\", dmg_file: Optional[Path] = None) -> AdobePackage: \"\"\"Process an installer package for", "result def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str) -> str: \"\"\"Guess the resulting", "information in the media dict :param sap_code (str): SAP Code for the product\"\"\"", "package :param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences :param locale (str): locale used when", "= None) -> AdobePackage: \"\"\"Process an installer package for product information :param install_pkg", ":param locale (str): locale used when building package :param dmg_file (str): DMG file", "to install package :param uninstall_pkg (Path): path to uninstall package :param munkiimport_prefs (MunkiImportPreferences):", "exit() def list_locales() -> None: \"\"\"List supported locale codes\"\"\" print(\"Supported locales:\") for locale", "Adobe Acrobat or a # optionXML.xml file from other Adobe apps try: for", "= field(compare=False, repr=False) imported: bool = field(default=False, compare=False) def __post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\")", "= self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None: \"\"\"List SAP codes with human friendly names\"\"\"", "-> str: \"\"\"Get the minium OS version required :param f (Path): Info.plist file", "different values from the XML\"\"\" # Note: The Acrobat optionXML.xml file does not", "and desc[\"value\"] not in descriptions: descriptions.append(desc[\"value\"]) result = \" \".join(descriptions) if len(descriptions) >", "display_name = process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"] = \"x86_64\" if", "a # optionXML.xml file from other Adobe apps try: for media in hdmedia:", "field from pathlib import Path from sys import exit from typing import Any,", "[\"Microsoft Word\", \"Safari\"]} 
# Current SAP codes for Adobe products. # https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES", "\"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\",", "bool = field(default=False, compare=False) def __post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None:", "sap_code: str, locale: str) -> str: \"\"\"Process the Application.json file to get a", "uninstall package :param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences :param locale (str): locale used", "f (Path): Info.plist file to pull OS requirements from\"\"\" result = None with", "padding = len(max([sc for sc, _ in SAP_CODES.items()], key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\")", "acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"] = \"Adobe Acrobat Pro DC makes your job", "description from :param sap_code (str): application SAP code :param locale (str): locale value", "Current SAP codes for Adobe products. # https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe After", "Path = field(compare=False) uninstaller: Path = field(compare=False) receipts: list = field(compare=False) blocking_apps: list", "urlparse from .appicon import find_app_icon from .xmltodict import convert_xml, read_xml from . 
import", "as plist_file: plist = plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia: Union[List,", "\"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class AdobePackage: pkg_name:", "field(compare=False) min_os: str = field(compare=False) installer: Path = field(compare=False) uninstaller: Path = field(compare=False)", "install package to process app description from :param sap_code (str): application SAP code", "= plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any, Any]: \"\"\"Pull", "pkginfo file based on observed munkiimport behaviour :param pkg_name (str): the package name", "minium OS version required :param f (Path): Info.plist file to pull OS requirements", "import exit from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING from urllib.parse", "requirements from\"\"\" result = None with open(f, \"rb\") as plist_file: plist = plistlib.load(plist_file)", "the XML\"\"\" # Note: The Acrobat optionXML.xml file does not appear to have", "information :param install_pkg (Path): path to install package :param uninstall_pkg (Path): path to", "Compare on pkg_name, arch, and sap_code only arch: str = field(compare=True) sap_code: str", "acrobat (bool): process different values from the XML\"\"\" # Note: The Acrobat optionXML.xml", "package :param dmg_file (str): DMG file to mount (currently only applies to Acrobat)\"\"\"", "len(max([sc for sc, _ in SAP_CODES.items()], key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from:", "# https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe After Effects\", \"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe", ":param 
version (str): the application verision :param pkginfo_ext (str): the pkginfo extension per", "package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"] == \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) #", "locale used when building package :param dmg_file (str): DMG file to mount (currently", "or a dict, depending on whether # the value is being passed in", "import acrobat from . import application if TYPE_CHECKING: from .munkirepo import MunkiImportPreferences #", "dictionaries\"\"\" # Note: HDMedia can be either a list or a dict, depending", "SAP Code for the product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any,", "Effects\", \"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe Media Encoder\", \"APRO\": \"Adobe Acrobat Pro\", \"AUDT\":", "Device Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe", "\"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\",", "guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str) -> str: \"\"\"Guess the resulting pkginfo file", "uninstaller: Path = field(compare=False) receipts: list = field(compare=False) blocking_apps: list = field(compare=False) app_icon:", "sys import exit from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING from", "== locale and _locale in SUPPORTED_LOCALES and desc[\"value\"] not in descriptions: descriptions.append(desc[\"value\"]) result", "icon_dir: Path = field(compare=False, repr=False) description: str = field(compare=False) pkginfo_file: str = field(compare=False,", "process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str = \"en_GB\", dmg_file: Optional[Path] =", "\"Adobe InCopy\", \"AME\": \"Adobe Media Encoder\", \"APRO\": \"Adobe Acrobat Pro\", 
\"AUDT\": \"Adobe Audition\",", "-> Dict[Any, Any]: \"\"\"Process specific components of the OptionXML dict :param xml (dict):", "str, pkginfo_ext: str) -> str: \"\"\"Guess the resulting pkginfo file based on observed", "passed in from Adobe Acrobat or a # optionXML.xml file from other Adobe", "a list or a dict, depending on whether # the value is being", "\"en_GB\", dmg_file: Optional[Path] = None) -> AdobePackage: \"\"\"Process an installer package for product", "= field(compare=False) installer: Path = field(compare=False) uninstaller: Path = field(compare=False) receipts: list =", "behaviour :param pkg_name (str): the package name :param version (str): the application verision", "from\"\"\" result = None with open(f, \"rb\") as plist_file: plist = plistlib.load(plist_file) result", "Substance Designer\", \"SBSTP\": \"Adobe Substance Painter\", \"SPRK\": \"Adobe XD\"} # Supported locales SUPPORTED_LOCALES", "arch and arch == \"x64\" else arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code", "the minium OS version required :param f (Path): Info.plist file to pull OS", "package\"\"\" json_file = application.find_application_json(install_pkg, sap_code) app_json = application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except", "package[\"description\"] = \"Adobe Acrobat Pro DC makes your job easier every day with", "\"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class AdobePackage: pkg_name: str = field(compare=True) # Compare on", "sap_code (str): SAP Code for the product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any])", "media.get(\"SAPCode\") if sap_code and sap_code in SAP_CODES: result = media break except AttributeError:", "field(compare=False) version: str = field(compare=False) min_os: str = field(compare=False) installer: Path = field(compare=False)", "self.icon = 
self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None: \"\"\"List SAP codes with human friendly", "package :param uninstall_pkg (Path): path to uninstall package :param munkiimport_prefs (MunkiImportPreferences): instance of", "field(compare=True) sap_code: str = field(compare=True) display_name: str = field(compare=False) version: str = field(compare=False)", "\"\"\"Pull out the relevant HDMedia dictionary based on SAP code values :param hdmedia", "# Supported locales SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\",", "a description to use in munki :param install_pkg (Path): install package to process", "dmg_file: Optional[Path] = None) -> AdobePackage: \"\"\"Process an installer package for product information", "file from other Adobe apps try: for media in hdmedia: sap_code = media.get(\"SAPCode\")", "try/except catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result =", "blocking_apps: list = field(compare=False) app_icon: Union[Path, None] = field(compare=False) icon_dir: Path = field(compare=False,", "package based on information in the media dict :param sap_code (str): SAP Code", "and sap_code only arch: str = field(compare=True) sap_code: str = field(compare=True) display_name: str", "an installer package for product information :param install_pkg (Path): path to install package", "field(compare=False) receipts: list = field(compare=False) blocking_apps: list = field(compare=False) app_icon: Union[Path, None] =", "None] = field(compare=False) icon_dir: Path = field(compare=False, repr=False) description: str = field(compare=False) pkginfo_file:", "or a # optionXML.xml file from other Adobe apps try: for media in", ":param pkg_name (str): the package name :param version (str): the application verision :param", "job easier every day with the trusted 
PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] = guess_pkginfo_file(package[\"pkg_name\"],", "from .xmltodict import convert_xml, read_xml from . import acrobat from . import application", "\"SPRK\": \"Adobe XD\"} # Supported locales SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\",", "-> str: \"\"\"Guess the resulting pkginfo file based on observed munkiimport behaviour :param", "(str): the pkginfo extension per munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result def", ":param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences :param locale (str): locale used when building", "and sap_code in SAP_CODES: result = media break except AttributeError: result = hdmedia", "SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit() def list_locales() -> None: \"\"\"List supported locale", "= field(compare=False) min_os: str = field(compare=False) installer: Path = field(compare=False) uninstaller: Path =", "Character Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe Animate and Mobile", "pathlib import Path from sys import exit from typing import Any, Dict, List,", "if _locale == locale and _locale in SUPPORTED_LOCALES and desc[\"value\"] not in descriptions:", "version: str = field(compare=False) min_os: str = field(compare=False) installer: Path = field(compare=False) uninstaller:", "= sap_code return result def process_app_description(install_pkg: Path, sap_code: str, locale: str) -> str:", "Premiere Rush\", \"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\": \"Adobe Substance", "arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return result def process_app_description(install_pkg: Path, sap_code:", "except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = 
list() # Adobe does weird stuff,", "convert_xml, read_xml from . import acrobat from . import application if TYPE_CHECKING: from", "required :param f (Path): Info.plist file to pull OS requirements from\"\"\" result =", "opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"]", "have the # same HDMedias key structure as other packages, so handle #", "does not appear to have the # same HDMedias key structure as other", "the trusted PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] = guess_pkginfo_file(package[\"pkg_name\"], package[\"version\"], munkiimport_prefs.pkginfo_extension) result = AdobePackage(**package)", "\"Adobe Audition\", \"CHAR\": \"Adobe Character Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\":", "prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit() def list_locales() -> None: \"\"\"List", "\"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\",", "> 1 else \"\".join(descriptions) return result def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str)", "= field(compare=False) icon_dir: Path = field(compare=False, repr=False) description: str = field(compare=False) pkginfo_file: str", "value used when building the package\"\"\" json_file = application.find_application_json(install_pkg, sap_code) app_json = application.read_json_file(json_file)", "product information :param install_pkg (Path): path to install package :param uninstall_pkg (Path): path", "from . 
import application if TYPE_CHECKING: from .munkirepo import MunkiImportPreferences # Blocking apps", "_locale = desc[\"locale\"] if _locale == locale and _locale in SUPPORTED_LOCALES and desc[\"value\"]", "Acrobat optionXML.xml file does not appear to have the # same HDMedias key", "pkginfo_ext (str): the pkginfo extension per munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result", "result[\"display_name\"] = display_name result[\"arch\"] = \"x86_64\" if arch and arch == \"x64\" else", "result def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str = \"en_GB\", dmg_file:", ". import acrobat from . import application if TYPE_CHECKING: from .munkirepo import MunkiImportPreferences", "result = None with open(f, \"rb\") as plist_file: plist = plistlib.load(plist_file) result =", "\"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\",", "{prod_name}\") exit() def list_locales() -> None: \"\"\"List supported locale codes\"\"\" print(\"Supported locales:\") for", "\"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for sap_code, prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\")", "codes\"\"\" print(\"Supported locales:\") for locale in SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit() def get_min_os_ver(f:", "display name for the package based on information in the media dict :param", "\"Adobe Bridge\", \"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\":", "desc[\"value\"] not in descriptions: descriptions.append(desc[\"value\"]) result = \" \".join(descriptions) if len(descriptions) > 1", "descriptions: descriptions.append(desc[\"value\"]) result = \" \".join(descriptions) if len(descriptions) > 1 else \"\".join(descriptions) return", "locale codes\"\"\" 
print(\"Supported locales:\") for locale in SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit() def", "Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"] ==", "\"\"\"List SAP codes with human friendly names\"\"\" padding = len(max([sc for sc, _", "for Adobe products. # https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe After Effects\", \"AICY\": \"Adobe", "configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences',", "locale in SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit() def get_min_os_ver(f: Path) -> str: \"\"\"Get", "handle # this through the try/except catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError:", "Dict, List, Optional, Union, TYPE_CHECKING from urllib.parse import urlparse from .appicon import find_app_icon", "Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe Animate and Mobile Device", "to process app description from :param sap_code (str): application SAP code :param locale", "Rush\", \"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\": \"Adobe Substance Painter\",", "= \"Adobe Acrobat Pro DC makes your job easier every day with the", "def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str = \"en_GB\", dmg_file: Optional[Path]", "out a display name for the package based on information in the media", "= plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) ->", "from :param acrobat 
(bool): process different values from the XML\"\"\" # Note: The", "codes for Adobe products. # https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe After Effects\", \"AICY\":", "type: ignore[arg-type] package[\"description\"] = \"Adobe Acrobat Pro DC makes your job easier every", "MunkiImportPreferences # Blocking apps BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\", \"Safari\"]} # Current SAP", "package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale)", "str = \"en_GB\", dmg_file: Optional[Path] = None) -> AdobePackage: \"\"\"Process an installer package", "strings... for desc in desc_locales: _locale = desc[\"locale\"] if _locale == locale and", "import application if TYPE_CHECKING: from .munkirepo import MunkiImportPreferences # Blocking apps BLOCKING_APPS =", "None) -> AdobePackage: \"\"\"Process an installer package for product information :param install_pkg (Path):", "easier every day with the trusted PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] = guess_pkginfo_file(package[\"pkg_name\"], package[\"version\"],", "version required :param f (Path): Info.plist file to pull OS requirements from\"\"\" result", "installer: Path = field(compare=False) uninstaller: Path = field(compare=False) receipts: list = field(compare=False) blocking_apps:", "\"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\",", "installer package for product information :param install_pkg (Path): path to install package :param", "optionXML.xml file does not appear to have the # same HDMedias key structure", "TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() 
sap_code = hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"]", "= field(compare=True) # Compare on pkg_name, arch, and sap_code only arch: str =", "\"Adobe Premiere Rush\", \"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\": \"Adobe", "str = field(compare=False) installer: Path = field(compare=False) uninstaller: Path = field(compare=False) receipts: list", "result def process_app_description(install_pkg: Path, sap_code: str, locale: str) -> str: \"\"\"Process the Application.json", "names\"\"\" padding = len(max([sc for sc, _ in SAP_CODES.items()], key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\"", "= get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"])", "= application.find_application_json(install_pkg, sap_code) app_json = application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales", "!= \"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"] == \"APRO\": acrobat_patches =", "the OptionXML dict :param xml (dict): dictionary to pull values from :param acrobat", "# type: ignore[arg-type] package[\"description\"] = \"Adobe Acrobat Pro DC makes your job easier", "\"\"\"Process the Application.json file to get a description to use in munki :param", "str: \"\"\"Guess the resulting pkginfo file based on observed munkiimport behaviour :param pkg_name", "def list_locales() -> None: \"\"\"List supported locale codes\"\"\" print(\"Supported locales:\") for locale in", "munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences :param locale (str): locale used when building package", "Path from sys import exit from typing 
import Any, Dict, List, Optional, Union,", "SAP_CODES: result = media break except AttributeError: result = hdmedia return result def", "the relevant HDMedia dictionary based on SAP code values :param hdmedia (list): list", "= \"x86_64\" if arch and arch == \"x64\" else arch result[\"version\"] = hdmedia.get(\"productVersion\")", "media break except AttributeError: result = hdmedia return result def process_display_name(sap_code: str) ->", "SUPPORTED_LOCALES and desc[\"value\"] not in descriptions: descriptions.append(desc[\"value\"]) result = \" \".join(descriptions) if len(descriptions)", "print(\"Supported locales:\") for locale in SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit() def get_min_os_ver(f: Path)", "the product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]: \"\"\"Process specific", "dataclass, field from pathlib import Path from sys import exit from typing import", "munki :param install_pkg (Path): install package to process app description from :param sap_code", "(str): the application verision :param pkginfo_ext (str): the pkginfo extension per munkirepo configuration\"\"\"", "= field(compare=False) uninstaller: Path = field(compare=False) receipts: list = field(compare=False) blocking_apps: list =", ":param sap_code (str): application SAP code :param locale (str): locale value used when", "= dict() sap_code = hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"] =", "Any]) -> Dict[Any, Any]: \"\"\"Process specific components of the OptionXML dict :param xml", "not in descriptions: descriptions.append(desc[\"value\"]) result = \" \".join(descriptions) if len(descriptions) > 1 else", "in munki :param install_pkg (Path): install package to process app description from :param", "str) -> str: \"\"\"Parse out a display name for the package based on", "\"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe Premiere Rush\", 
\"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe", "only arch: str = field(compare=True) sap_code: str = field(compare=True) display_name: str = field(compare=False)", "try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list() #", "\"APRO\": \"Adobe Acrobat Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe Character Animator\", \"DRWV\": \"Adobe", "= len(max([sc for sc, _ in SAP_CODES.items()], key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced", "OS requirements from\"\"\" result = None with open(f, \"rb\") as plist_file: plist =", "def __post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None: \"\"\"List SAP codes with", "application if TYPE_CHECKING: from .munkirepo import MunkiImportPreferences # Blocking apps BLOCKING_APPS = {\"APRO\":", "# Adobe does weird stuff, like duplicate strings... 
for desc in desc_locales: _locale", "Lightroom Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\":", "Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe Premiere Rush\", \"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\":", "Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe", "package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\": package[\"description\"] =", "in desc_locales: _locale = desc[\"locale\"] if _locale == locale and _locale in SUPPORTED_LOCALES", "print(f\" - {locale!r}\") exit() def get_min_os_ver(f: Path) -> str: \"\"\"Get the minium OS", "return result def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str = \"en_GB\",", "to use in munki :param install_pkg (Path): install package to process app description", ":param locale (str): locale value used when building the package\"\"\" json_file = application.find_application_json(install_pkg,", "urllib.parse import urlparse from .appicon import find_app_icon from .xmltodict import convert_xml, read_xml from", "= field(compare=False) receipts: list = field(compare=False) blocking_apps: list = field(compare=False) app_icon: Union[Path, None]", "uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list() package[\"app_icon\"] =", "\"Adobe Media Encoder\", \"APRO\": \"Adobe Acrobat Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe Character", "SAP codes with human friendly names\"\"\" padding = len(max([sc for sc, _ in", "when building the package\"\"\" json_file = application.find_application_json(install_pkg, sap_code) app_json = 
application.read_json_file(json_file) try: desc_locales", "1 else \"\".join(descriptions) return result def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str) ->", "this through the try/except catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia =", "Bridge\", \"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe", "Union[Path, None] = field(compare=False) icon_dir: Path = field(compare=False, repr=False) description: str = field(compare=False)", "if package[\"sap_code\"] != \"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"] == \"APRO\":", "\"\".join(descriptions) return result def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str) -> str: \"\"\"Guess", "app_json = application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions", "(Path): install package to process app description from :param sap_code (str): application SAP", "used when building the package\"\"\" json_file = application.find_application_json(install_pkg, sap_code) app_json = application.read_json_file(json_file) try:", "Word\", \"Safari\"]} # Current SAP codes for Adobe products. 
# https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES =", "\"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\",", ":param f (Path): Info.plist file to pull OS requirements from\"\"\" result = None", "\"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\": \"Adobe Substance Painter\", \"SPRK\": \"Adobe", "\"CHAR\": \"Adobe Character Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe Animate", "\"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\",", ":param install_pkg (Path): path to install package :param uninstall_pkg (Path): path to uninstall", "in SAP_CODES: result = media break except AttributeError: result = hdmedia return result", "per munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg: Path, uninstall_pkg: Path,", "= field(compare=True) sap_code: str = field(compare=True) display_name: str = field(compare=False) version: str =", "# same HDMedias key structure as other packages, so handle # this through", "description to use in munki :param install_pkg (Path): install package to process app", "convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"] = install_pkg package[\"uninstaller\"] = uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist)", "locale and _locale in SUPPORTED_LOCALES and desc[\"value\"] not in descriptions: descriptions.append(desc[\"value\"]) result =", "the value is being passed in from Adobe Acrobat or a # optionXML.xml", "\"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere", "(currently only applies to 
Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info =", "Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere Pro\", \"PRLD\":", "file to mount (currently only applies to Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist =", "@dataclass(eq=True, order=True) class AdobePackage: pkg_name: str = field(compare=True) # Compare on pkg_name, arch,", "\"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere Pro\",", "package[\"sap_code\"] != \"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"] == \"APRO\": acrobat_patches", "Supported locales SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\",", ":param install_pkg (Path): install package to process app description from :param sap_code (str):", "\"PPRO\": \"Adobe Premiere Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe Premiere Rush\", \"SBSTA\": \"Adobe", "sap_code = media.get(\"SAPCode\") if sap_code and sap_code in SAP_CODES: result = media break", "list() # Adobe does weird stuff, like duplicate strings... 
for desc in desc_locales:", "and arch == \"x64\" else arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return", "def get_min_os_ver(f: Path) -> str: \"\"\"Get the minium OS version required :param f", "def list_sap_codes() -> None: \"\"\"List SAP codes with human friendly names\"\"\" padding =", "List, Optional, Union, TYPE_CHECKING from urllib.parse import urlparse from .appicon import find_app_icon from", "description: str = field(compare=False) pkginfo_file: str = field(compare=False, repr=False) imported: bool = field(default=False,", "SAP_CODES = {\"AEFT\": \"Adobe After Effects\", \"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe Media Encoder\",", "result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return result def process_app_description(install_pkg: Path, sap_code: str,", "= application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions =", "\"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\",", "\"SBSTP\": \"Adobe Substance Painter\", \"SPRK\": \"Adobe XD\"} # Supported locales SUPPORTED_LOCALES = [\"ar_AE\",", "from: {source}\") for sap_code, prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit() def", "= {\"APRO\": [\"Microsoft Word\", \"Safari\"]} # Current SAP codes for Adobe products. 
#", "list of HDMedia dictionaries\"\"\" # Note: HDMedia can be either a list or", "in from Adobe Acrobat or a # optionXML.xml file from other Adobe apps", "(Path): path to uninstall package :param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences :param locale", "on whether # the value is being passed in from Adobe Acrobat or", "\"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\",", ".xmltodict import convert_xml, read_xml from . import acrobat from . import application if", "Painter\", \"SPRK\": \"Adobe XD\"} # Supported locales SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\",", "str: \"\"\"Process the Application.json file to get a description to use in munki", "dictionary to pull values from :param acrobat (bool): process different values from the", "products. # https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe After Effects\", \"AICY\": \"Adobe InCopy\", \"AME\":", "does weird stuff, like duplicate strings... 
for desc in desc_locales: _locale = desc[\"locale\"]", "package[\"installer\"] = install_pkg package[\"uninstaller\"] = uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list())", "munkiimport_prefs: 'MunkiImportPreferences', locale: str = \"en_GB\", dmg_file: Optional[Path] = None) -> AdobePackage: \"\"\"Process", "list = field(compare=False) blocking_apps: list = field(compare=False) app_icon: Union[Path, None] = field(compare=False) icon_dir:", "process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]: \"\"\"Process specific components of the OptionXML dict", "Audition\", \"CHAR\": \"Adobe Character Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe", "whether # the value is being passed in from Adobe Acrobat or a", "\"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class AdobePackage: pkg_name: str = field(compare=True)", "process_app_description(install_pkg: Path, sap_code: str, locale: str) -> str: \"\"\"Process the Application.json file to", "package name :param version (str): the application verision :param pkginfo_ext (str): the pkginfo", "dict, depending on whether # the value is being passed in from Adobe", "Package\"\"\" import plistlib from dataclasses import dataclass, field from pathlib import Path from", ":param sap_code (str): SAP Code for the product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any,", "from Adobe Acrobat or a # optionXML.xml file from other Adobe apps try:", "the media dict :param sap_code (str): SAP Code for the product\"\"\" return SAP_CODES[sap_code]", "so handle # this through the try/except catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except", "= convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"] = install_pkg package[\"uninstaller\"] = 
uninstall_pkg package[\"min_os\"] =", "HDMedias key structure as other packages, so handle # this through the try/except", "Note: The Acrobat optionXML.xml file does not appear to have the # same", "package for product information :param install_pkg (Path): path to install package :param uninstall_pkg", "Info.plist file to pull OS requirements from\"\"\" result = None with open(f, \"rb\")", "\"Adobe Acrobat Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe Character Animator\", \"DRWV\": \"Adobe Dreamweaver\",", ".munkirepo import MunkiImportPreferences # Blocking apps BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\", \"Safari\"]} #", "\"FLPR\": \"Adobe Animate and Mobile Device Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\",", "-> None: \"\"\"List supported locale codes\"\"\" print(\"Supported locales:\") for locale in SUPPORTED_LOCALES: print(f\"", "result def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any, Any]: \"\"\"Pull out the relevant", "same HDMedias key structure as other packages, so handle # this through the", "use in munki :param install_pkg (Path): install package to process app description from", "str = field(compare=True) display_name: str = field(compare=False) version: str = field(compare=False) min_os: str", "application.find_application_json(install_pkg, sap_code) app_json = application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales =", "str: \"\"\"Parse out a display name for the package based on information in", "The Acrobat optionXML.xml file does not appear to have the # same HDMedias", "field(compare=False) app_icon: Union[Path, None] = field(compare=False) icon_dir: Path = field(compare=False, repr=False) description: str", "the # same HDMedias key structure as other packages, so handle # this", "import Any, Dict, List, Optional, Union, TYPE_CHECKING from urllib.parse import urlparse from 
.appicon", "for desc in desc_locales: _locale = desc[\"locale\"] if _locale == locale and _locale", "result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"] = \"x86_64\" if arch and arch", "with human friendly names\"\"\" padding = len(max([sc for sc, _ in SAP_CODES.items()], key=len))", "either a list or a dict, depending on whether # the value is", "Adobe does weird stuff, like duplicate strings... for desc in desc_locales: _locale =", "= process_opt_xml(install_info) package[\"installer\"] = install_pkg package[\"uninstaller\"] = uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] =", "in SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit() def get_min_os_ver(f: Path) -> str: \"\"\"Get the", "{sap_code.ljust(padding)} - {prod_name}\") exit() def list_locales() -> None: \"\"\"List supported locale codes\"\"\" print(\"Supported", "pkginfo extension per munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg: Path,", "Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info)", "plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any, Any]: \"\"\"Pull out", "(str): application SAP code :param locale (str): locale value used when building the", "\"KBRG\": \"Adobe Bridge\", \"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\": \"Adobe Photoshop\",", "dict() sap_code = hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\")", "source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" 
\"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for sap_code, prod_name in SAP_CODES.items(): print(f\"", "SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]: \"\"\"Process specific components of the", "version (str): the application verision :param pkginfo_ext (str): the pkginfo extension per munkirepo", "weird stuff, like duplicate strings... for desc in desc_locales: _locale = desc[\"locale\"] if", "through the try/except catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"])", "pkg_name (str): the package name :param version (str): the application verision :param pkginfo_ext", "try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() sap_code", "only applies to Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"]", "process_opt_xml(install_info) package[\"installer\"] = install_pkg package[\"uninstaller\"] = uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"],", "dict :param sap_code (str): SAP Code for the product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info:", "install package :param uninstall_pkg (Path): path to uninstall package :param munkiimport_prefs (MunkiImportPreferences): instance", "app_icon: Union[Path, None] = field(compare=False) icon_dir: Path = field(compare=False, repr=False) description: str =", "sap_code return result def process_app_description(install_pkg: Path, sap_code: str, locale: str) -> str: \"\"\"Process", "\"Adobe Substance Painter\", \"SPRK\": \"Adobe XD\"} # Supported locales 
SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\",", "{\"APRO\": [\"Microsoft Word\", \"Safari\"]} # Current SAP codes for Adobe products. # https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html", "package to process app description from :param sap_code (str): application SAP code :param", "from .appicon import find_app_icon from .xmltodict import convert_xml, read_xml from . import acrobat", "Path) -> str: \"\"\"Get the minium OS version required :param f (Path): Info.plist", "\"\"\"Guess the resulting pkginfo file based on observed munkiimport behaviour :param pkg_name (str):", "with open(f, \"rb\") as plist_file: plist = plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return result", "\"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class AdobePackage: pkg_name: str = field(compare=True) #", "if package[\"sap_code\"] == \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"] = \"Adobe", "Adobe apps try: for media in hdmedia: sap_code = media.get(\"SAPCode\") if sap_code and", "_ in SAP_CODES.items()], key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for sap_code,", "out the relevant HDMedia dictionary based on SAP code values :param hdmedia (list):", "Prelude\", \"RUSH\": \"Adobe Premiere Rush\", \"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe Substance Designer\",", "\"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\",", "if sap_code and sap_code in SAP_CODES: result = media break except AttributeError: result", "when building package :param dmg_file (str): DMG file to mount (currently only applies", "sc, _ in SAP_CODES.items()], key=len)) source = 
(\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for", "(Path): Info.plist file to pull OS requirements from\"\"\" result = None with open(f,", "= find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\": package[\"description\"] = process_app_description(install_pkg,", "your job easier every day with the trusted PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] =", "(MunkiImportPreferences): instance of MunkiImportPreferences :param locale (str): locale used when building package :param", "plist_file: plist = plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia: Union[List, Dict[Any,", "codes with human friendly names\"\"\" padding = len(max([sc for sc, _ in SAP_CODES.items()],", "(str): locale used when building package :param dmg_file (str): DMG file to mount", "Note: HDMedia can be either a list or a dict, depending on whether", "= uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list() package[\"app_icon\"]", "= process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"] == \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) # type:", "\"Adobe After Effects\", \"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe Media Encoder\", \"APRO\": \"Adobe Acrobat", "(list): list of HDMedia dictionaries\"\"\" # Note: HDMedia can be either a list", "# Note: The Acrobat optionXML.xml file does not appear to have the #", "\"Adobe Prelude\", \"RUSH\": \"Adobe Premiere Rush\", \"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe Substance", "{locale!r}\") exit() def get_min_os_ver(f: Path) -> str: \"\"\"Get the 
minium OS version required", "from urllib.parse import urlparse from .appicon import find_app_icon from .xmltodict import convert_xml, read_xml", "with the trusted PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] = guess_pkginfo_file(package[\"pkg_name\"], package[\"version\"], munkiimport_prefs.pkginfo_extension) result =", "import find_app_icon from .xmltodict import convert_xml, read_xml from . import acrobat from .", "Premiere Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe Premiere Rush\", \"SBSTA\": \"Adobe Substance Alchemist\",", "Path = field(compare=False, repr=False) description: str = field(compare=False) pkginfo_file: str = field(compare=False, repr=False)", "in SUPPORTED_LOCALES and desc[\"value\"] not in descriptions: descriptions.append(desc[\"value\"]) result = \" \".join(descriptions) if", "the package name :param version (str): the application verision :param pkginfo_ext (str): the", "\"Adobe Animate and Mobile Device Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\":", "values from :param acrobat (bool): process different values from the XML\"\"\" # Note:", "\".join(descriptions) if len(descriptions) > 1 else \"\".join(descriptions) return result def guess_pkginfo_file(pkg_name: Path, version:", "(bool): process different values from the XML\"\"\" # Note: The Acrobat optionXML.xml file", "repr=False) imported: bool = field(default=False, compare=False) def __post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes()", "= list() # Adobe does weird stuff, like duplicate strings... 
for desc in", "media in hdmedia: sap_code = media.get(\"SAPCode\") if sap_code and sap_code in SAP_CODES: result", "to pull OS requirements from\"\"\" result = None with open(f, \"rb\") as plist_file:", "imported: bool = field(default=False, compare=False) def __post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() ->", "for sc, _ in SAP_CODES.items()], key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\")", "\"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\", \"LRCC\":", "from . import acrobat from . import application if TYPE_CHECKING: from .munkirepo import", "dict :param xml (dict): dictionary to pull values from :param acrobat (bool): process", "key structure as other packages, so handle # this through the try/except catcher", "being passed in from Adobe Acrobat or a # optionXML.xml file from other", "to get a description to use in munki :param install_pkg (Path): install package", "Media Encoder\", \"APRO\": \"Adobe Acrobat Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe Character Animator\",", "(str): locale value used when building the package\"\"\" json_file = application.find_application_json(install_pkg, sap_code) app_json", "building package :param dmg_file (str): DMG file to mount (currently only applies to", "= field(compare=False, repr=False) description: str = field(compare=False) pkginfo_file: str = field(compare=False, repr=False) imported:", "-> None: \"\"\"List SAP codes with human friendly names\"\"\" padding = len(max([sc for", "sap_code = hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"]", "\"zh_TW\"] @dataclass(eq=True, order=True) class 
AdobePackage: pkg_name: str = field(compare=True) # Compare on pkg_name,", "locale (str): locale value used when building the package\"\"\" json_file = application.find_application_json(install_pkg, sap_code)", "\" \".join(descriptions) if len(descriptions) > 1 else \"\".join(descriptions) return result def guess_pkginfo_file(pkg_name: Path,", "Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\",", "process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() sap_code = hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code)", "None: \"\"\"List supported locale codes\"\"\" print(\"Supported locales:\") for locale in SUPPORTED_LOCALES: print(f\" -", "str, locale: str) -> str: \"\"\"Process the Application.json file to get a description", "str = field(compare=True) # Compare on pkg_name, arch, and sap_code only arch: str", "\"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\",", "Union, TYPE_CHECKING from urllib.parse import urlparse from .appicon import find_app_icon from .xmltodict import", "= hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return result def process_app_description(install_pkg: Path, sap_code: str, locale:", ":param uninstall_pkg (Path): path to uninstall package :param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences", "friendly names\"\"\" padding = len(max([sc for sc, _ in SAP_CODES.items()], key=len)) source =", "str) -> str: \"\"\"Guess the resulting pkginfo file based on observed munkiimport behaviour", "receipts: list = field(compare=False) blocking_apps: list = field(compare=False) app_icon: Union[Path, None] = field(compare=False)", "return result def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str) -> str: \"\"\"Guess the", "to Acrobat)\"\"\" 
opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package =", "process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any, Any]: \"\"\"Pull out the relevant HDMedia dictionary", "package[\"receipts\"] = list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] !=", "(\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for sap_code, prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} -", "'MunkiImportPreferences', locale: str = \"en_GB\", dmg_file: Optional[Path] = None) -> AdobePackage: \"\"\"Process an", "= BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path)", "human friendly names\"\"\" padding = len(max([sc for sc, _ in SAP_CODES.items()], key=len)) source", "After Effects\", \"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe Media Encoder\", \"APRO\": \"Adobe Acrobat Pro\",", "sap_code only arch: str = field(compare=True) sap_code: str = field(compare=True) display_name: str =", "specific components of the OptionXML dict :param xml (dict): dictionary to pull values", "application SAP code :param locale (str): locale value used when building the package\"\"\"", "Substance Painter\", \"SPRK\": \"Adobe XD\"} # Supported locales SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\",", "Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\", \"LRCC\": \"Adobe Lightroom\",", "Path = field(compare=False) receipts: list = field(compare=False) 
blocking_apps: list = field(compare=False) app_icon: Union[Path,", "SAP code values :param hdmedia (list): list of HDMedia dictionaries\"\"\" # Note: HDMedia", "field(compare=False, repr=False) description: str = field(compare=False) pkginfo_file: str = field(compare=False, repr=False) imported: bool", "# the value is being passed in from Adobe Acrobat or a #", "acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"] = \"Adobe Acrobat Pro DC makes", "be either a list or a dict, depending on whether # the value", "sap_code: str = field(compare=True) display_name: str = field(compare=False) version: str = field(compare=False) min_os:", "\"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"]", "Path, sap_code: str, locale: str) -> str: \"\"\"Process the Application.json file to get", "in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit() def list_locales() -> None: \"\"\"List supported", "from the XML\"\"\" # Note: The Acrobat optionXML.xml file does not appear to", "install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"] = install_pkg", "str = field(compare=False) min_os: str = field(compare=False) installer: Path = field(compare=False) uninstaller: Path", "except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() sap_code = hdmedia[\"SAPCode\"] arch =", "SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\",", "acrobat from . 
import application if TYPE_CHECKING: from .munkirepo import MunkiImportPreferences # Blocking", "sap_code) app_json = application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]", "\"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\", \"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\":", "import convert_xml, read_xml from . import acrobat from . import application if TYPE_CHECKING:", "supported locale codes\"\"\" print(\"Supported locales:\") for locale in SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit()", "PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] = guess_pkginfo_file(package[\"pkg_name\"], package[\"version\"], munkiimport_prefs.pkginfo_extension) result = AdobePackage(**package) return result", "def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]: \"\"\"Process specific components of the OptionXML", "# Note: HDMedia can be either a list or a dict, depending on", "import Path from sys import exit from typing import Any, Dict, List, Optional,", "display_name result[\"arch\"] = \"x86_64\" if arch and arch == \"x64\" else arch result[\"version\"]", "munkiimport behaviour :param pkg_name (str): the package name :param version (str): the application", "Substance Alchemist\", \"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\": \"Adobe Substance Painter\", \"SPRK\": \"Adobe XD\"}", "\"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\",", "arch = install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"]", "import dataclass, field from pathlib import Path from sys import exit from typing", "return result def 
process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any, Any]: \"\"\"Pull out the", "None: \"\"\"List SAP codes with human friendly names\"\"\" padding = len(max([sc for sc,", "MunkiImportPreferences :param locale (str): locale used when building package :param dmg_file (str): DMG", "Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str = \"en_GB\", dmg_file: Optional[Path] = None)", "def process_app_description(install_pkg: Path, sap_code: str, locale: str) -> str: \"\"\"Process the Application.json file", "Mobile Device Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\":", "plist = plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]])", "file does not appear to have the # same HDMedias key structure as", "len(descriptions) > 1 else \"\".join(descriptions) return result def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext:", "makes your job easier every day with the trusted PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"]", "packages, so handle # this through the try/except catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"])", "Illustrator\", \"KBRG\": \"Adobe Bridge\", \"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\": \"Adobe", "Dict[Any, Any]]) -> Dict[Any, Any]: \"\"\"Pull out the relevant HDMedia dictionary based on", "<gh_stars>1-10 \"\"\"Adobe Package\"\"\" import plistlib from dataclasses import dataclass, field from pathlib import", "-> str: \"\"\"Parse out a display name for the package based on information", "on observed munkiimport behaviour :param pkg_name (str): the package name :param version (str):", "building the package\"\"\" json_file = application.find_application_json(install_pkg, sap_code) app_json = application.read_json_file(json_file) try: desc_locales =", 
"\"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe Animate and Mobile Device Packaging\", \"FRSC\":", "= Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"]", "install_pkg (Path): install package to process app description from :param sap_code (str): application", "\"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"] == \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file)", "the Application.json file to get a description to use in munki :param install_pkg", "https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe After Effects\", \"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe Media", "code values :param hdmedia (list): list of HDMedia dictionaries\"\"\" # Note: HDMedia can", "\"\"\"Parse out a display name for the package based on information in the", "process_app_description(install_pkg, package[\"sap_code\"], locale) if package[\"sap_code\"] == \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type]", "package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"], locale) if", "except AttributeError: result = hdmedia return result def process_display_name(sap_code: str) -> str: \"\"\"Parse", "on information in the media dict :param sap_code (str): SAP Code for the", "Pro DC makes your job easier every day with the trusted PDF converter.\"", "\"Adobe Premiere Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe Premiere Rush\", \"SBSTA\": \"Adobe Substance", "Adobe products. 
# https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe After Effects\", \"AICY\": \"Adobe InCopy\",", "resulting pkginfo file based on observed munkiimport behaviour :param pkg_name (str): the package", "(str): SAP Code for the product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any]) ->", "pkg_name: str = field(compare=True) # Compare on pkg_name, arch, and sap_code only arch:", "locale: str = \"en_GB\", dmg_file: Optional[Path] = None) -> AdobePackage: \"\"\"Process an installer", "hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"] = display_name", "package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] =", "install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"] = \"x86_64\" if arch and arch == \"x64\"", "apps try: for media in hdmedia: sap_code = media.get(\"SAPCode\") if sap_code and sap_code", "\"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class", "locales SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\",", "SAP_CODES.items()], key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for sap_code, prod_name in", "install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"] = install_pkg 
package[\"uninstaller\"] = uninstall_pkg", "Union[List, Dict[Any, Any]]) -> Dict[Any, Any]: \"\"\"Pull out the relevant HDMedia dictionary based", "like duplicate strings... for desc in desc_locales: _locale = desc[\"locale\"] if _locale ==", "dictionary based on SAP code values :param hdmedia (list): list of HDMedia dictionaries\"\"\"", "= \" \".join(descriptions) if len(descriptions) > 1 else \"\".join(descriptions) return result def guess_pkginfo_file(pkg_name:", "== \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"] = \"Adobe Acrobat Pro", "process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"] = \"x86_64\" if arch and", "min_os: str = field(compare=False) installer: Path = field(compare=False) uninstaller: Path = field(compare=False) receipts:", ". import application if TYPE_CHECKING: from .munkirepo import MunkiImportPreferences # Blocking apps BLOCKING_APPS", "HDMedia dictionary based on SAP code values :param hdmedia (list): list of HDMedia", "locales:\") for locale in SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit() def get_min_os_ver(f: Path) ->", "from other Adobe apps try: for media in hdmedia: sap_code = media.get(\"SAPCode\") if", "trusted PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] = guess_pkginfo_file(package[\"pkg_name\"], package[\"version\"], munkiimport_prefs.pkginfo_extension) result = AdobePackage(**package) return", "from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING from urllib.parse import urlparse", "\"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\",", "== \"x64\" else arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return result def", "compare=False) def __post_init__(self): self.icon = 
self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None: \"\"\"List SAP codes", "\"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\", \"LRCC\": \"Adobe Lightroom\", \"LTRM\":", "duplicate strings... for desc in desc_locales: _locale = desc[\"locale\"] if _locale == locale", "XD\"} # Supported locales SUPPORTED_LOCALES = [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\",", "result[\"sap_code\"] = sap_code return result def process_app_description(install_pkg: Path, sap_code: str, locale: str) ->", "application verision :param pkginfo_ext (str): the pkginfo extension per munkirepo configuration\"\"\" result =", "list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\": package[\"description\"]", "the try/except catcher try: hdmedia = process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result", "def process_display_name(sap_code: str) -> str: \"\"\"Parse out a display name for the package", "typing import Any, Dict, List, Optional, Union, TYPE_CHECKING from urllib.parse import urlparse from", "sap_code and sap_code in SAP_CODES: result = media break except AttributeError: result =", "TYPE_CHECKING from urllib.parse import urlparse from .appicon import find_app_icon from .xmltodict import convert_xml,", "return result def process_display_name(sap_code: str) -> str: \"\"\"Parse out a display name for", "dmg_file (str): DMG file to mount (currently only applies to Acrobat)\"\"\" opt_xml =", "- {locale!r}\") exit() def get_min_os_ver(f: Path) -> str: \"\"\"Get the minium OS version", "Optional, Union, TYPE_CHECKING from urllib.parse import urlparse from .appicon import find_app_icon from .xmltodict", "field(compare=False) uninstaller: Path = 
field(compare=False) receipts: list = field(compare=False) blocking_apps: list = field(compare=False)", "print(f\"Sourced from: {source}\") for sap_code, prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit()", "and Mobile Device Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\",", "Acrobat Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe Character Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\":", "\"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\",", "process app description from :param sap_code (str): application SAP code :param locale (str):", "applies to Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package", "other packages, so handle # this through the try/except catcher try: hdmedia =", "\"\"\"Adobe Package\"\"\" import plistlib from dataclasses import dataclass, field from pathlib import Path", "structure as other packages, so handle # this through the try/except catcher try:", "install_pkg (Path): path to install package :param uninstall_pkg (Path): path to uninstall package", "desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list() # Adobe", "pkginfo_file: str = field(compare=False, repr=False) imported: bool = field(default=False, compare=False) def __post_init__(self): self.icon", "values :param hdmedia (list): list of HDMedia dictionaries\"\"\" # Note: HDMedia can be", "= field(default=False, compare=False) def __post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None: \"\"\"List", "= 
f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str", "\"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\", \"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe", "\"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe Premiere Rush\",", "for sap_code, prod_name in SAP_CODES.items(): print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit() def list_locales() ->", "used when building package :param dmg_file (str): DMG file to mount (currently only", "return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]: \"\"\"Process specific components of", "Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str = \"en_GB\", dmg_file: Optional[Path] = None) -> AdobePackage:", "Dimension\", \"FLPR\": \"Adobe Animate and Mobile Device Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe", "\"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\": \"Adobe Substance Painter\", \"SPRK\": \"Adobe XD\"} # Supported", "\"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\",", "field(compare=True) # Compare on pkg_name, arch, and sap_code only arch: str = field(compare=True)", "for the package based on information in the media dict :param sap_code (str):", "= field(compare=False) version: str = field(compare=False) min_os: str = field(compare=False) installer: Path =", "mount (currently only applies to Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info", "find_app_icon from .xmltodict import convert_xml, read_xml from . import acrobat from . 
import", "find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path) if package[\"sap_code\"] != \"APRO\": package[\"description\"] = process_app_description(install_pkg, package[\"sap_code\"],", "(Path): path to install package :param uninstall_pkg (Path): path to uninstall package :param", "\"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True)", "str = field(compare=False, repr=False) imported: bool = field(default=False, compare=False) def __post_init__(self): self.icon =", "info_plist = install_pkg.joinpath(\"Contents/Info.plist\") install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"] = install_pkg package[\"uninstaller\"]", "Any]: \"\"\"Pull out the relevant HDMedia dictionary based on SAP code values :param", "hdmedia (list): list of HDMedia dictionaries\"\"\" # Note: HDMedia can be either a", "\"rb\") as plist_file: plist = plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia:", "= app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list() # Adobe does", "else \"\".join(descriptions) return result def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str) -> str:", "\"Adobe Character Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe Animate and", "-> Dict[Any, Any]: \"\"\"Pull out the relevant HDMedia dictionary based on SAP code", "result = media break except AttributeError: result = hdmedia return result def process_display_name(sap_code:", "= install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"] = 
install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"] =", "\"Adobe Substance Designer\", \"SBSTP\": \"Adobe Substance Painter\", \"SPRK\": \"Adobe XD\"} # Supported locales", "\"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\", \"no_NO\",", "key=len)) source = (\"https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/\" \"kb/apps-deployed-without-base-versions.ug.html\") print(f\"Sourced from: {source}\") for sap_code, prod_name in SAP_CODES.items():", "name :param version (str): the application verision :param pkginfo_ext (str): the pkginfo extension", "DMG file to mount (currently only applies to Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist", "based on observed munkiimport behaviour :param pkg_name (str): the package name :param version", "import urlparse from .appicon import find_app_icon from .xmltodict import convert_xml, read_xml from .", "field(compare=False) installer: Path = field(compare=False) uninstaller: Path = field(compare=False) receipts: list = field(compare=False)", "app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list() # Adobe does weird", "every day with the trusted PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] = guess_pkginfo_file(package[\"pkg_name\"], package[\"version\"], munkiimport_prefs.pkginfo_extension)", "AdobePackage: \"\"\"Process an installer package for product information :param install_pkg (Path): path to", "return result def process_app_description(install_pkg: Path, sap_code: str, locale: str) -> str: \"\"\"Process the", "hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return result def process_app_description(install_pkg: Path, sap_code: str, locale: str)", "\"Adobe Lightroom 
Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere Pro\", \"PRLD\": \"Adobe Prelude\",", "def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str) -> str: \"\"\"Guess the resulting pkginfo", "file to get a description to use in munki :param install_pkg (Path): install", "desc[\"locale\"] if _locale == locale and _locale in SUPPORTED_LOCALES and desc[\"value\"] not in", "- {prod_name}\") exit() def list_locales() -> None: \"\"\"List supported locale codes\"\"\" print(\"Supported locales:\")", "= process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"] = \"x86_64\" if arch", "code :param locale (str): locale value used when building the package\"\"\" json_file =", "order=True) class AdobePackage: pkg_name: str = field(compare=True) # Compare on pkg_name, arch, and", "try: for media in hdmedia: sap_code = media.get(\"SAPCode\") if sap_code and sap_code in", "SUPPORTED_LOCALES: print(f\" - {locale!r}\") exit() def get_min_os_ver(f: Path) -> str: \"\"\"Get the minium", "if arch and arch == \"x64\" else arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] =", "Dict[Any, Any]: \"\"\"Process specific components of the OptionXML dict :param xml (dict): dictionary", "the resulting pkginfo file based on observed munkiimport behaviour :param pkg_name (str): the", "\"Adobe Acrobat Pro DC makes your job easier every day with the trusted", "Encoder\", \"APRO\": \"Adobe Acrobat Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe Character Animator\", \"DRWV\":", "the pkginfo extension per munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg:", "AdobePackage: pkg_name: str = field(compare=True) # Compare on pkg_name, arch, and sap_code only", "def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any, Any]: \"\"\"Pull out the relevant HDMedia", "SAP 
codes for Adobe products. # https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html SAP_CODES = {\"AEFT\": \"Adobe After Effects\",", "result = \" \".join(descriptions) if len(descriptions) > 1 else \"\".join(descriptions) return result def", "exit from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING from urllib.parse import", "arch, and sap_code only arch: str = field(compare=True) sap_code: str = field(compare=True) display_name:", "other Adobe apps try: for media in hdmedia: sap_code = media.get(\"SAPCode\") if sap_code", "to mount (currently only applies to Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\") info_plist = install_pkg.joinpath(\"Contents/Info.plist\")", "day with the trusted PDF converter.\" package.update(acrobat_patches) package[\"pkginfo_file\"] = guess_pkginfo_file(package[\"pkg_name\"], package[\"version\"], munkiimport_prefs.pkginfo_extension) result", "Acrobat Pro DC makes your job easier every day with the trusted PDF", "Acrobat or a # optionXML.xml file from other Adobe apps try: for media", "\"RUSH\": \"Adobe Premiere Rush\", \"SBSTA\": \"Adobe Substance Alchemist\", \"SBSTD\": \"Adobe Substance Designer\", \"SBSTP\":", "\"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\", \"he_IL\", \"hu_HU\", \"it_IT\", \"ja_JP\", \"ko_KR\", \"nb_NO\", \"nl_NL\",", "= install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"] = \"x86_64\" if arch and arch ==", "(str): DMG file to mount (currently only applies to Acrobat)\"\"\" opt_xml = install_pkg.joinpath(\"Contents/Resources/optionXML.xml\")", "\"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe Media Encoder\", \"APRO\": \"Adobe Acrobat Pro\", \"AUDT\": \"Adobe", "app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list() # Adobe does weird stuff, like duplicate strings... 
for", "# optionXML.xml file from other Adobe apps try: for media in hdmedia: sap_code", "Blocking apps BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\", \"Safari\"]} # Current SAP codes for", "break except AttributeError: result = hdmedia return result def process_display_name(sap_code: str) -> str:", "list or a dict, depending on whether # the value is being passed", "else arch result[\"version\"] = hdmedia.get(\"productVersion\") result[\"sap_code\"] = sap_code return result def process_app_description(install_pkg: Path,", "= [\"ar_AE\", \"cs_CZ\", \"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\",", "= desc[\"locale\"] if _locale == locale and _locale in SUPPORTED_LOCALES and desc[\"value\"] not", "field(default=False, compare=False) def __post_init__(self): self.icon = self.icon_dir.joinpath(f\"{self.pkg_name}-{self.version}.png\") def list_sap_codes() -> None: \"\"\"List SAP", "file to pull OS requirements from\"\"\" result = None with open(f, \"rb\") as", "package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list() package[\"app_icon\"] = find_app_icon(install_pkg,", ":param pkginfo_ext (str): the pkginfo extension per munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return", "\"AME\": \"Adobe Media Encoder\", \"APRO\": \"Adobe Acrobat Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe", "= process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() sap_code = hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name =", "for media in hdmedia: sap_code = media.get(\"SAPCode\") if sap_code and sap_code in SAP_CODES:", "hdmedia: sap_code = media.get(\"SAPCode\") if sap_code and sap_code in SAP_CODES: result = media", "product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]: \"\"\"Process 
specific components", "= field(compare=False) pkginfo_file: str = field(compare=False, repr=False) imported: bool = field(default=False, compare=False) def", "desc_locales: _locale = desc[\"locale\"] if _locale == locale and _locale in SUPPORTED_LOCALES and", "str = field(compare=False) pkginfo_file: str = field(compare=False, repr=False) imported: bool = field(default=False, compare=False)", "result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale:", "= field(compare=True) display_name: str = field(compare=False) version: str = field(compare=False) min_os: str =", ":param acrobat (bool): process different values from the XML\"\"\" # Note: The Acrobat", "descriptions.append(desc[\"value\"]) result = \" \".join(descriptions) if len(descriptions) > 1 else \"\".join(descriptions) return result", "BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\", \"Safari\"]} # Current SAP codes for Adobe products.", "= media.get(\"SAPCode\") if sap_code and sap_code in SAP_CODES: result = media break except", "process different values from the XML\"\"\" # Note: The Acrobat optionXML.xml file does", "install_info = convert_xml(read_xml(opt_xml))[\"InstallInfo\"] package = process_opt_xml(install_info) package[\"installer\"] = install_pkg package[\"uninstaller\"] = uninstall_pkg package[\"min_os\"]", "values from the XML\"\"\" # Note: The Acrobat optionXML.xml file does not appear", "\"\"\"Get the minium OS version required :param f (Path): Info.plist file to pull", "of HDMedia dictionaries\"\"\" # Note: HDMedia can be either a list or a", "= process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() sap_code = hdmedia[\"SAPCode\"]", "\"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\", \"LRCC\": \"Adobe Lightroom\", \"LTRM\": \"Adobe Lightroom Classic\",", "on SAP code 
values :param hdmedia (list): list of HDMedia dictionaries\"\"\" # Note:", "\"da_DK\", \"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\",", "\"nb_NO\", \"nl_NL\", \"no_NO\", \"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True,", "# Blocking apps BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\", \"Safari\"]} # Current SAP codes", "\"ESHR\": \"Adobe Dimension\", \"FLPR\": \"Adobe Animate and Mobile Device Packaging\", \"FRSC\": \"Adobe Fresco\",", "of the OptionXML dict :param xml (dict): dictionary to pull values from :param", "\"pl_PL\", \"pt_BR\", \"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class AdobePackage:", "import MunkiImportPreferences # Blocking apps BLOCKING_APPS = {\"APRO\": [\"Microsoft Word\", \"Safari\"]} # Current", "extension per munkirepo configuration\"\"\" result = f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg: Path, uninstall_pkg:", "field(compare=False) blocking_apps: list = field(compare=False) app_icon: Union[Path, None] = field(compare=False) icon_dir: Path =", "result = dict() sap_code = hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"]", "pkginfo_ext: str) -> str: \"\"\"Guess the resulting pkginfo file based on observed munkiimport", "\"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere Pro\", \"PRLD\": \"Adobe Prelude\", \"RUSH\": \"Adobe Premiere", "f\"{pkg_name}-{version}{pkginfo_ext}\" return result def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences', locale: str =", "\"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"] = \"Adobe Acrobat Pro DC", "XML\"\"\" # Note: The Acrobat optionXML.xml file 
does not appear to have the", "in the media dict :param sap_code (str): SAP Code for the product\"\"\" return", "desc in desc_locales: _locale = desc[\"locale\"] if _locale == locale and _locale in", "package[\"uninstaller\"] = uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list()", "value is being passed in from Adobe Acrobat or a # optionXML.xml file", "\"de_DE\", \"en_AE\", \"en_GB\", \"en_IL\", \"en_US\", \"en_XM\", \"es_ES\", \"es_MX\", \"fi_FI\", \"fr_CA\", \"fr_FR\", \"fr_MA\", \"fr_XM\",", "result def process_display_name(sap_code: str) -> str: \"\"\"Parse out a display name for the", "{\"AEFT\": \"Adobe After Effects\", \"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe Media Encoder\", \"APRO\": \"Adobe", "Animate and Mobile Device Packaging\", \"FRSC\": \"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe", "\"Adobe Fresco\", \"IDSN\": \"Adobe InDesign\", \"ILST\": \"Adobe Illustrator\", \"KBRG\": \"Adobe Bridge\", \"LRCC\": \"Adobe", "install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"] = display_name result[\"arch\"] = \"x86_64\"", "sap_code (str): application SAP code :param locale (str): locale value used when building", "path to install package :param uninstall_pkg (Path): path to uninstall package :param munkiimport_prefs", "\"ru_RU\", \"sv_SE\", \"th_TH\", \"tr_TR\", \"uk_UA\", \"zh_CN\", \"zh_TW\"] @dataclass(eq=True, order=True) class AdobePackage: pkg_name: str", "get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] = list() package[\"app_icon\"] = find_app_icon(install_pkg, package[\"sap_code\"]) package[\"icon_dir\"]", "_locale == locale and _locale in SUPPORTED_LOCALES and desc[\"value\"] not in descriptions: 
descriptions.append(desc[\"value\"])", "process_hdmedia(install_info[\"Medias\"][\"Media\"]) except TypeError: hdmedia = process_hdmedia(install_info[\"HDMedias\"][\"HDMedia\"]) result = dict() sap_code = hdmedia[\"SAPCode\"] arch", "OS version required :param f (Path): Info.plist file to pull OS requirements from\"\"\"", "field(compare=True) display_name: str = field(compare=False) version: str = field(compare=False) min_os: str = field(compare=False)", "HDMedia dictionaries\"\"\" # Note: HDMedia can be either a list or a dict,", "(str): the package name :param version (str): the application verision :param pkginfo_ext (str):", "Code for the product\"\"\" return SAP_CODES[sap_code] def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]:", "and _locale in SUPPORTED_LOCALES and desc[\"value\"] not in descriptions: descriptions.append(desc[\"value\"]) result = \"", "print(f\" {sap_code.ljust(padding)} - {prod_name}\") exit() def list_locales() -> None: \"\"\"List supported locale codes\"\"\"", "from :param sap_code (str): application SAP code :param locale (str): locale value used", "# Compare on pkg_name, arch, and sap_code only arch: str = field(compare=True) sap_code:", "appear to have the # same HDMedias key structure as other packages, so", "hdmedia return result def process_display_name(sap_code: str) -> str: \"\"\"Parse out a display name", "plistlib.load(plist_file) result = plist.get(\"LSMinimumSystemVersion\") return result def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any,", "list_sap_codes() -> None: \"\"\"List SAP codes with human friendly names\"\"\" padding = len(max([sc", "\"LTRM\": \"Adobe Lightroom Classic\", \"PHSP\": \"Adobe Photoshop\", \"PPRO\": \"Adobe Premiere Pro\", \"PRLD\": \"Adobe", "field(compare=False) pkginfo_file: str = field(compare=False, repr=False) imported: bool = field(default=False, compare=False) def __post_init__(self):", "process_display_name(sap_code: str) -> str: \"\"\"Parse out a display 
name for the package based", "install_pkg package[\"uninstaller\"] = uninstall_pkg package[\"min_os\"] = get_min_os_ver(info_plist) package[\"blocking_apps\"] = BLOCKING_APPS.get(package[\"sap_code\"], list()) package[\"receipts\"] =", ".appicon import find_app_icon from .xmltodict import convert_xml, read_xml from . import acrobat from", "= {\"AEFT\": \"Adobe After Effects\", \"AICY\": \"Adobe InCopy\", \"AME\": \"Adobe Media Encoder\", \"APRO\":", "AttributeError: result = hdmedia return result def process_display_name(sap_code: str) -> str: \"\"\"Parse out", "= hdmedia[\"SAPCode\"] arch = install_info[\"ProcessorArchitecture\"] display_name = process_display_name(sap_code) result[\"pkg_name\"] = install_info.get(\"PackageName\") result[\"display_name\"] =", "package[\"sap_code\"] == \"APRO\": acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type] package[\"description\"] = \"Adobe Acrobat", "Dict[Any, Any]: \"\"\"Pull out the relevant HDMedia dictionary based on SAP code values", "Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\": \"Adobe Character Animator\", \"DRWV\": \"Adobe Dreamweaver\", \"ESHR\": \"Adobe", "name for the package based on information in the media dict :param sap_code", "from pathlib import Path from sys import exit from typing import Any, Dict,", "application.read_json_file(json_file) try: desc_locales = app_json[\"ProductDescription\"][\"DetailedDescription\"][\"Language\"] except KeyError: desc_locales = app_json[\"ProductDescription\"][\"Tagline\"][\"Language\"] descriptions = list()", "locale value used when building the package\"\"\" json_file = application.find_application_json(install_pkg, sap_code) app_json =", "InCopy\", \"AME\": \"Adobe Media Encoder\", \"APRO\": \"Adobe Acrobat Pro\", \"AUDT\": \"Adobe Audition\", \"CHAR\":" ]
[ "on 2018-07-21 09:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('jackpot',", "Migration(migrations.Migration): dependencies = [ ('jackpot', '0008_jackpot_no'), ] operations = [ migrations.RemoveField( model_name='jackpot', name='away_odds',", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('jackpot', '0008_jackpot_no'), ] operations =", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('jackpot', '0008_jackpot_no'), ] operations", "09:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('jackpot', '0008_jackpot_no'), ]", "dependencies = [ ('jackpot', '0008_jackpot_no'), ] operations = [ migrations.RemoveField( model_name='jackpot', name='away_odds', ),", "by Django 2.0.6 on 2018-07-21 09:47 from django.db import migrations class Migration(migrations.Migration): dependencies", "Django 2.0.6 on 2018-07-21 09:47 from django.db import migrations class Migration(migrations.Migration): dependencies =", "Generated by Django 2.0.6 on 2018-07-21 09:47 from django.db import migrations class Migration(migrations.Migration):", "migrations.RemoveField( model_name='jackpot', name='away_odds', ), migrations.RemoveField( model_name='jackpot', name='draw_odds', ), migrations.RemoveField( model_name='jackpot', name='home_odds', ), ]", "class Migration(migrations.Migration): dependencies = [ ('jackpot', '0008_jackpot_no'), ] operations = [ migrations.RemoveField( model_name='jackpot',", "operations = [ migrations.RemoveField( model_name='jackpot', name='away_odds', ), migrations.RemoveField( model_name='jackpot', name='draw_odds', ), migrations.RemoveField( model_name='jackpot',", "2018-07-21 09:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('jackpot', '0008_jackpot_no'),", "= [ ('jackpot', '0008_jackpot_no'), ] operations = [ migrations.RemoveField( model_name='jackpot', 
name='away_odds', ), migrations.RemoveField(", "[ migrations.RemoveField( model_name='jackpot', name='away_odds', ), migrations.RemoveField( model_name='jackpot', name='draw_odds', ), migrations.RemoveField( model_name='jackpot', name='home_odds', ),", "2.0.6 on 2018-07-21 09:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "= [ migrations.RemoveField( model_name='jackpot', name='away_odds', ), migrations.RemoveField( model_name='jackpot', name='draw_odds', ), migrations.RemoveField( model_name='jackpot', name='home_odds',", "('jackpot', '0008_jackpot_no'), ] operations = [ migrations.RemoveField( model_name='jackpot', name='away_odds', ), migrations.RemoveField( model_name='jackpot', name='draw_odds',", "import migrations class Migration(migrations.Migration): dependencies = [ ('jackpot', '0008_jackpot_no'), ] operations = [", "'0008_jackpot_no'), ] operations = [ migrations.RemoveField( model_name='jackpot', name='away_odds', ), migrations.RemoveField( model_name='jackpot', name='draw_odds', ),", "] operations = [ migrations.RemoveField( model_name='jackpot', name='away_odds', ), migrations.RemoveField( model_name='jackpot', name='draw_odds', ), migrations.RemoveField(", "migrations class Migration(migrations.Migration): dependencies = [ ('jackpot', '0008_jackpot_no'), ] operations = [ migrations.RemoveField(", "[ ('jackpot', '0008_jackpot_no'), ] operations = [ migrations.RemoveField( model_name='jackpot', name='away_odds', ), migrations.RemoveField( model_name='jackpot',", "# Generated by Django 2.0.6 on 2018-07-21 09:47 from django.db import migrations class" ]
[ "urllib import urlencode API_VERSION = '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA", "'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc',", "try: self.message = result[\"error\"][\"message\"] self.code = result[\"error\"].get(\"code\") except: self.message = result Exception.__init__(self, self.message)", "= sorted(args.items(), key=lambda val: val[0]) msg = method + '|' + uri +", ":return: \"\"\" return self.request('members/me', is_user_method=True) def get_orders(self, market): \"\"\" Active User Orders. This", "= { 'market': market } return self.request('trades/my', args=args, is_user_method=True) def request(self, path, args=None,", "'market': market, 'price': price } return self.request('orders', args=args, method='POST', is_user_method=True) def cancel_order(self, order_id):", "is None: args = dict() if is_user_method: args['access_key'] = self.access_key args['tonce'] = int(time.time()", "try: response = requests.request( method, KUNA_API_BASEURL + path, params=args) except requests.RequestException as e:", "# -*- coding: utf-8 -*- \"\"\"Main module.\"\"\" import hashlib import hmac import json", "for 1 BTC :return: \"\"\" args = { 'side': side, 'volume': volume, 'market':", "\"\"\" User trade history This is a User method. :param market: :return: \"\"\"", "\"\"\" uri = '/' + KUNA_API_URL_PREFIX + '/' + path sorted_values = sorted(args.items(),", "volume, market, price): \"\"\" Order placing. This is a User method. 
:param side:", "\"\"\"Main module.\"\"\" import hashlib import hmac import json import time import requests try:", "{ 'id': order_id } return self.request('order/delete', args=args, method='POST', is_user_method=True) def get_trade_history(self, market): \"\"\"", "unix timestamp \"\"\" return self.request('timestamp') def get_recent_market_data(self, market): \"\"\" Get recent market data", "'|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC can only handle ascii (byte) strings", "import urlencode API_VERSION = '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA =", "This is a User method. :return: \"\"\" args = { 'market': market }", "or 'sell' :param volume: volume in BTC :param market: option from VALID_MARKET_DATA_PAIRS :param", "can only handle ascii (byte) strings # https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg =", "module.\"\"\" import hashlib import hmac import json import time import requests try: from", "request(self, path, args=None, method='GET', is_user_method=False): \"\"\" Fetches the given path in the Kuna", "= '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah',", "market: :return: \"\"\" args = { 'market': market } return self.request('order_book', args=args) def", "= int(time.time() * 1000) args['signature'] = self._generate_signature(method, path, args) try: response = requests.request(", "msg = method + '|' + uri + '|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\"", "} return self.request('orders', args=args, method='POST', is_user_method=True) def cancel_order(self, order_id): \"\"\" Cancel order. 
This", "} return self.request('order/delete', args=args, method='POST', is_user_method=True) def get_trade_history(self, market): \"\"\" User trade history", "'id': order_id } return self.request('order/delete', args=args, method='POST', is_user_method=True) def get_trade_history(self, market): \"\"\" User", "price): \"\"\" Order placing. This is a User method. :param side: 'buy' or", "method. :param side: 'buy' or 'sell' :param volume: volume in BTC :param market:", "volume in BTC :param market: option from VALID_MARKET_DATA_PAIRS :param price: price for 1", "} return self.request('trades/my', args=args, is_user_method=True) def request(self, path, args=None, method='GET', is_user_method=False): \"\"\" Fetches", "= requests.request( method, KUNA_API_BASEURL + path, params=args) except requests.RequestException as e: response =", "'price': price } return self.request('orders', args=args, method='POST', is_user_method=True) def cancel_order(self, order_id): \"\"\" Cancel", "the given arguments. :param path: :param args: :param method: :param is_user_method: :return: \"\"\"", "from VALID_MARKET_DATA_PAIRS :param price: price for 1 BTC :return: \"\"\" args = {", "order_id): \"\"\" Cancel order. This is a User method. :param order_id: :return: \"\"\"", ":param side: 'buy' or 'sell' :param volume: volume in BTC :param market: option", "params=args) except requests.RequestException as e: response = json.loads(e.read()) raise APIError(response) result = response.json()", "and result.get('error'): raise APIError(result) elif response.status_code not in [200, 201, 202]: raise APIError(response.reason)", "result): try: self.message = result[\"error\"][\"message\"] self.code = result[\"error\"].get(\"code\") except: self.message = result Exception.__init__(self,", "given path with the given arguments. :param path: :param args: :param method: :param", "server. 
:param market: :return: \"\"\" return self.request('tickers' + '/' + market) def get_order_book(self,", "msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self, result): try: self.message =", ":param volume: volume in BTC :param market: option from VALID_MARKET_DATA_PAIRS :param price: price", "def request(self, path, args=None, method='GET', is_user_method=False): \"\"\" Fetches the given path in the", "market, price): \"\"\" Order placing. This is a User method. :param side: 'buy'", "APIError(response) result = response.json() if result and isinstance(result, dict) and result.get('error'): raise APIError(result)", "raise APIError(result) elif response.status_code not in [200, 201, 202]: raise APIError(response.reason) return result", "data from server. :param market: :return: \"\"\" return self.request('tickers' + '/' + market)", "We translate args to a valid query string. If post_args is given, we", "method, path, args): \"\"\" Signature is generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param", "args=args, method='POST', is_user_method=True) def cancel_order(self, order_id): \"\"\" Cancel order. This is a User", "market, 'price': price } return self.request('orders', args=args, method='POST', is_user_method=True) def cancel_order(self, order_id): \"\"\"", "e: response = json.loads(e.read()) raise APIError(response) result = response.json() if result and isinstance(result,", "User method. :return: \"\"\" return self.request('members/me', is_user_method=True) def get_orders(self, market): \"\"\" Active User", "json import time import requests try: from urllib.parse import urlencode except ImportError: from", "market): \"\"\" Get trades history data from server. :param market: :return: \"\"\" args", "send a POST request to the given path with the given arguments. :param", "the User and Assets. This is a User method. 
:return: \"\"\" return self.request('members/me',", "def get_orders(self, market): \"\"\" Active User Orders. This is a User method. :return:", "'market': market } return self.request('trades', args=args) def get_user_account_info(self): \"\"\" Information about the User", "'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc']", "time from server. :return: unix timestamp \"\"\" return self.request('timestamp') def get_recent_market_data(self, market): \"\"\"", "'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \\", "This is a User method. :param side: 'buy' or 'sell' :param volume: volume", "msg = msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self, result): try:", "market } return self.request('orders', args=args, is_user_method=True) def put_order(self, side, volume, market, price): \"\"\"", "in BTC :param market: option from VALID_MARKET_DATA_PAIRS :param price: price for 1 BTC", "is_user_method=True) def get_trade_history(self, market): \"\"\" User trade history This is a User method.", "Active User Orders. This is a User method. :return: \"\"\" args = {", "post_args is given, we send a POST request to the given path with", ":param args: :return: \"\"\" uri = '/' + KUNA_API_URL_PREFIX + '/' + path", "Assets. This is a User method. :return: \"\"\" return self.request('members/me', is_user_method=True) def get_orders(self,", "is_user_method=True) def cancel_order(self, order_id): \"\"\" Cancel order. This is a User method. 
:param", "+ '/' + path sorted_values = sorted(args.items(), key=lambda val: val[0]) msg = method", "'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self, access_key=None, secret_key=None):", "as e: response = json.loads(e.read()) raise APIError(response) result = response.json() if result and", "to the given path with the given arguments. :param path: :param args: :param", "history This is a User method. :param market: :return: \"\"\" args = {", "self.access_key = access_key self.secret_key = secret_key def get_server_time(self): \"\"\" Get the server time", "response.json() if result and isinstance(result, dict) and result.get('error'): raise APIError(result) elif response.status_code not", "requests try: from urllib.parse import urlencode except ImportError: from urllib import urlencode API_VERSION", "APIError(response.reason) return result def _generate_signature(self, method, path, args): \"\"\" Signature is generated by", "is_user_method=True) def get_orders(self, market): \"\"\" Active User Orders. This is a User method.", "a User method. :param side: 'buy' or 'sell' :param volume: volume in BTC", "def put_order(self, side, volume, market, price): \"\"\" Order placing. This is a User", "market): \"\"\" User trade history This is a User method. :param market: :return:", "Get trades history data from server. :param market: :return: \"\"\" args = {", "\"\"\" Cancel order. This is a User method. 
:param order_id: :return: \"\"\" args", "dict() if is_user_method: args['access_key'] = self.access_key args['tonce'] = int(time.time() * 1000) args['signature'] =", "import requests try: from urllib.parse import urlencode except ImportError: from urllib import urlencode", "market: :return: \"\"\" args = { 'market': market } return self.request('trades', args=args) def", "raise APIError(response.reason) return result def _generate_signature(self, method, path, args): \"\"\" Signature is generated", "self.request('orders', args=args, method='POST', is_user_method=True) def cancel_order(self, order_id): \"\"\" Cancel order. This is a", "\"\"\" Information about the User and Assets. This is a User method. :return:", "sorted_values = sorted(args.items(), key=lambda val: val[0]) msg = method + '|' + uri", "args: :param method: :param is_user_method: :return: \"\"\" if args is None: args =", "and Assets. This is a User method. :return: \"\"\" return self.request('members/me', is_user_method=True) def", "= { 'id': order_id } return self.request('order/delete', args=args, method='POST', is_user_method=True) def get_trade_history(self, market):", "[200, 201, 202]: raise APIError(response.reason) return result def _generate_signature(self, method, path, args): \"\"\"", "VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA", "market } return self.request('order_book', args=args) def get_trades_history(self, market): \"\"\" Get trades history data", "result = response.json() if result and isinstance(result, dict) and result.get('error'): raise APIError(result) elif", "# https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg = msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest() class", "option from VALID_MARKET_DATA_PAIRS :param price: price for 1 BTC :return: \"\"\" args =", "market } return self.request('trades', args=args) def 
get_user_account_info(self): \"\"\" Information about the User and", "get_order_book(self, market): \"\"\" Get order book data from server. :param market: :return: \"\"\"", "\"\"\" return self.request('timestamp') def get_recent_market_data(self, market): \"\"\" Get recent market data from server.", "self.secret_key.encode('ascii') msg = msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self, result):", "path in the Kuna API. We translate args to a valid query string.", ":return: \"\"\" args = { 'market': market } return self.request('trades', args=args) def get_user_account_info(self):", "if args is None: args = dict() if is_user_method: args['access_key'] = self.access_key args['tonce']", "args = dict() if is_user_method: args['access_key'] = self.access_key args['tonce'] = int(time.time() * 1000)", "translate args to a valid query string. If post_args is given, we send", "the given path in the Kuna API. We translate args to a valid", "volume: volume in BTC :param market: option from VALID_MARKET_DATA_PAIRS :param price: price for", "price: price for 1 BTC :return: \"\"\" args = { 'side': side, 'volume':", "args['signature'] = self._generate_signature(method, path, args) try: response = requests.request( method, KUNA_API_BASEURL + path,", ":param method: :param is_user_method: :return: \"\"\" if args is None: args = dict()", "'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah',", "market } return self.request('trades/my', args=args, is_user_method=True) def request(self, path, args=None, method='GET', is_user_method=False): \"\"\"", "= dict() if is_user_method: args['access_key'] = self.access_key args['tonce'] = int(time.time() * 1000) args['signature']", "path, params=args) except requests.RequestException as e: response = json.loads(e.read()) raise APIError(response) result 
=", "\"\"\" if args is None: args = dict() if is_user_method: args['access_key'] = self.access_key", "result.get('error'): raise APIError(result) elif response.status_code not in [200, 201, 202]: raise APIError(response.reason) return", "method='POST', is_user_method=True) def get_trade_history(self, market): \"\"\" User trade history This is a User", "from server. :param market: :return: \"\"\" args = { 'market': market } return", "\"\"\" Active User Orders. This is a User method. :return: \"\"\" args =", "path: :param args: :return: \"\"\" uri = '/' + KUNA_API_URL_PREFIX + '/' +", "method + '|' + uri + '|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC", "= json.loads(e.read()) raise APIError(response) result = response.json() if result and isinstance(result, dict) and", "+ KUNA_API_URL_PREFIX + '/' + path sorted_values = sorted(args.items(), key=lambda val: val[0]) msg", "data from server. :param market: :return: \"\"\" args = { 'market': market }", "sorted(args.items(), key=lambda val: val[0]) msg = method + '|' + uri + '|'", "} return self.request('trades', args=args) def get_user_account_info(self): \"\"\" Information about the User and Assets.", "from server. :return: unix timestamp \"\"\" return self.request('timestamp') def get_recent_market_data(self, market): \"\"\" Get", "args=args) def get_trades_history(self, market): \"\"\" Get trades history data from server. 
:param market:", "APIError(result) elif response.status_code not in [200, 201, 202]: raise APIError(response.reason) return result def", "val: val[0]) msg = method + '|' + uri + '|' + urlencode(sorted_values)", "timestamp \"\"\" return self.request('timestamp') def get_recent_market_data(self, market): \"\"\" Get recent market data from", "self.access_key args['tonce'] = int(time.time() * 1000) args['signature'] = self._generate_signature(method, path, args) try: response", "not in [200, 201, 202]: raise APIError(response.reason) return result def _generate_signature(self, method, path,", "'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc',", "market: :return: \"\"\" return self.request('tickers' + '/' + market) def get_order_book(self, market): \"\"\"", "uri + '|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC can only handle ascii", "BTC :return: \"\"\" args = { 'side': side, 'volume': volume, 'market': market, 'price':", "\\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self, access_key=None, secret_key=None): self.access_key = access_key self.secret_key =", "['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object):", "APIError(Exception): def __init__(self, result): try: self.message = result[\"error\"][\"message\"] self.code = result[\"error\"].get(\"code\") except: self.message", "side, volume, market, price): \"\"\" Order placing. This is a User method. :param", ":param price: price for 1 BTC :return: \"\"\" args = { 'side': side,", "if is_user_method: args['access_key'] = self.access_key args['tonce'] = int(time.time() * 1000) args['signature'] = self._generate_signature(method,", "is a User method. 
:return: \"\"\" args = { 'market': market } return", "return self.request('tickers' + '/' + market) def get_order_book(self, market): \"\"\" Get order book", "in [200, 201, 202]: raise APIError(response.reason) return result def _generate_signature(self, method, path, args):", "get_user_account_info(self): \"\"\" Information about the User and Assets. This is a User method.", "method. :return: \"\"\" args = { 'market': market } return self.request('orders', args=args, is_user_method=True)", ":param market: :return: \"\"\" args = { 'market': market } return self.request('order_book', args=args)", "def get_recent_market_data(self, market): \"\"\" Get recent market data from server. :param market: :return:", "path sorted_values = sorted(args.items(), key=lambda val: val[0]) msg = method + '|' +", "self.secret_key = secret_key def get_server_time(self): \"\"\" Get the server time from server. :return:", "\"\"\" Signature is generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param path:", "\"\"\" return self.request('members/me', is_user_method=True) def get_orders(self, market): \"\"\" Active User Orders. This is", "'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self, access_key=None, secret_key=None): self.access_key =", "# \"HTTP-verb|URI|params\" # HMAC can only handle ascii (byte) strings # https://bugs.python.org/issue5285 key", ":param args: :param method: :param is_user_method: :return: \"\"\" if args is None: args", "HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param path: :param args: :return: \"\"\" uri = '/'", "def cancel_order(self, order_id): \"\"\" Cancel order. This is a User method. 
:param order_id:", "= method + '|' + uri + '|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\" #", "args): \"\"\" Signature is generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param", "get_orders(self, market): \"\"\" Active User Orders. This is a User method. :return: \"\"\"", "return self.request('timestamp') def get_recent_market_data(self, market): \"\"\" Get recent market data from server. :param", "Order placing. This is a User method. :param side: 'buy' or 'sell' :param", "method. :param order_id: :return: \"\"\" args = { 'id': order_id } return self.request('order/delete',", "utf-8 -*- \"\"\"Main module.\"\"\" import hashlib import hmac import json import time import", "urllib.parse import urlencode except ImportError: from urllib import urlencode API_VERSION = '2' KUNA_API_URL_PREFIX", "import time import requests try: from urllib.parse import urlencode except ImportError: from urllib", "market: :return: \"\"\" args = { 'market': market } return self.request('trades/my', args=args, is_user_method=True)", "secret_key def get_server_time(self): \"\"\" Get the server time from server. :return: unix timestamp", ":return: unix timestamp \"\"\" return self.request('timestamp') def get_recent_market_data(self, market): \"\"\" Get recent market", "import hmac import json import time import requests try: from urllib.parse import urlencode", "import json import time import requests try: from urllib.parse import urlencode except ImportError:", "dict) and result.get('error'): raise APIError(result) elif response.status_code not in [200, 201, 202]: raise", "raise APIError(response) result = response.json() if result and isinstance(result, dict) and result.get('error'): raise", "a User method. 
:param order_id: :return: \"\"\" args = { 'id': order_id }", "secret_key=None): self.access_key = access_key self.secret_key = secret_key def get_server_time(self): \"\"\" Get the server", ":param path: :param args: :return: \"\"\" uri = '/' + KUNA_API_URL_PREFIX + '/'", "= response.json() if result and isinstance(result, dict) and result.get('error'): raise APIError(result) elif response.status_code", "self.request('order_book', args=args) def get_trades_history(self, market): \"\"\" Get trades history data from server. :param", "path: :param args: :param method: :param is_user_method: :return: \"\"\" if args is None:", "def get_order_book(self, market): \"\"\" Get order book data from server. :param market: :return:", "+ '|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC can only handle ascii (byte)", "from server. :param market: :return: \"\"\" return self.request('tickers' + '/' + market) def", "args['access_key'] = self.access_key args['tonce'] = int(time.time() * 1000) args['signature'] = self._generate_signature(method, path, args)", "side, 'volume': volume, 'market': market, 'price': price } return self.request('orders', args=args, method='POST', is_user_method=True)", "with the given arguments. :param path: :param args: :param method: :param is_user_method: :return:", "'/' + KUNA_API_URL_PREFIX + '/' + path sorted_values = sorted(args.items(), key=lambda val: val[0])", "'market': market } return self.request('order_book', args=args) def get_trades_history(self, market): \"\"\" Get trades history", "get_trade_history(self, market): \"\"\" User trade history This is a User method. :param market:", "self.request('tickers' + '/' + market) def get_order_book(self, market): \"\"\" Get order book data", "\"\"\" Get order book data from server. :param market: :return: \"\"\" args =", "self.request('trades', args=args) def get_user_account_info(self): \"\"\" Information about the User and Assets. 
This is", "'/' + path sorted_values = sorted(args.items(), key=lambda val: val[0]) msg = method +", "args = { 'market': market } return self.request('trades', args=args) def get_user_account_info(self): \"\"\" Information", "def get_trades_history(self, market): \"\"\" Get trades history data from server. :param market: :return:", "MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah']", "'|' + uri + '|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC can only", "given path in the Kuna API. We translate args to a valid query", "arguments. :param path: :param args: :param method: :param is_user_method: :return: \"\"\" if args", "'2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah',", "from urllib.parse import urlencode except ImportError: from urllib import urlencode API_VERSION = '2'", "market): \"\"\" Get recent market data from server. :param market: :return: \"\"\" return", "args = { 'market': market } return self.request('trades/my', args=args, is_user_method=True) def request(self, path,", "self.request('members/me', is_user_method=True) def get_orders(self, market): \"\"\" Active User Orders. This is a User", "This is a User method. :return: \"\"\" return self.request('members/me', is_user_method=True) def get_orders(self, market):", "+ market) def get_order_book(self, market): \"\"\" Get order book data from server. 
:param", "'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc',", "json.loads(e.read()) raise APIError(response) result = response.json() if result and isinstance(result, dict) and result.get('error'):", "'buy' or 'sell' :param volume: volume in BTC :param market: option from VALID_MARKET_DATA_PAIRS", "and isinstance(result, dict) and result.get('error'): raise APIError(result) elif response.status_code not in [200, 201,", "except ImportError: from urllib import urlencode API_VERSION = '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL", "urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC can only handle ascii (byte) strings # https://bugs.python.org/issue5285", "} return self.request('orders', args=args, is_user_method=True) def put_order(self, side, volume, market, price): \"\"\" Order", "is_user_method=True) def request(self, path, args=None, method='GET', is_user_method=False): \"\"\" Fetches the given path in", "isinstance(result, dict) and result.get('error'): raise APIError(result) elif response.status_code not in [200, 201, 202]:", "\"\"\" Fetches the given path in the Kuna API. We translate args to", "'market': market } return self.request('orders', args=args, is_user_method=True) def put_order(self, side, volume, market, price):", "coding: utf-8 -*- \"\"\"Main module.\"\"\" import hashlib import hmac import json import time", "= self._generate_signature(method, path, args) try: response = requests.request( method, KUNA_API_BASEURL + path, params=args)", "= { 'market': market } return self.request('orders', args=args, is_user_method=True) def put_order(self, side, volume,", "return self.request('trades', args=args) def get_user_account_info(self): \"\"\" Information about the User and Assets. 
This", ":param market: :return: \"\"\" return self.request('tickers' + '/' + market) def get_order_book(self, market):", "return self.request('order/delete', args=args, method='POST', is_user_method=True) def get_trade_history(self, market): \"\"\" User trade history This", "requests.RequestException as e: response = json.loads(e.read()) raise APIError(response) result = response.json() if result", "market) def get_order_book(self, market): \"\"\" Get order book data from server. :param market:", "path with the given arguments. :param path: :param args: :param method: :param is_user_method:", "User method. :param order_id: :return: \"\"\" args = { 'id': order_id } return", "= ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class", "API. We translate args to a valid query string. If post_args is given,", "+ uri + '|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC can only handle", "{ 'side': side, 'volume': volume, 'market': market, 'price': price } return self.request('orders', args=args,", "'sell' :param volume: volume in BTC :param market: option from VALID_MARKET_DATA_PAIRS :param price:", "response.status_code not in [200, 201, 202]: raise APIError(response.reason) return result def _generate_signature(self, method,", "market): \"\"\" Active User Orders. This is a User method. 
:return: \"\"\" args", "+ '/' + market) def get_order_book(self, market): \"\"\" Get order book data from", "'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc',", "= { 'market': market } return self.request('trades', args=args) def get_user_account_info(self): \"\"\" Information about", "order_id } return self.request('order/delete', args=args, method='POST', is_user_method=True) def get_trade_history(self, market): \"\"\" User trade", "self.request('timestamp') def get_recent_market_data(self, market): \"\"\" Get recent market data from server. :param market:", "get_server_time(self): \"\"\" Get the server time from server. :return: unix timestamp \"\"\" return", "args=args, method='POST', is_user_method=True) def get_trade_history(self, market): \"\"\" User trade history This is a", "to a valid query string. If post_args is given, we send a POST", "= { 'market': market } return self.request('order_book', args=args) def get_trades_history(self, market): \"\"\" Get", "{ 'market': market } return self.request('trades/my', args=args, is_user_method=True) def request(self, path, args=None, method='GET',", "= 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah',", "API_VERSION = '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah',", "elif response.status_code not in [200, 201, 202]: raise APIError(response.reason) return result def _generate_signature(self,", ":param market: :return: \"\"\" args = { 'market': market } return self.request('trades/my', args=args,", "is given, we send a POST request to the given path with the", "access_key=None, secret_key=None): self.access_key = access_key self.secret_key = secret_key def 
get_server_time(self): \"\"\" Get the", "<reponame>kazanzhy/kuna # -*- coding: utf-8 -*- \"\"\"Main module.\"\"\" import hashlib import hmac import", "__init__(self, access_key=None, secret_key=None): self.access_key = access_key self.secret_key = secret_key def get_server_time(self): \"\"\" Get", "__init__(self, result): try: self.message = result[\"error\"][\"message\"] self.code = result[\"error\"].get(\"code\") except: self.message = result", "urlencode API_VERSION = '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah',", "\"\"\" args = { 'market': market } return self.request('trades', args=args) def get_user_account_info(self): \"\"\"", "cancel_order(self, order_id): \"\"\" Cancel order. This is a User method. :param order_id: :return:", "a POST request to the given path with the given arguments. :param path:", "\"\"\" Get trades history data from server. :param market: :return: \"\"\" args =", "args: :return: \"\"\" uri = '/' + KUNA_API_URL_PREFIX + '/' + path sorted_values", "\"\"\" args = { 'market': market } return self.request('order_book', args=args) def get_trades_history(self, market):", "market): \"\"\" Get order book data from server. :param market: :return: \"\"\" args", "https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg = msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception):", "method='POST', is_user_method=True) def cancel_order(self, order_id): \"\"\" Cancel order. This is a User method.", "User and Assets. This is a User method. 
:return: \"\"\" return self.request('members/me', is_user_method=True)", "msg, hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self, result): try: self.message = result[\"error\"][\"message\"] self.code =", "MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self, access_key=None, secret_key=None): self.access_key = access_key self.secret_key = secret_key", "= secret_key def get_server_time(self): \"\"\" Get the server time from server. :return: unix", "method: :param is_user_method: :return: \"\"\" if args is None: args = dict() if", "= 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah',", "args=args, is_user_method=True) def request(self, path, args=None, method='GET', is_user_method=False): \"\"\" Fetches the given path", "side: 'buy' or 'sell' :param volume: volume in BTC :param market: option from", ":param path: :param args: :param method: :param is_user_method: :return: \"\"\" if args is", "KUNA_API_BASEURL + path, params=args) except requests.RequestException as e: response = json.loads(e.read()) raise APIError(response)", "args=None, method='GET', is_user_method=False): \"\"\" Fetches the given path in the Kuna API. We", "in the Kuna API. We translate args to a valid query string. 
If", "(byte) strings # https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg = msg.encode('ascii') return hmac.new(key, msg,", "KUNA_API_URL_PREFIX + '/' + path sorted_values = sorted(args.items(), key=lambda val: val[0]) msg =", "\"HTTP-verb|URI|params\" # HMAC can only handle ascii (byte) strings # https://bugs.python.org/issue5285 key =", "hashlib import hmac import json import time import requests try: from urllib.parse import", "try: from urllib.parse import urlencode except ImportError: from urllib import urlencode API_VERSION =", "args=args, is_user_method=True) def put_order(self, side, volume, market, price): \"\"\" Order placing. This is", "= msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self, result): try: self.message", "import urlencode except ImportError: from urllib import urlencode API_VERSION = '2' KUNA_API_URL_PREFIX =", "hmac import json import time import requests try: from urllib.parse import urlencode except", ":param market: :return: \"\"\" args = { 'market': market } return self.request('trades', args=args)", "202]: raise APIError(response.reason) return result def _generate_signature(self, method, path, args): \"\"\" Signature is", "handle ascii (byte) strings # https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg = msg.encode('ascii') return", ":return: \"\"\" args = { 'id': order_id } return self.request('order/delete', args=args, method='POST', is_user_method=True)", "class APIError(Exception): def __init__(self, result): try: self.message = result[\"error\"][\"message\"] self.code = result[\"error\"].get(\"code\") except:", "return result def _generate_signature(self, method, path, args): \"\"\" Signature is generated by an", "= access_key self.secret_key = secret_key def get_server_time(self): \"\"\" Get the server time from", "+ \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self, access_key=None, 
secret_key=None): self.access_key = access_key self.secret_key", "val[0]) msg = method + '|' + uri + '|' + urlencode(sorted_values) #", "This is a User method. :param order_id: :return: \"\"\" args = { 'id':", "Get the server time from server. :return: unix timestamp \"\"\" return self.request('timestamp') def", "args = { 'id': order_id } return self.request('order/delete', args=args, method='POST', is_user_method=True) def get_trade_history(self,", "If post_args is given, we send a POST request to the given path", "\"\"\" Get recent market data from server. :param market: :return: \"\"\" return self.request('tickers'", "import hashlib import hmac import json import time import requests try: from urllib.parse", "User method. :param market: :return: \"\"\" args = { 'market': market } return", "return self.request('members/me', is_user_method=True) def get_orders(self, market): \"\"\" Active User Orders. This is a", "args = { 'side': side, 'volume': volume, 'market': market, 'price': price } return", "201, 202]: raise APIError(response.reason) return result def _generate_signature(self, method, path, args): \"\"\" Signature", "we send a POST request to the given path with the given arguments.", "a User method. :return: \"\"\" return self.request('members/me', is_user_method=True) def get_orders(self, market): \"\"\" Active", "User method. :return: \"\"\" args = { 'market': market } return self.request('orders', args=args,", "+ path sorted_values = sorted(args.items(), key=lambda val: val[0]) msg = method + '|'", "'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc',", "method='GET', is_user_method=False): \"\"\" Fetches the given path in the Kuna API. We translate", "\"\"\" Order placing. This is a User method. :param side: 'buy' or 'sell'", "is_user_method=False): \"\"\" Fetches the given path in the Kuna API. 
We translate args", "self.request('order/delete', args=args, method='POST', is_user_method=True) def get_trade_history(self, market): \"\"\" User trade history This is", "= self.access_key args['tonce'] = int(time.time() * 1000) args['signature'] = self._generate_signature(method, path, args) try:", "int(time.time() * 1000) args['signature'] = self._generate_signature(method, path, args) try: response = requests.request( method,", "HMAC can only handle ascii (byte) strings # https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg", "self.request('trades/my', args=args, is_user_method=True) def request(self, path, args=None, method='GET', is_user_method=False): \"\"\" Fetches the given", "response = json.loads(e.read()) raise APIError(response) result = response.json() if result and isinstance(result, dict)", "\"\"\" args = { 'market': market } return self.request('trades/my', args=args, is_user_method=True) def request(self,", "is_user_method: args['access_key'] = self.access_key args['tonce'] = int(time.time() * 1000) args['signature'] = self._generate_signature(method, path,", "get_trades_history(self, market): \"\"\" Get trades history data from server. :param market: :return: \"\"\"", "args=args) def get_user_account_info(self): \"\"\" Information about the User and Assets. This is a", "'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah',", "server time from server. :return: unix timestamp \"\"\" return self.request('timestamp') def get_recent_market_data(self, market):", "server. 
:return: unix timestamp \"\"\" return self.request('timestamp') def get_recent_market_data(self, market): \"\"\" Get recent", "\"\"\" args = { 'side': side, 'volume': volume, 'market': market, 'price': price }", "['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS =", "POST request to the given path with the given arguments. :param path: :param", "args = { 'market': market } return self.request('orders', args=args, is_user_method=True) def put_order(self, side,", "order. This is a User method. :param order_id: :return: \"\"\" args = {", "an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param path: :param args: :return: \"\"\" uri", "1000) args['signature'] = self._generate_signature(method, path, args) try: response = requests.request( method, KUNA_API_BASEURL +", "self.request('orders', args=args, is_user_method=True) def put_order(self, side, volume, market, price): \"\"\" Order placing. This", ":return: \"\"\" if args is None: args = dict() if is_user_method: args['access_key'] =", ":param method: :param path: :param args: :return: \"\"\" uri = '/' + KUNA_API_URL_PREFIX", "def _generate_signature(self, method, path, args): \"\"\" Signature is generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\",", "'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self, access_key=None, secret_key=None): self.access_key = access_key", "a User method. :return: \"\"\" args = { 'market': market } return self.request('orders',", "only handle ascii (byte) strings # https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg = msg.encode('ascii')", "put_order(self, side, volume, market, price): \"\"\" Order placing. 
This is a User method.", "-*- \"\"\"Main module.\"\"\" import hashlib import hmac import json import time import requests", "_generate_signature(self, method, path, args): \"\"\" Signature is generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key))", "return self.request('order_book', args=args) def get_trades_history(self, market): \"\"\" Get trades history data from server.", "BTC :param market: option from VALID_MARKET_DATA_PAIRS :param price: price for 1 BTC :return:", ":param market: option from VALID_MARKET_DATA_PAIRS :param price: price for 1 BTC :return: \"\"\"", "is generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param path: :param args:", "args) try: response = requests.request( method, KUNA_API_BASEURL + path, params=args) except requests.RequestException as", "args['tonce'] = int(time.time() * 1000) args['signature'] = self._generate_signature(method, path, args) try: response =", "requests.request( method, KUNA_API_BASEURL + path, params=args) except requests.RequestException as e: response = json.loads(e.read())", "return hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self, result): try: self.message = result[\"error\"][\"message\"]", "'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self,", "Get order book data from server. :param market: :return: \"\"\" args = {", "'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg',", "trades history data from server. :param market: :return: \"\"\" args = { 'market':", "is_user_method=True) def put_order(self, side, volume, market, price): \"\"\" Order placing. This is a", ":param is_user_method: :return: \"\"\" if args is None: args = dict() if is_user_method:", "server. 
:param market: :return: \"\"\" args = { 'market': market } return self.request('trades',", "access_key self.secret_key = secret_key def get_server_time(self): \"\"\" Get the server time from server.", "placing. This is a User method. :param side: 'buy' or 'sell' :param volume:", "if result and isinstance(result, dict) and result.get('error'): raise APIError(result) elif response.status_code not in", "is a User method. :param order_id: :return: \"\"\" args = { 'id': order_id", ":return: \"\"\" args = { 'market': market } return self.request('order_book', args=args) def get_trades_history(self,", "User method. :param side: 'buy' or 'sell' :param volume: volume in BTC :param", "'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc',", "'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def", "path, args=None, method='GET', is_user_method=False): \"\"\" Fetches the given path in the Kuna API.", "is a User method. :return: \"\"\" return self.request('members/me', is_user_method=True) def get_orders(self, market): \"\"\"", "request to the given path with the given arguments. :param path: :param args:", "def get_user_account_info(self): \"\"\" Information about the User and Assets. This is a User", "\"\"\" args = { 'id': order_id } return self.request('order/delete', args=args, method='POST', is_user_method=True) def", "query string. If post_args is given, we send a POST request to the", "book data from server. :param market: :return: \"\"\" args = { 'market': market", "This is a User method. :param market: :return: \"\"\" args = { 'market':", "get_recent_market_data(self, market): \"\"\" Get recent market data from server. 
:param market: :return: \"\"\"", "KunaAPI(object): def __init__(self, access_key=None, secret_key=None): self.access_key = access_key self.secret_key = secret_key def get_server_time(self):", "KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah',", "by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param path: :param args: :return: \"\"\"", "uri = '/' + KUNA_API_URL_PREFIX + '/' + path sorted_values = sorted(args.items(), key=lambda", "trade history This is a User method. :param market: :return: \"\"\" args =", "method: :param path: :param args: :return: \"\"\" uri = '/' + KUNA_API_URL_PREFIX +", "+ path, params=args) except requests.RequestException as e: response = json.loads(e.read()) raise APIError(response) result", "price } return self.request('orders', args=args, method='POST', is_user_method=True) def cancel_order(self, order_id): \"\"\" Cancel order.", "path, args) try: response = requests.request( method, KUNA_API_BASEURL + path, params=args) except requests.RequestException", "return self.request('orders', args=args, method='POST', is_user_method=True) def cancel_order(self, order_id): \"\"\" Cancel order. This is", "given, we send a POST request to the given path with the given", "is a User method. :param side: 'buy' or 'sell' :param volume: volume in", "time import requests try: from urllib.parse import urlencode except ImportError: from urllib import", "about the User and Assets. This is a User method. :return: \"\"\" return", "server. :param market: :return: \"\"\" args = { 'market': market } return self.request('order_book',", "order book data from server. :param market: :return: \"\"\" args = { 'market':", "ImportError: from urllib import urlencode API_VERSION = '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL =", "the Kuna API. 
We translate args to a valid query string. If post_args", "{ 'market': market } return self.request('order_book', args=args) def get_trades_history(self, market): \"\"\" Get trades", "Fetches the given path in the Kuna API. We translate args to a", "except requests.RequestException as e: response = json.loads(e.read()) raise APIError(response) result = response.json() if", "Signature is generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param path: :param", "'evrbtc', 'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self, access_key=None, secret_key=None): self.access_key", "{ 'market': market } return self.request('orders', args=args, is_user_method=True) def put_order(self, side, volume, market,", "'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] +", "def get_server_time(self): \"\"\" Get the server time from server. :return: unix timestamp \"\"\"", "return self.request('orders', args=args, is_user_method=True) def put_order(self, side, volume, market, price): \"\"\" Order placing.", "# HMAC can only handle ascii (byte) strings # https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii')", "\"\"\" return self.request('tickers' + '/' + market) def get_order_book(self, market): \"\"\" Get order", "market: option from VALID_MARKET_DATA_PAIRS :param price: price for 1 BTC :return: \"\"\" args", ":return: \"\"\" args = { 'market': market } return self.request('orders', args=args, is_user_method=True) def", "result and isinstance(result, dict) and result.get('error'): raise APIError(result) elif response.status_code not in [200,", "def __init__(self, access_key=None, secret_key=None): self.access_key = access_key self.secret_key = secret_key def get_server_time(self): \"\"\"", "'/' + market) def get_order_book(self, market): \"\"\" Get order book data from server.", "a valid query string. 
If post_args is given, we send a POST request", "algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param path: :param args: :return: \"\"\" uri =", "* 1000) args['signature'] = self._generate_signature(method, path, args) try: response = requests.request( method, KUNA_API_BASEURL", "+ urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC can only handle ascii (byte) strings #", "the given path with the given arguments. :param path: :param args: :param method:", "class KunaAPI(object): def __init__(self, access_key=None, secret_key=None): self.access_key = access_key self.secret_key = secret_key def", "strings # https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg = msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest()", "'market': market } return self.request('trades/my', args=args, is_user_method=True) def request(self, path, args=None, method='GET', is_user_method=False):", ":return: \"\"\" args = { 'market': market } return self.request('trades/my', args=args, is_user_method=True) def", "} return self.request('order_book', args=args) def get_trades_history(self, market): \"\"\" Get trades history data from", "def __init__(self, result): try: self.message = result[\"error\"][\"message\"] self.code = result[\"error\"].get(\"code\") except: self.message =", "urlencode except ImportError: from urllib import urlencode API_VERSION = '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION)", "valid query string. 
If post_args is given, we send a POST request to", "args is None: args = dict() if is_user_method: args['access_key'] = self.access_key args['tonce'] =", "-*- coding: utf-8 -*- \"\"\"Main module.\"\"\" import hashlib import hmac import json import", "hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self, result): try: self.message = result[\"error\"][\"message\"] self.code = result[\"error\"].get(\"code\")", "= self.secret_key.encode('ascii') msg = msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self,", "method, KUNA_API_BASEURL + path, params=args) except requests.RequestException as e: response = json.loads(e.read()) raise", "ascii (byte) strings # https://bugs.python.org/issue5285 key = self.secret_key.encode('ascii') msg = msg.encode('ascii') return hmac.new(key,", "history data from server. :param market: :return: \"\"\" args = { 'market': market", "\"\"\" Get the server time from server. :return: unix timestamp \"\"\" return self.request('timestamp')", "{ 'market': market } return self.request('trades', args=args) def get_user_account_info(self): \"\"\" Information about the", "secret_key)) :param method: :param path: :param args: :return: \"\"\" uri = '/' +", "args = { 'market': market } return self.request('order_book', args=args) def get_trades_history(self, market): \"\"\"", "Orders. This is a User method. 
:return: \"\"\" args = { 'market': market", "response = requests.request( method, KUNA_API_BASEURL + path, params=args) except requests.RequestException as e: response", "volume, 'market': market, 'price': price } return self.request('orders', args=args, method='POST', is_user_method=True) def cancel_order(self,", "None: args = dict() if is_user_method: args['access_key'] = self.access_key args['tonce'] = int(time.time() *", "from urllib import urlencode API_VERSION = '2' KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION) KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX)", "= ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah', 'eosuah', 'tusduah', 'wavesuah'] VALID_MARKET_DATA_PAIRS", "the server time from server. :return: unix timestamp \"\"\" return self.request('timestamp') def get_recent_market_data(self,", "Kuna API. We translate args to a valid query string. If post_args is", "Information about the User and Assets. This is a User method. :return: \"\"\"", "price for 1 BTC :return: \"\"\" args = { 'side': side, 'volume': volume,", "a User method. :param market: :return: \"\"\" args = { 'market': market }", "is a User method. :param market: :return: \"\"\" args = { 'market': market", "order_id: :return: \"\"\" args = { 'id': order_id } return self.request('order/delete', args=args, method='POST',", "string. If post_args is given, we send a POST request to the given", "generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method: :param path: :param args: :return:", "def get_trade_history(self, market): \"\"\" User trade history This is a User method. 
:param", "self._generate_signature(method, path, args) try: response = requests.request( method, KUNA_API_BASEURL + path, params=args) except", "key = self.secret_key.encode('ascii') msg = msg.encode('ascii') return hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception): def", "+ '|' + uri + '|' + urlencode(sorted_values) # \"HTTP-verb|URI|params\" # HMAC can", "= { 'side': side, 'volume': volume, 'market': market, 'price': price } return self.request('orders',", "'volume': volume, 'market': market, 'price': price } return self.request('orders', args=args, method='POST', is_user_method=True) def", "key=lambda val: val[0]) msg = method + '|' + uri + '|' +", "Cancel order. This is a User method. :param order_id: :return: \"\"\" args =", "KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX) MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah',", "given arguments. :param path: :param args: :param method: :param is_user_method: :return: \"\"\" if", "VALID_MARKET_DATA_PAIRS :param price: price for 1 BTC :return: \"\"\" args = { 'side':", "1 BTC :return: \"\"\" args = { 'side': side, 'volume': volume, 'market': market,", "path, args): \"\"\" Signature is generated by an algorithm HEX(HMAC-SHA256(\"HTTP-verb|URI|params\", secret_key)) :param method:", "method. :return: \"\"\" return self.request('members/me', is_user_method=True) def get_orders(self, market): \"\"\" Active User Orders.", "args to a valid query string. If post_args is given, we send a", "User Orders. This is a User method. :return: \"\"\" args = { 'market':", "'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \\ MARKET_PAIRS_TO_GRYVNA class KunaAPI(object): def __init__(self, access_key=None,", "User trade history This is a User method. :param market: :return: \"\"\" args", "result def _generate_signature(self, method, path, args): \"\"\" Signature is generated by an algorithm", "method. 
:param market: :return: \"\"\" args = { 'market': market } return self.request('trades/my',", "\"\"\" args = { 'market': market } return self.request('orders', args=args, is_user_method=True) def put_order(self,", "return self.request('trades/my', args=args, is_user_method=True) def request(self, path, args=None, method='GET', is_user_method=False): \"\"\" Fetches the", ":return: \"\"\" return self.request('tickers' + '/' + market) def get_order_book(self, market): \"\"\" Get", "hmac.new(key, msg, hashlib.sha256).hexdigest() class APIError(Exception): def __init__(self, result): try: self.message = result[\"error\"][\"message\"] self.code", "recent market data from server. :param market: :return: \"\"\" return self.request('tickers' + '/'", ":return: \"\"\" uri = '/' + KUNA_API_URL_PREFIX + '/' + path sorted_values =", "= '/' + KUNA_API_URL_PREFIX + '/' + path sorted_values = sorted(args.items(), key=lambda val:", ":return: \"\"\" args = { 'side': side, 'volume': volume, 'market': market, 'price': price", "market data from server. :param market: :return: \"\"\" return self.request('tickers' + '/' +", "is_user_method: :return: \"\"\" if args is None: args = dict() if is_user_method: args['access_key']", "'side': side, 'volume': volume, 'market': market, 'price': price } return self.request('orders', args=args, method='POST',", ":param order_id: :return: \"\"\" args = { 'id': order_id } return self.request('order/delete', args=args,", "Get recent market data from server. :param market: :return: \"\"\" return self.request('tickers' +" ]
[ "content = f.readline() element_pattern = \"(\\w+)\" while content: for key in keys: if", "those elements # that have at least one attribute that corresponds to a", "def corresponding_elements(xml_path, attrs): elements = set() keys = attrs.keys() try: f = open(xml_path,", "that returns those elements # that have at least one attribute that corresponds", "function from the previous exercise that returns those elements # that have at", "Exception as e: print(e) return list(elements) price_attributes_dictionary = { 'coin': 'euros', 'recommendations': 'true',", "as e: print(e) return list(elements) price_attributes_dictionary = { 'coin': 'euros', 'recommendations': 'true', 'fast':", "= attrs.keys() try: f = open(xml_path, \"r\") content = f.readline() element_pattern = \"(\\w+)\"", "of the function from the previous exercise that returns those elements # that", "pair in the dictionary. import re def corresponding_elements(xml_path, attrs): elements = set() keys", "corresponding_elements(xml_path, attrs): elements = set() keys = attrs.keys() try: f = open(xml_path, \"r\")", "a key-value pair in the dictionary. import re def corresponding_elements(xml_path, attrs): elements =", "= set() keys = attrs.keys() try: f = open(xml_path, \"r\") content = f.readline()", "\"r\") content = f.readline() element_pattern = \"(\\w+)\" while content: for key in keys:", "that corresponds to a key-value pair in the dictionary. 
import re def corresponding_elements(xml_path,", "keys: if re.search(key, content) and re.search(attrs[key], content): result = re.search(element_pattern, content) if result:", "keys = attrs.keys() try: f = open(xml_path, \"r\") content = f.readline() element_pattern =", "except Exception as e: print(e) return list(elements) price_attributes_dictionary = { 'coin': 'euros', 'recommendations':", "while content: for key in keys: if re.search(key, content) and re.search(attrs[key], content): result", "re.search(element_pattern, content) if result: elements.add(result.group(0)) content = f.readline() f.close() except Exception as e:", "the function from the previous exercise that returns those elements # that have", "= open(xml_path, \"r\") content = f.readline() element_pattern = \"(\\w+)\" while content: for key", "return list(elements) price_attributes_dictionary = { 'coin': 'euros', 'recommendations': 'true', 'fast': 'true' } details_attributes_dictionary", "f = open(xml_path, \"r\") content = f.readline() element_pattern = \"(\\w+)\" while content: for", "f.readline() f.close() except Exception as e: print(e) return list(elements) price_attributes_dictionary = { 'coin':", "have at least one attribute that corresponds to a key-value pair in the", "if result: elements.add(result.group(0)) content = f.readline() f.close() except Exception as e: print(e) return", "{ 'coin': 'euros', 'recommendations': 'true', 'fast': 'true' } details_attributes_dictionary = { 'detailed': 'true'", "elements # that have at least one attribute that corresponds to a key-value", "print(e) return list(elements) price_attributes_dictionary = { 'coin': 'euros', 'recommendations': 'true', 'fast': 'true' }", "previous exercise that returns those elements # that have at least one attribute", "for key in keys: if re.search(key, content) and re.search(attrs[key], content): result = re.search(element_pattern,", "content) if result: elements.add(result.group(0)) content = f.readline() f.close() 
except Exception as e: print(e)", "open(xml_path, \"r\") content = f.readline() element_pattern = \"(\\w+)\" while content: for key in", "that have at least one attribute that corresponds to a key-value pair in", "to a key-value pair in the dictionary. import re def corresponding_elements(xml_path, attrs): elements", "content): result = re.search(element_pattern, content) if result: elements.add(result.group(0)) content = f.readline() f.close() except", "'true', 'fast': 'true' } details_attributes_dictionary = { 'detailed': 'true' } print(corresponding_elements(\"menu.xml\", price_attributes_dictionary)) print(corresponding_elements(\"menu.xml\",", "# that have at least one attribute that corresponds to a key-value pair", "attrs.keys() try: f = open(xml_path, \"r\") content = f.readline() element_pattern = \"(\\w+)\" while", "= { 'coin': 'euros', 'recommendations': 'true', 'fast': 'true' } details_attributes_dictionary = { 'detailed':", "= f.readline() element_pattern = \"(\\w+)\" while content: for key in keys: if re.search(key,", "result: elements.add(result.group(0)) content = f.readline() f.close() except Exception as e: print(e) return list(elements)", "import re def corresponding_elements(xml_path, attrs): elements = set() keys = attrs.keys() try: f", "re.search(key, content) and re.search(attrs[key], content): result = re.search(element_pattern, content) if result: elements.add(result.group(0)) content", "attribute that corresponds to a key-value pair in the dictionary. 
import re def", "'recommendations': 'true', 'fast': 'true' } details_attributes_dictionary = { 'detailed': 'true' } print(corresponding_elements(\"menu.xml\", price_attributes_dictionary))", "from the previous exercise that returns those elements # that have at least", "\"(\\w+)\" while content: for key in keys: if re.search(key, content) and re.search(attrs[key], content):", "content: for key in keys: if re.search(key, content) and re.search(attrs[key], content): result =", "result = re.search(element_pattern, content) if result: elements.add(result.group(0)) content = f.readline() f.close() except Exception", "least one attribute that corresponds to a key-value pair in the dictionary. import", "corresponds to a key-value pair in the dictionary. import re def corresponding_elements(xml_path, attrs):", "in keys: if re.search(key, content) and re.search(attrs[key], content): result = re.search(element_pattern, content) if", "one attribute that corresponds to a key-value pair in the dictionary. import re", "in the dictionary. 
import re def corresponding_elements(xml_path, attrs): elements = set() keys =", "'coin': 'euros', 'recommendations': 'true', 'fast': 'true' } details_attributes_dictionary = { 'detailed': 'true' }", "'fast': 'true' } details_attributes_dictionary = { 'detailed': 'true' } print(corresponding_elements(\"menu.xml\", price_attributes_dictionary)) print(corresponding_elements(\"menu.xml\", details_attributes_dictionary))", "elements = set() keys = attrs.keys() try: f = open(xml_path, \"r\") content =", "re def corresponding_elements(xml_path, attrs): elements = set() keys = attrs.keys() try: f =", "elements.add(result.group(0)) content = f.readline() f.close() except Exception as e: print(e) return list(elements) price_attributes_dictionary", "at least one attribute that corresponds to a key-value pair in the dictionary.", "list(elements) price_attributes_dictionary = { 'coin': 'euros', 'recommendations': 'true', 'fast': 'true' } details_attributes_dictionary =", "= \"(\\w+)\" while content: for key in keys: if re.search(key, content) and re.search(attrs[key],", "'euros', 'recommendations': 'true', 'fast': 'true' } details_attributes_dictionary = { 'detailed': 'true' } print(corresponding_elements(\"menu.xml\",", "element_pattern = \"(\\w+)\" while content: for key in keys: if re.search(key, content) and", "Write another variant of the function from the previous exercise that returns those", "re.search(attrs[key], content): result = re.search(element_pattern, content) if result: elements.add(result.group(0)) content = f.readline() f.close()", "key-value pair in the dictionary. 
import re def corresponding_elements(xml_path, attrs): elements = set()", "content) and re.search(attrs[key], content): result = re.search(element_pattern, content) if result: elements.add(result.group(0)) content =", "price_attributes_dictionary = { 'coin': 'euros', 'recommendations': 'true', 'fast': 'true' } details_attributes_dictionary = {", "= f.readline() f.close() except Exception as e: print(e) return list(elements) price_attributes_dictionary = {", "key in keys: if re.search(key, content) and re.search(attrs[key], content): result = re.search(element_pattern, content)", "e: print(e) return list(elements) price_attributes_dictionary = { 'coin': 'euros', 'recommendations': 'true', 'fast': 'true'", "if re.search(key, content) and re.search(attrs[key], content): result = re.search(element_pattern, content) if result: elements.add(result.group(0))", "returns those elements # that have at least one attribute that corresponds to", "attrs): elements = set() keys = attrs.keys() try: f = open(xml_path, \"r\") content", "f.close() except Exception as e: print(e) return list(elements) price_attributes_dictionary = { 'coin': 'euros',", "dictionary. import re def corresponding_elements(xml_path, attrs): elements = set() keys = attrs.keys() try:", "f.readline() element_pattern = \"(\\w+)\" while content: for key in keys: if re.search(key, content)", "content = f.readline() f.close() except Exception as e: print(e) return list(elements) price_attributes_dictionary =", "and re.search(attrs[key], content): result = re.search(element_pattern, content) if result: elements.add(result.group(0)) content = f.readline()", "another variant of the function from the previous exercise that returns those elements", "the dictionary. 
import re def corresponding_elements(xml_path, attrs): elements = set() keys = attrs.keys()", "the previous exercise that returns those elements # that have at least one", "try: f = open(xml_path, \"r\") content = f.readline() element_pattern = \"(\\w+)\" while content:", "set() keys = attrs.keys() try: f = open(xml_path, \"r\") content = f.readline() element_pattern", "exercise that returns those elements # that have at least one attribute that", "variant of the function from the previous exercise that returns those elements #", "# Write another variant of the function from the previous exercise that returns", "= re.search(element_pattern, content) if result: elements.add(result.group(0)) content = f.readline() f.close() except Exception as" ]
[ "fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src, ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world',", "= affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10, 10, 3), -np.pi / 4 + np.random.random(3)", "assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform =", "as op import numpy as np import pytest import h5py import fsl.data.image as", "'ref.nii.gz') src = fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src, ref) wdfield", "import fsl.transform.fnirt as fnirt import fsl.transform.nonlinear as nonlinear import fsl.transform.x5 as x5 from", "with tempdir.tempdir(): # field must be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield)", "x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace", "fnirt import fsl.transform.nonlinear as nonlinear import fsl.transform.x5 as x5 from .. import make_random_image", ".. 
import make_random_image def _check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION", "x5.X5_VERSION def _check_affine(group, xform): assert group.attrs['Type'] == 'affine' gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform,", "make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10, 10, 3), -np.pi /", "= x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace assert gotdfield.refSpace ==", "_check_affine(group, xform): assert group.attrs['Type'] == 'affine' gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def", "gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r') as f: assert f.attrs['Type']", "fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref) gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform))", "gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as f: _check_metadata(f) assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform)", "xform = affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10, 10, 3), -np.pi / 4 +", "with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref)", "assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r') as f: assert f.attrs['Type'] == 'nonlinear' _check_metadata(f)", "* np.pi / 2) src = fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src,", "tempdir import fsl.transform.affine as affine import 
fsl.transform.fnirt as fnirt import fsl.transform.nonlinear as nonlinear", "np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform", "10, 3), -np.pi / 4 + np.random.random(3) * np.pi / 2) src =", "h5py.File('linear.x5', 'r') as f: _check_metadata(f) assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'],", "import os.path as op import numpy as np import pytest import h5py import", "f['/A'], src) _check_space( f['/B'], ref) def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile", "img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group, field): assert group.attrs['Type'] == 'deformation' assert group.attrs['SubType'] ==", "assert group.attrs['Type'] == 'image' assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat)", "assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as f: _check_metadata(f)", "== 'affine' gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def _check_space(group, img): assert group.attrs['Type']", "dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz') src", "src = fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src, ref) wdfield =", "op import numpy as np import pytest import h5py import fsl.data.image as fslimage", "= op.join(datadir, 'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile) ref = fslimage.Image(reffile)", "'image' assert 
np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group, field):", "ref = fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src, ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world')", "np.random.random(3) * np.pi / 2) src = fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform,", "3), -np.pi / 4 + np.random.random(3) * np.pi / 2) src = fslimage.Image('src.nii')", "src, ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir(): # field must be", "group.attrs['Type'] == 'affine' gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def _check_space(group, img): assert", "== field.deformationType xform = np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with", "gotsrc, gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5',", "xform) _check_space( f['/A'], src) _check_space( f['/B'], ref) def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata',", "_check_affine(f['/Transform'], xform) _check_space( f['/A'], src) _check_space( f['/B'], ref) def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__),", "== 'deformation' assert group.attrs['SubType'] == field.deformationType xform = np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'],", "== x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION def _check_affine(group, xform): assert group.attrs['Type'] == 'affine'", "np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def _check_space(group, img): assert group.attrs['Type'] 
== 'image' assert np.all(np.isclose(group.attrs['Size'],", "xform)) def _check_space(group, img): assert group.attrs['Type'] == 'image' assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert", "<<EMAIL>> # import os.path as op import numpy as np import pytest import", "np.random.randint(1, 5, 3), np.random.randint(-10, 10, 3), -np.pi / 4 + np.random.random(3) * np.pi", "fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref) gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5')", "def _check_space(group, img): assert group.attrs['Type'] == 'image' assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'],", "as f: assert f.attrs['Type'] == 'nonlinear' _check_metadata(f) _check_deformation(f['/Transform'], wdfield) _check_space( f['/A'], ref) _check_space(", "f: _check_metadata(f) assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'], src) _check_space( f['/B'],", "assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as f: _check_metadata(f) assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'],", "import numpy as np import pytest import h5py import fsl.data.image as fslimage import", "h5py import fsl.data.image as fslimage import fsl.utils.tempdir as tempdir import fsl.transform.affine as affine", "fslimage import fsl.utils.tempdir as tempdir import fsl.transform.affine as affine import fsl.transform.fnirt as fnirt", "'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'], src) _check_space( f['/B'], ref) def test_readWriteNonLinearX5(): datadir =", "np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group, field): assert group.attrs['Type'] == 'deformation' assert group.attrs['SubType']", "wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert 
gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace assert", "srcfile = op.join(datadir, 'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile) ref =", "import fsl.transform.nonlinear as nonlinear import fsl.transform.x5 as x5 from .. import make_random_image def", "fsl.utils.tempdir as tempdir import fsl.transform.affine as affine import fsl.transform.fnirt as fnirt import fsl.transform.nonlinear", "field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose( np.random.randint(1,", "assert group.attrs['SubType'] == field.deformationType xform = np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def", "xform = np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii')", "nonlinear import fsl.transform.x5 as x5 from .. import make_random_image def _check_metadata(group): assert group.attrs['Format']", "from .. 
import make_random_image def _check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version'] ==", "h5py.File('nonlinear.x5', 'r') as f: assert f.attrs['Type'] == 'nonlinear' _check_metadata(f) _check_deformation(f['/Transform'], wdfield) _check_space( f['/A'],", "== wdfield.srcSpace assert gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data))", "assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace assert gotdfield.refSpace == wdfield.refSpace assert", "gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as f: _check_metadata(f) assert f.attrs['Type'] == 'linear'", "assert f.attrs['Type'] == 'nonlinear' _check_metadata(f) _check_deformation(f['/Transform'], wdfield) _check_space( f['/A'], ref) _check_space( f['/B'], src)", "'world', 'world') with tempdir.tempdir(): # field must be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield)", "pytest import h5py import fsl.data.image as fslimage import fsl.utils.tempdir as tempdir import fsl.transform.affine", "reffile = op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile,", "as nonlinear import fsl.transform.x5 as x5 from .. 
import make_random_image def _check_metadata(group): assert", "= np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def _check_space(group, img): assert group.attrs['Type'] == 'image' assert", "os.path as op import numpy as np import pytest import h5py import fsl.data.image", "gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r')", "'r') as f: _check_metadata(f) assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'], src)", "test_x5.py - # # Author: <NAME> <<EMAIL>> # import os.path as op import", "python # # test_x5.py - # # Author: <NAME> <<EMAIL>> # import os.path", "= fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref) gotxform, gotsrc, gotref =", "def _check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION def _check_affine(group, xform):", "wdfield.srcSpace assert gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with", "#!/usr/bin/env python # # test_x5.py - # # Author: <NAME> <<EMAIL>> # import", "test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10, 10,", "tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10, 10, 3), -np.pi", "# field must be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield =", "tempdir.tempdir(): # field must be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) 
x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield", "wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir(): # field must be world->world with", "xform): assert group.attrs['Type'] == 'affine' gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def _check_space(group,", "# Author: <NAME> <<EMAIL>> # import os.path as op import numpy as np", "def _check_affine(group, xform): assert group.attrs['Type'] == 'affine' gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform))", "src = fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref) gotxform, gotsrc, gotref", "- # # Author: <NAME> <<EMAIL>> # import os.path as op import numpy", "np.all(np.isclose(gotxform, xform)) def _check_space(group, img): assert group.attrs['Type'] == 'image' assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3]))", "ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir(): # field must be world->world", "fsl.transform.nonlinear as nonlinear import fsl.transform.x5 as x5 from .. 
import make_random_image def _check_metadata(group):", "'r') as f: assert f.attrs['Type'] == 'nonlinear' _check_metadata(f) _check_deformation(f['/Transform'], wdfield) _check_space( f['/A'], ref)", "== wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r') as f: assert f.attrs['Type'] ==", "np import pytest import h5py import fsl.data.image as fslimage import fsl.utils.tempdir as tempdir", "fsl.transform.fnirt as fnirt import fsl.transform.nonlinear as nonlinear import fsl.transform.x5 as x5 from ..", "op.join(datadir, 'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield", "_check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION def _check_affine(group, xform): assert", "fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src, ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir():", "x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION def _check_affine(group, xform): assert group.attrs['Type'] == 'affine' gotxform", "_check_space(group, img): assert group.attrs['Type'] == 'image' assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3]))", "Author: <NAME> <<EMAIL>> # import os.path as op import numpy as np import", "_check_deformation(group, field): assert group.attrs['Type'] == 'deformation' assert group.attrs['SubType'] == field.deformationType xform = np.array(group['Matrix'])", "import make_random_image def _check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION def", "xform, src, ref) gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src)", "def _check_deformation(group, field): assert group.attrs['Type'] == 
'deformation' assert group.attrs['SubType'] == field.deformationType xform =", "'deformation' assert group.attrs['SubType'] == field.deformationType xform = np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat)", "= x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as", "-np.pi / 4 + np.random.random(3) * np.pi / 2) src = fslimage.Image('src.nii') ref", "must be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5') assert", "def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile =", "dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace ==", "== wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r') as", "x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as f:", "== 'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'], src) _check_space( f['/B'], ref) def test_readWriteNonLinearX5(): datadir", "gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace assert gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType", "group.attrs['Version'] == x5.X5_VERSION def _check_affine(group, xform): assert group.attrs['Type'] == 'affine' gotxform = np.array(group['Matrix'])", "gotdfield.srcSpace == wdfield.srcSpace assert 
gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data,", "assert gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r') as f: assert", "import fsl.transform.affine as affine import fsl.transform.fnirt as fnirt import fsl.transform.nonlinear as nonlinear import", "fsl.transform.x5 as x5 from .. import make_random_image def _check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT", "as affine import fsl.transform.fnirt as fnirt import fsl.transform.nonlinear as nonlinear import fsl.transform.x5 as", "assert group.attrs['Version'] == x5.X5_VERSION def _check_affine(group, xform): assert group.attrs['Type'] == 'affine' gotxform =", "assert gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5',", "group.attrs['SubType'] == field.deformationType xform = np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5():", "field must be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5')", "assert gotdfield.srcSpace == wdfield.srcSpace assert gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType assert", "np.pi / 2) src = fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref)", "# import os.path as op import numpy as np import pytest import h5py", "4 + np.random.random(3) * np.pi / 2) src = fslimage.Image('src.nii') ref = fslimage.Image('ref.nii')", "x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = 
x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace", "'world') with tempdir.tempdir(): # field must be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5',", "datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz')", "ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref) gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5') assert", "'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield =", "group.attrs['Type'] == 'deformation' assert group.attrs['SubType'] == field.deformationType xform = np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data))", "+ np.random.random(3) * np.pi / 2) src = fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5',", "x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace assert gotdfield.refSpace == wdfield.refSpace", "affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10, 10, 3), -np.pi / 4 + np.random.random(3) *", "f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'], src) _check_space( f['/B'], ref) def test_readWriteNonLinearX5():", "_check_space( f['/B'], ref) def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile = op.join(datadir,", "op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz') reffile =", "op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src, ref)", "assert 
group.attrs['Type'] == 'affine' gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def _check_space(group, img):", "as f: _check_metadata(f) assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'], src) _check_space(", "2) src = fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref) gotxform, gotsrc,", "img.voxToWorldMat) def _check_deformation(group, field): assert group.attrs['Type'] == 'deformation' assert group.attrs['SubType'] == field.deformationType xform", "= fnirt.readFnirt(dffile, src, ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir(): # field", "assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'], src) _check_space( f['/B'], ref) def", "wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r') as f: assert f.attrs['Type'] == 'nonlinear'", "assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group, field): assert group.attrs['Type'] == 'deformation' assert", "np.random.randint(-10, 10, 3), -np.pi / 4 + np.random.random(3) * np.pi / 2) src", "/ 2) src = fslimage.Image('src.nii') ref = fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref) gotxform,", "fsl.transform.affine as affine import fsl.transform.fnirt as fnirt import fsl.transform.nonlinear as nonlinear import fsl.transform.x5", "np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group, field): assert group.attrs['Type']", "assert group.attrs['Type'] == 'deformation' assert group.attrs['SubType'] == field.deformationType xform = np.array(group['Matrix']) assert np.all(np.isclose(xform,", "ref) 
def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile", "import fsl.data.image as fslimage import fsl.utils.tempdir as tempdir import fsl.transform.affine as affine import", "= nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir(): # field must be world->world with pytest.raises(x5.X5Error):", "xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as f: _check_metadata(f) assert f.attrs['Type']", "= op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz') reffile", "import fsl.utils.tempdir as tempdir import fsl.transform.affine as affine import fsl.transform.fnirt as fnirt import", "assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace assert gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType ==", "field): assert group.attrs['Type'] == 'deformation' assert group.attrs['SubType'] == field.deformationType xform = np.array(group['Matrix']) assert", "_check_metadata(f) assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform) _check_space( f['/A'], src) _check_space( f['/B'], ref)", "= fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src, ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world') with", "wdfield.data)) with h5py.File('nonlinear.x5', 'r') as f: assert f.attrs['Type'] == 'nonlinear' _check_metadata(f) _check_deformation(f['/Transform'], wdfield)", "img): assert group.attrs['Type'] == 'image' assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'],", "x5.writeLinearX5('linear.x5', xform, src, ref) gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) 
assert", "ref) gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref)", "= op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src,", "_check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose( np.random.randint(1, 5,", "gotdfield = x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace assert gotdfield.refSpace", "pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert", "wdfield.refSpace assert gotdfield.deformationType == wdfield.deformationType assert np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r') as f:", "'testdata', 'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz') reffile = op.join(datadir,", "= op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz') src =", "world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src) assert", "fnirt.readFnirt(dffile, src, ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir(): # field must", "import pytest import h5py import fsl.data.image as fslimage import fsl.utils.tempdir as tempdir import", "assert np.all(np.isclose(gotxform, xform)) def _check_space(group, img): assert 
group.attrs['Type'] == 'image' assert np.all(np.isclose(group.attrs['Size'], img.shape[", "# # test_x5.py - # # Author: <NAME> <<EMAIL>> # import os.path as", "assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as f: _check_metadata(f) assert f.attrs['Type'] ==", "== x5.X5_VERSION def _check_affine(group, xform): assert group.attrs['Type'] == 'affine' gotxform = np.array(group['Matrix']) assert", "with h5py.File('linear.x5', 'r') as f: _check_metadata(f) assert f.attrs['Type'] == 'linear' _check_affine(f['/Transform'], xform) _check_space(", "_check_space( f['/A'], src) _check_space( f['/B'], ref) def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear')", "as x5 from .. import make_random_image def _check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT assert", "with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10, 10, 3),", "op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile)", "import fsl.transform.x5 as x5 from .. 
import make_random_image def _check_metadata(group): assert group.attrs['Format'] ==", "'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz') src = fslimage.Image(srcfile) ref", "src, ref) gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert", "= fslimage.Image(srcfile) ref = fslimage.Image(reffile) dfield = fnirt.readFnirt(dffile, src, ref) wdfield = nonlinear.convertDeformationSpace(dfield,", "field.deformationType xform = np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir():", "gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r')", "# test_x5.py - # # Author: <NAME> <<EMAIL>> # import os.path as op", "# # Author: <NAME> <<EMAIL>> # import os.path as op import numpy as", "dfield = fnirt.readFnirt(dffile, src, ref) wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir(): #", "src) _check_space( f['/B'], ref) def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile =", "3), np.random.randint(-10, 10, 3), -np.pi / 4 + np.random.random(3) * np.pi / 2)", "import h5py import fsl.data.image as fslimage import fsl.utils.tempdir as tempdir import fsl.transform.affine as", ":3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group, field): assert group.attrs['Type'] == 'deformation'", "def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10,", "group.attrs['Type'] == 'image' assert 
np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def", "affine import fsl.transform.fnirt as fnirt import fsl.transform.nonlinear as nonlinear import fsl.transform.x5 as x5", "/ 4 + np.random.random(3) * np.pi / 2) src = fslimage.Image('src.nii') ref =", "= np.array(group['Matrix']) assert np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii')", "= fslimage.Image('ref.nii') x5.writeLinearX5('linear.x5', xform, src, ref) gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform,", "make_random_image def _check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION def _check_affine(group,", "as fslimage import fsl.utils.tempdir as tempdir import fsl.transform.affine as affine import fsl.transform.fnirt as", "'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir, 'src.nii.gz') reffile = op.join(datadir, 'ref.nii.gz')", "as tempdir import fsl.transform.affine as affine import fsl.transform.fnirt as fnirt import fsl.transform.nonlinear as", "as fnirt import fsl.transform.nonlinear as nonlinear import fsl.transform.x5 as x5 from .. 
import", "np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with h5py.File('linear.x5', 'r') as f: _check_metadata(f) assert", "_check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group, field): assert group.attrs['Type'] == 'deformation' assert group.attrs['SubType'] == field.deformationType", "np.all(np.isclose(gotdfield.data, wdfield.data)) with h5py.File('nonlinear.x5', 'r') as f: assert f.attrs['Type'] == 'nonlinear' _check_metadata(f) _check_deformation(f['/Transform'],", "f: assert f.attrs['Type'] == 'nonlinear' _check_metadata(f) _check_deformation(f['/Transform'], wdfield) _check_space( f['/A'], ref) _check_space( f['/B'],", "test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz') srcfile = op.join(datadir,", "with h5py.File('nonlinear.x5', 'r') as f: assert f.attrs['Type'] == 'nonlinear' _check_metadata(f) _check_deformation(f['/Transform'], wdfield) _check_space(", "f['/B'], ref) def test_readWriteNonLinearX5(): datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear') dffile = op.join(datadir, 'displacementfield.nii.gz')", "5, 3), np.random.randint(-10, 10, 3), -np.pi / 4 + np.random.random(3) * np.pi /", "fsl.data.image as fslimage import fsl.utils.tempdir as tempdir import fsl.transform.affine as affine import fsl.transform.fnirt", "nonlinear.convertDeformationSpace(dfield, 'world', 'world') with tempdir.tempdir(): # field must be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5',", "make_random_image('ref.nii') xform = affine.compose( np.random.randint(1, 5, 3), np.random.randint(-10, 10, 3), -np.pi / 4", "numpy as np import pytest import h5py import fsl.data.image as fslimage import fsl.utils.tempdir", "img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def 
_check_deformation(group, field): assert group.attrs['Type'] ==", "as np import pytest import h5py import fsl.data.image as fslimage import fsl.utils.tempdir as", "be world->world with pytest.raises(x5.X5Error): x5.writeNonLinearX5('nonlinear.x5', dfield) x5.writeNonLinearX5('nonlinear.x5', wdfield) gotdfield = x5.readNonLinearX5('nonlinear.x5') assert gotdfield.src.sameSpace(src)", "gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def _check_space(group, img): assert group.attrs['Type'] == 'image'", "'affine' gotxform = np.array(group['Matrix']) assert np.all(np.isclose(gotxform, xform)) def _check_space(group, img): assert group.attrs['Type'] ==", "field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose( np.random.randint(1, 5, 3),", "gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5') assert np.all(np.isclose(gotxform, xform)) assert gotsrc.sameSpace(src) assert gotref.sameSpace(ref) with", "assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group, field): assert", "gotdfield.src.sameSpace(src) assert gotdfield.ref.sameSpace(ref) assert gotdfield.srcSpace == wdfield.srcSpace assert gotdfield.refSpace == wdfield.refSpace assert gotdfield.deformationType", "np.all(np.isclose(xform, field.data)) _check_affine(group['Mapping'], field.voxToWorldMat) def test_readWriteLinearX5(): with tempdir.tempdir(): make_random_image('src.nii') make_random_image('ref.nii') xform = affine.compose(", "group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION def _check_affine(group, xform): assert group.attrs['Type'] ==", "x5 from .. 
import make_random_image def _check_metadata(group): assert group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version']", "<NAME> <<EMAIL>> # import os.path as op import numpy as np import pytest", "== 'image' assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3])) assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3])) _check_affine(group['Mapping'], img.voxToWorldMat) def _check_deformation(group,", "assert group.attrs['Format'] == x5.X5_FORMAT assert group.attrs['Version'] == x5.X5_VERSION def _check_affine(group, xform): assert group.attrs['Type']" ]
[]
[ "Canvas() self._create_body_shape() self._create_smell_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0,", "_safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def selected_animal(self): return self._selected_animal @selected_animal.setter def selected_animal(self,", "world): self.world = world self.canvas = Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size,", "= 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender == Gender.FEMALE:", "= tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill", "def selected_animal(self): return self._selected_animal @selected_animal.setter def selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal =", "= Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse", "value): self._draw_chunks = bool(value) if value: _safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def", "not hasattr(animal, 'shape'): animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def", "self._draw_chunks = bool(value) if value: _safe_add_to_canvas(self.canvas, 
self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def selected_animal(self):", "x2, y2, brush=Brushes.Gray): ln = Line() ln.X1 = x1 ln.Y1 = y1 ln.X2", "20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size =", "return self._draw_chunks @draw_chunks.setter def draw_chunks(self, value): self._draw_chunks = bool(value) if value: _safe_add_to_canvas(self.canvas, self.grid.canvas)", "2 self._smell_ellipse.Width = new_smell_size * 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size = property(fset=_set_smell_size)", "= False self.draw_food_smell = False self.draw_eat_distance = False self.food_shapes = [] self.animal_shapes =", "self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property def draw_chunks(self): return self._draw_chunks @draw_chunks.setter def draw_chunks(self, value):", "* row, self.world.width, size * row, brush) for col in range(1, int(self.world.width /", "TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender == Gender.FEMALE: self.body_brush = Brushes.DarkRed else:", "= 0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell", "2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self): return", "self._food.y) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value)", "= 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height 
= 1 self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform =", "render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self): for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape)", "TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color import math from animal import Gender class Renderer(object):", "bool(value) if value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def draw_eat_distance(self): return self._draw_eat_distance", "world): self.canvas = canvas self.world = world self.grid = ChunksGrid(world) self._draw_chunks = False", "_set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size * 2 self._smell_ellipse.Width = new_smell_size * 2 self._smell_ellipse.RenderTransform", "self._draw_food() def _remove_dead_animals(self): for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food", "world self.canvas = Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen)", "size * col, self.world.height, brush) def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray): ln", "RadialGradientBrush, Color import math from animal import Gender class Renderer(object): def __init__(self, canvas,", "for food in self.world.food: if not hasattr(food, 'shape'): food.shape = FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas)", "self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg = TransformGroup() 
tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform", "self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else:", "self._draw_smell = bool(value) if value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def draw_eat_distance(self):", "self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else:", "return self._selected_animal @selected_animal.setter def selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if", "self._smell_ellipse.Height = new_smell_size * 2 self._smell_ellipse.Width = new_smell_size * 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size,", "-0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance", "def __init__(self, animal, renderer): self._draw_smell = False self._animal = animal self._renderer = renderer", "System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color import math from animal", "self._angle_line.X2 = 1 self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform", "self.update_state() def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() def _create_body_shape(self): self._body_canvas = Canvas()", "= self._renderer.draw_animal_smell tg = 
TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size =", "_create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size, brush): for row", "+ self._food.size) * 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property", "self.canvas.Children.Clear() self.food_shapes = [] self.animal_shapes = [] def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food()", "if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size =", "self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg = TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg", "bool(value) if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove):", "if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance =", "not hasattr(food, 'shape'): food.shape = FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) 
food.shape.update_state() @property def draw_chunks(self):", "def set_default_body_brush(self): if self._animal.gender == Gender.FEMALE: self.body_brush = Brushes.DarkRed else: self.body_brush = Brushes.Green", "_create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray", "RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def", "def _create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill", "smell_size = property(fset=_set_smell_size) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell", "self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2 self._eat_distance_canvas.RenderTransform", "self._create_shape() self.update_state() def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() def _create_body_shape(self): self._body_canvas =", "def selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if self._selected_animal: self._selected_animal.shape.body_brush =", "RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2", "= None def restart(self): self.canvas.Children.Clear() self.food_shapes = [] 
self.animal_shapes = [] def render(self):", "'shape'): food.shape = FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property def draw_chunks(self): return self._draw_chunks", "color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width", "def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size * 2 self._smell_ellipse.Width = new_smell_size * 2", "= self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size)", "ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size)", "= ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell = False self.draw_food_smell = False self.draw_eat_distance =", "draw_eat_distance(self, value): self._draw_eat_distance = bool(value) if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def", "self.grid.canvas) @property def selected_animal(self): return self._selected_animal @selected_animal.setter def selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush()", "def __init__(self, food, renderer): self._food = food self._renderer = renderer self._create_shape() self._draw_smell =", "def draw_chunks(self): return self._draw_chunks @draw_chunks.setter def draw_chunks(self, value): self._draw_chunks = bool(value) if value:", "= False self.draw_animal_smell = False self.draw_food_smell = False 
self.draw_eat_distance = False self.food_shapes =", "range(1, int(self.world.height / size)+1): self._create_line(0, size * row, self.world.width, size * row, brush)", "* 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self):", "= Brushes.Gray self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse)", "Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size, brush): for row in range(1,", "if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class ChunksGrid(object):", "self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse = Ellipse()", "tg = TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform", "= Brushes.Gray self._food_ellipse.Height = 1 self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse)", "food.shape.update_state() @property def draw_chunks(self): return self._draw_chunks @draw_chunks.setter def draw_chunks(self, value): self._draw_chunks = bool(value)", "draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self._body_canvas, 
self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property", "= canvas self.world = world self.grid = ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell =", "= TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform =", "def _create_angle_line(self): self._angle_line = Line() self._angle_line.X1 = 0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2 =", "def update_state(self): if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance:", "ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color import math from animal import Gender class", "self._body_canvas.RenderTransform = tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush):", "2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self): for food in self.world.food: if not hasattr(food, 'shape'):", "= Ellipse() color1 = Color.FromArgb(40, 220, 0, 20) color2 = Color.FromArgb(0, 220, 0,", "= Brushes.Gray self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell", "__init__(self, food, renderer): self._food = food self._renderer = renderer self._create_shape() self._draw_smell = False", "self.smell_size = self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush", "20) color2 = Color.FromArgb(0, 0, 220, 20) 
self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness =", "tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill =", "self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 0, 220, 20) color2 = Color.FromArgb(0, 0,", "self.animal_shapes = [] self._selected_animal = None def restart(self): self.canvas.Children.Clear() self.food_shapes = [] self.animal_shapes", "= TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse = Ellipse()", "self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for", "self.world.food: if not hasattr(food, 'shape'): food.shape = FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property", "_remove_dead_animals(self): for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food in self.world.empty_food:", "= self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush body_brush", "ln.Y2 = y2 ln.StrokeThickness = 0.2 ln.Stroke = brush self.canvas.Children.Add(ln) class AnimalShape(object): def", "Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse() 
self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self):", "_create_line(self, x1, y1, x2, y2, brush=Brushes.Gray): ln = Line() ln.X1 = x1 ln.Y1", "new_smell_size * 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size = property(fset=_set_smell_size) @property def draw_smell(self):", "-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007", "= [] self.animal_shapes = [] self._selected_animal = None def restart(self): self.canvas.Children.Clear() self.food_shapes =", "selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold", "ChunksGrid(object): def __init__(self, world): self.world = world self.canvas = Canvas() self._create_grids() def _create_grids(self):", "self.draw_smell != self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance", "self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove): canvas.Children.Remove(element_to_remove) def _safe_add_to_canvas(canvas, element_to_add): if not canvas.Children.Contains(element_to_add):", "self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) *", "_create_grid(self, size, brush): for row in range(1, int(self.world.height / size)+1): self._create_line(0, size *", "= 0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2 = 1 self._angle_line.Y2 = 
0.5 self._angle_line.StrokeThickness =", "from System.Windows import Point from System.Windows.Shapes import * from System.Windows.Controls import Grid, Canvas", "self._smell_canvas = Canvas() self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 220, 0, 20) color2", "False self.food_shapes = [] self.animal_shapes = [] self._selected_animal = None def restart(self): self.canvas.Children.Clear()", "= RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO *", "self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height", "in self.world.animals: if not hasattr(animal, 'shape'): animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2)", "self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if", "self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size = self._animal.smell_size", "animal in self.world.animals: if not hasattr(animal, 'shape'): animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas,", "= animal self._renderer = renderer self._create_shape() self.update_state() def _create_shape(self): self.canvas = Canvas() self._create_body_shape()", "False self._animal = animal self._renderer = renderer self._create_shape() self.update_state() def _create_shape(self): self.canvas =", "self._eat_distance_ellipse = Ellipse() 
self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width", "y2 ln.StrokeThickness = 0.2 ln.Stroke = brush self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self, animal,", "size)+1): self._create_line(0, size * row, self.world.width, size * row, brush) for col in", "= Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self,", "new_smell_size): self._smell_ellipse.Height = new_smell_size * 2 self._smell_ellipse.Width = new_smell_size * 2 self._smell_ellipse.RenderTransform =", "= ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size,", "draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self.canvas,", "def draw_eat_distance(self, value): self._draw_eat_distance = bool(value) if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas)", "False self.draw_eat_distance = False self.food_shapes = [] self.animal_shapes = [] self._selected_animal = None", "= 0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas", "import Point from System.Windows.Shapes import * from System.Windows.Controls import Grid, Canvas from System.Windows.Media", "self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform = 
TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas =", "def _remove_empty_food(self): for food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for animal in", "if value: _safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def selected_animal(self): return self._selected_animal @selected_animal.setter", "def draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value): self._draw_eat_distance = bool(value) if value:", "TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse = Ellipse() color1 =", "= FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property def draw_chunks(self): return self._draw_chunks @draw_chunks.setter def", "0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def", "-new_smell_size) smell_size = property(fset=_set_smell_size) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value):", "self.food_shapes = [] self.animal_shapes = [] self._selected_animal = None def restart(self): self.canvas.Children.Clear() self.food_shapes", "self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove): canvas.Children.Remove(element_to_remove) def _safe_add_to_canvas(canvas, element_to_add):", "1 
self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender", "* col, 0, size * col, self.world.height, brush) def _create_line(self, x1, y1, x2,", "self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width = 1", "self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for animal in self.world.animals: if not hasattr(animal, 'shape'): animal.shape", "= 1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_food_smell:", "self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 220,", "self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size", "selected_animal(self): return self._selected_animal @selected_animal.setter def selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value", "update_state(self): if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance", "if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class ChunksGrid(object): def __init__(self, world): self.world = world", "bool(value) if value: _safe_add_to_canvas(self.canvas, 
self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def __init__(self, food,", "if value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter", "= x1 ln.Y1 = y1 ln.X2 = x2 ln.Y2 = y2 ln.StrokeThickness =", "Color.FromArgb(0, 0, 220, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke =", "Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size,", "Color import math from animal import Gender class Renderer(object): def __init__(self, canvas, world):", "self._renderer.draw_animal_smell tg = TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size = self._animal.smell_size", "col, 0, size * col, self.world.height, brush) def _create_line(self, x1, y1, x2, y2,", "def _create_grid(self, size, brush): for row in range(1, int(self.world.height / size)+1): self._create_line(0, size", "self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def", "row, brush) for col in range(1, int(self.world.width / size)+1): self._create_line(size * col, 0,", "self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender == Gender.FEMALE: self.body_brush 
=", "self._renderer = renderer self._create_shape() self.update_state() def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() def", "= Color.FromArgb(40, 0, 220, 20) color2 = Color.FromArgb(0, 0, 220, 20) self._smell_ellipse.Fill =", "self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value):", "0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2 = 1 self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness = 0.1", "self.food_shapes = [] self.animal_shapes = [] def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def", "ln.StrokeThickness = 0.2 ln.Stroke = brush self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self, animal, renderer):", "self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse =", "for col in range(1, int(self.world.width / size)+1): self._create_line(size * col, 0, size *", "False self.draw_food_smell = False self.draw_eat_distance = False self.food_shapes = [] self.animal_shapes = []", "def _remove_dead_animals(self): for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food in", "from animal import Gender class Renderer(object): def __init__(self, canvas, world): self.canvas = canvas", "value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove): canvas.Children.Remove(element_to_remove) def", 
"self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def __init__(self, food, renderer): self._food = food", "color1 = Color.FromArgb(40, 220, 0, 20) color2 = Color.FromArgb(0, 220, 0, 20) self._smell_ellipse.Fill", "x1 ln.Y1 = y1 ln.X2 = x2 ln.Y2 = y2 ln.StrokeThickness = 0.2", "= TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush body_brush = property(fset=_set_body_brush) def", "Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color import math from animal import Gender", "self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse", "body_brush = property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size * 2 self._smell_ellipse.Width =", "[] def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self): for animal in self.world.dead_animals:", "animal self._renderer = renderer self._create_shape() self.update_state() def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape()", "self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size = property(fset=_set_smell_size) @property def draw_smell(self): return self._draw_smell @draw_smell.setter", "self._smell_canvas) class FoodShape(object): def __init__(self, food, renderer): self._food = food self._renderer = renderer", "Point from System.Windows.Shapes import * from System.Windows.Controls import Grid, Canvas from System.Windows.Media import", "else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def 
draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value): self._draw_eat_distance", "self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self, animal, renderer): self._draw_smell = False self._animal = animal", "self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 220, 0, 20) color2 = Color.FromArgb(0, 220,", "!= self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg = TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform =", "= brush self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self, animal, renderer): self._draw_smell = False self._animal", "= bool(value) if value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def __init__(self,", "if not hasattr(animal, 'shape'): animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state()", "self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2", "__init__(self, canvas, world): self.canvas = canvas self.world = world self.grid = ChunksGrid(world) self._draw_chunks", "import * from System.Windows.Controls import Grid, Canvas from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform,", "@property def draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value): self._draw_eat_distance = bool(value) if", "0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): 
self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width =", "self._food.size) * 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property def", "return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse)", "size * row, brush) for col in range(1, int(self.world.width / size)+1): self._create_line(size *", "0, 220, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke = Brushes.Gray", "self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self): for food in self.world.food: if", "if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg = TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle)))", "self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush body_brush =", "self._create_line(size * col, 0, size * col, self.world.height, brush) def _create_line(self, x1, y1,", "renderer): self._food = food self._renderer = renderer self._create_shape() self._draw_smell = False self._draw_eat_distance =", "TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if", "else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def 
_safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove): canvas.Children.Remove(element_to_remove) def _safe_add_to_canvas(canvas, element_to_add): if", "self._food = food self._renderer = renderer self._create_shape() self._draw_smell = False self._draw_eat_distance = False", "System.Windows import Point from System.Windows.Shapes import * from System.Windows.Controls import Grid, Canvas from", "def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size, brush): for", "draw_chunks(self, value): self._draw_chunks = bool(value) if value: _safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property", "row, self.world.width, size * row, brush) for col in range(1, int(self.world.width / size)+1):", "= Color.FromArgb(0, 220, 0, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke", "Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height = 1", "color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self):", "self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg = TransformGroup()", "self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class ChunksGrid(object): def __init__(self, world): self.world = world self.canvas", "Line() ln.X1 = x1 ln.Y1 = y1 ln.X2 = x2 ln.Y2 = y2", 
"self._draw_animals() self._draw_food() def _remove_dead_animals(self): for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for", "1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender == Gender.FEMALE: self.body_brush", "def _create_body_ellipse(self): self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform", "_safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def __init__(self, food, renderer): self._food =", "= TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse = Ellipse() color1", "TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush body_brush = property(fset=_set_body_brush) def _set_smell_size(self,", "1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse = Ellipse()", "Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size, brush): for row in range(1, int(self.world.height /", "0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell !=", "ln.Stroke = brush self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self, animal, renderer): self._draw_smell = False", 
"self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO", "self.body_brush = Brushes.DarkRed else: self.body_brush = Brushes.Green def _create_angle_line(self): self._angle_line = Line() self._angle_line.X1", "self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size)", "def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value:", "self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def selected_animal(self): return self._selected_animal @selected_animal.setter def selected_animal(self, value):", "Brushes.DarkGreen) def _create_grid(self, size, brush): for row in range(1, int(self.world.height / size)+1): self._create_line(0,", "property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size * 2 self._smell_ellipse.Width = new_smell_size *", "= TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse = Ellipse() color1 =", "= Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height =", "self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) 
def update_state(self):", "return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value): self._draw_eat_distance = bool(value) if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas)", "= 1 self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if", "def _create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height = 1 self._food_ellipse.Width =", "System.Windows.Shapes import * from System.Windows.Controls import Grid, Canvas from System.Windows.Media import Brushes, ScaleTransform,", "for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas)", "animal.shape.update_state() def _draw_food(self): for food in self.world.food: if not hasattr(food, 'shape'): food.shape =", "self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5)", "'shape'): animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self): for", "in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for animal in self.world.animals: if not hasattr(animal,", "self.animal_shapes = [] def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self): for animal", "* col, self.world.height, brush) def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray): ln =", 
"eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self,", "self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse", "self._draw_eat_distance = False def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self):", "from System.Windows.Controls import Grid, Canvas from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup,", "self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2 self._eat_distance_canvas.RenderTransform =", "def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas)", "Line() self._angle_line.X1 = 0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2 = 1 self._angle_line.Y2 = 0.5", "= False def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas", "self.draw_animal_smell = False self.draw_food_smell = False self.draw_eat_distance = False self.food_shapes = [] self.animal_shapes", "* 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO )", "_create_shape(self): self.canvas = Canvas() self._create_body_shape() 
self._create_smell_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line()", "food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for animal in self.world.animals: if not", "class AnimalShape(object): def __init__(self, animal, renderer): self._draw_smell = False self._animal = animal self._renderer", "__init__(self, world): self.world = world self.canvas = Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray)", "animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape)", "draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class", "self._draw_smell = False self._animal = animal self._renderer = renderer self._create_shape() self.update_state() def _create_shape(self):", "def update_state(self): if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg = TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size,", "self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse", "@draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas,", "1 self._eat_distance_ellipse.Width = 1 
self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell", "def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray): ln = Line() ln.X1 = x1", "size, brush): for row in range(1, int(self.world.height / size)+1): self._create_line(0, size * row,", "self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse = Ellipse()", "@property def draw_chunks(self): return self._draw_chunks @draw_chunks.setter def draw_chunks(self, value): self._draw_chunks = bool(value) if", "class ChunksGrid(object): def __init__(self, world): self.world = world self.canvas = Canvas() self._create_grids() def", "if not hasattr(food, 'shape'): food.shape = FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property def", "Brushes.Gray self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell =", "= Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)", "self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse = Ellipse() self.set_default_body_brush()", "col, self.world.height, brush) def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray): ln = Line()", "* 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size = property(fset=_set_smell_size) @property def draw_smell(self): return", 
"self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size, brush): for row in range(1, int(self.world.height / size)+1):", "Grid, Canvas from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color import", "220, 0, 20) color2 = Color.FromArgb(0, 220, 0, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2)", "= False self._draw_eat_distance = False def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape()", "TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness", "draw_chunks(self): return self._draw_chunks @draw_chunks.setter def draw_chunks(self, value): self._draw_chunks = bool(value) if value: _safe_add_to_canvas(self.canvas,", "0, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size", "self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender == Gender.FEMALE: self.body_brush = Brushes.DarkRed else: self.body_brush =", "self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self): for food in self.world.food: if not hasattr(food, 'shape'): food.shape", "_create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill =", "self._angle_line.X1 = 0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2 = 1 self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness", "bool(value) if value: 
_safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def selected_animal(self): return self._selected_animal", "def __init__(self, world): self.world = world self.canvas = Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size,", "_create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 220, 0, 20)", "self.draw_smell = self._renderer.draw_animal_smell tg = TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size", "= new_smell_size * 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size = property(fset=_set_smell_size) @property def", "= Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform", "= TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender == Gender.FEMALE: self.body_brush = Brushes.DarkRed", "= property(fset=_set_smell_size) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell =", "Renderer(object): def __init__(self, canvas, world): self.canvas = canvas self.world = world self.grid =", "self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class ChunksGrid(object): def __init__(self,", "color2 = Color.FromArgb(0, 0, 220, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03", "= bool(value) if value: 
_safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove): if", "self._selected_animal = None def restart(self): self.canvas.Children.Clear() self.food_shapes = [] self.animal_shapes = [] def", "self.world.height, brush) def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray): ln = Line() ln.X1", "= bool(value) if value: _safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def selected_animal(self): return", "size * row, self.world.width, size * row, brush) for col in range(1, int(self.world.width", "self._body_ellipse.Fill = new_brush body_brush = property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size *", "def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse)", "self.canvas = canvas self.world = world self.grid = ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell", "0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5,", "size)+1): self._create_line(size * col, 0, size * col, self.world.height, brush) def _create_line(self, x1,", "if value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def __init__(self, food, renderer):", "self._eat_distance_canvas = Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height", "self._smell_ellipse.Stroke = Brushes.Gray self.smell_size = 
self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_animal_smell:", "eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform =", "Gender.FEMALE: self.body_brush = Brushes.DarkRed else: self.body_brush = Brushes.Green def _create_angle_line(self): self._angle_line = Line()", "0.2 ln.Stroke = brush self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self, animal, renderer): self._draw_smell =", "def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self): for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas)", "def draw_chunks(self, value): self._draw_chunks = bool(value) if value: _safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas)", "= food self._renderer = renderer self._create_shape() self._draw_smell = False self._draw_eat_distance = False def", "= ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self): return self._draw_smell @draw_smell.setter", "None def restart(self): self.canvas.Children.Clear() self.food_shapes = [] self.animal_shapes = [] def render(self): self._remove_dead_animals()", "self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell !=", "self.food_shapes.append(food.shape) food.shape.update_state() @property def draw_chunks(self): return self._draw_chunks @draw_chunks.setter def draw_chunks(self, value): self._draw_chunks =", 
"hasattr(animal, 'shape'): animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self):", "1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell", "import math from animal import Gender class Renderer(object): def __init__(self, canvas, world): self.canvas", "= Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width =", "0, 20) color2 = Color.FromArgb(0, 220, 0, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness", "220, 0, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke = Brushes.Gray", "animal import Gender class Renderer(object): def __init__(self, canvas, world): self.canvas = canvas self.world", "tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y)", "self._draw_smell = bool(value) if value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def", "Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width", "= Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height = 1 
self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform =", "value if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class ChunksGrid(object): def __init__(self, world): self.world =", "renderer self._create_shape() self._draw_smell = False self._draw_eat_distance = False def _create_shape(self): self.canvas = Canvas()", "= Line() ln.X1 = x1 ln.Y1 = y1 ln.X2 = x2 ln.Y2 =", "* 2 self._smell_ellipse.Width = new_smell_size * 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size =", "Brushes.Gray self._food_ellipse.Height = 1 self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse,", "Color.FromArgb(0, 220, 0, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke =", "import Grid, Canvas from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color", "(self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y)", "math from animal import Gender class Renderer(object): def __init__(self, canvas, world): self.canvas =", "= TranslateTransform(-new_smell_size, -new_smell_size) smell_size = property(fset=_set_smell_size) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def", "Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width = 1", "= False self.food_shapes = [] self.animal_shapes = [] self._selected_animal = None def restart(self):", "= world self.grid = 
ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell = False self.draw_food_smell =", "2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size = property(fset=_set_smell_size) @property def draw_smell(self): return self._draw_smell", "TransformGroup, RadialGradientBrush, Color import math from animal import Gender class Renderer(object): def __init__(self,", "self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5,", "renderer self._create_shape() self.update_state() def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() def _create_body_shape(self): self._body_canvas", "self._animal = animal self._renderer = renderer self._create_shape() self.update_state() def _create_shape(self): self.canvas = Canvas()", "food self._renderer = renderer self._create_shape() self._draw_smell = False self._draw_eat_distance = False def _create_shape(self):", "self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender ==", "in range(1, int(self.world.width / size)+1): self._create_line(size * col, 0, size * col, self.world.height,", "[] self.animal_shapes = [] def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self): for", "= value if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class ChunksGrid(object): def __init__(self, world): self.world", "self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform", "class Renderer(object): def 
__init__(self, canvas, world): self.canvas = canvas self.world = world self.grid", "Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height = 1 self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5,", "import Gender class Renderer(object): def __init__(self, canvas, world): self.canvas = canvas self.world =", "self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self): for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def", "= self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO,", "= bool(value) if value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def draw_eat_distance(self): return", "col in range(1, int(self.world.width / size)+1): self._create_line(size * col, 0, size * col,", "TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x,", "= 1 self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def", "self._draw_eat_distance = bool(value) if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove):", "TranslateTransform(-new_smell_size, -new_smell_size) 
smell_size = property(fset=_set_smell_size) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self,", "self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self): for animal in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self):", "False def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas =", "[] self._selected_animal = None def restart(self): self.canvas.Children.Clear() self.food_shapes = [] self.animal_shapes = []", "1 self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self):", "self._food_ellipse.Height = 1 self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1)", "return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self.canvas, self._smell_canvas)", "= self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg", "!= self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform", "in self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food in self.world.empty_food: 
self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def", "FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property def draw_chunks(self): return self._draw_chunks @draw_chunks.setter def draw_chunks(self,", "self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height = 1", "self.body_brush = Brushes.Green def _create_angle_line(self): self._angle_line = Line() self._angle_line.X1 = 0.5 self._angle_line.Y1 =", "ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell = False self.draw_food_smell = False self.draw_eat_distance = False", "1) def _create_smell_shape(self): self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 0, 220, 20) color2", "renderer): self._draw_smell = False self._animal = animal self._renderer = renderer self._create_shape() self.update_state() def", "self._body_ellipse.Height = 1 self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self):", "self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse =", "for row in range(1, int(self.world.height / size)+1): self._create_line(0, size * row, self.world.width, size", "= x2 ln.Y2 = y2 ln.StrokeThickness = 0.2 ln.Stroke = brush self.canvas.Children.Add(ln) class", "for animal in self.world.animals: if not hasattr(animal, 'shape'): animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas)", "self._selected_animal = value if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class 
ChunksGrid(object): def __init__(self, world):", "self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg =", "self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE", "self.world = world self.grid = ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell = False self.draw_food_smell", "self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self):", "Brushes.Gold class ChunksGrid(object): def __init__(self, world): self.world = world self.canvas = Canvas() self._create_grids()", "= Line() self._angle_line.X1 = 0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2 = 1 self._angle_line.Y2 =", "def _create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 220, 0,", "property(fset=_set_smell_size) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value)", "y1 ln.X2 = x2 ln.Y2 = y2 ln.StrokeThickness = 0.2 ln.Stroke = brush", "def restart(self): self.canvas.Children.Clear() self.food_shapes = [] self.animal_shapes = [] def render(self): self._remove_dead_animals() self._remove_empty_food()", "!= self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE +", "row in range(1, int(self.world.height / size)+1): 
self._create_line(0, size * row, self.world.width, size *", "self._angle_line.Y1 = 0.5 self._angle_line.X2 = 1 self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke", "self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self):", "Canvas from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color import math", "AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self): for food in self.world.food:", "System.Windows.Controls import Grid, Canvas from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush,", "= 0.03 self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO", "self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas = Canvas()", "self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin", "self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for animal in self.world.animals: if not hasattr(animal, 'shape'):", "self._renderer = renderer self._create_shape() 
self._draw_smell = False self._draw_eat_distance = False def _create_shape(self): self.canvas", "self.world.animals: if not hasattr(animal, 'shape'): animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape)", ") def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke", "_create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self):", "_safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove): canvas.Children.Remove(element_to_remove) def _safe_add_to_canvas(canvas, element_to_add): if not", "= [] def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self): for animal in", "Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse = Ellipse()", "self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse = Ellipse() color1", "= world self.canvas = Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size,", "Gender class Renderer(object): def __init__(self, canvas, world): 
self.canvas = canvas self.world = world", "Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform =", "value: _safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def", "_draw_animals(self): for animal in self.world.animals: if not hasattr(animal, 'shape'): animal.shape = AnimalShape(animal, self)", "Ellipse() color1 = Color.FromArgb(40, 220, 0, 20) color2 = Color.FromArgb(0, 220, 0, 20)", "_draw_food(self): for food in self.world.food: if not hasattr(food, 'shape'): food.shape = FoodShape(food, self)", "-0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 0,", "= False self.draw_eat_distance = False self.food_shapes = [] self.animal_shapes = [] self._selected_animal =", "brush) for col in range(1, int(self.world.width / size)+1): self._create_line(size * col, 0, size", "Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._body_ellipse)", "x1, y1, x2, y2, brush=Brushes.Gray): ln = Line() ln.X1 = x1 ln.Y1 =", "self._angle_line = Line() self._angle_line.X1 = 0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2 = 1 self._angle_line.Y2", "= new_smell_size * 2 self._smell_ellipse.Width = new_smell_size * 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size)", "self._create_line(0, size * row, self.world.width, size * row, brush) for col in range(1,", "20) color2 = Color.FromArgb(0, 220, 0, 20) 
self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness =", "= False self._animal = animal self._renderer = renderer self._create_shape() self.update_state() def _create_shape(self): self.canvas", "self._food_ellipse = Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height = 1 self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform", "value: _safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def selected_animal(self): return self._selected_animal @selected_animal.setter def", "_safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value): self._draw_eat_distance =", "* from System.Windows.Controls import Grid, Canvas from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform,", "= 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse =", "def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness = 0.007 self._eat_distance_ellipse.Stroke =", "= [] self._selected_animal = None def restart(self): self.canvas.Children.Clear() self.food_shapes = [] self.animal_shapes =", "animal.shape = AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self): for food", "self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size, brush): for row in range(1, 
int(self.world.height", "self._create_shape() self._draw_smell = False self._draw_eat_distance = False def _create_shape(self): self.canvas = Canvas() self._create_body_shape()", "AnimalShape(object): def __init__(self, animal, renderer): self._draw_smell = False self._animal = animal self._renderer =", "set_default_body_brush(self): if self._animal.gender == Gender.FEMALE: self.body_brush = Brushes.DarkRed else: self.body_brush = Brushes.Green def", "def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse()", "= Brushes.DarkRed else: self.body_brush = Brushes.Green def _create_angle_line(self): self._angle_line = Line() self._angle_line.X1 =", "Brushes.Green def _create_angle_line(self): self._angle_line = Line() self._angle_line.X1 = 0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2", "brush) def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray): ln = Line() ln.X1 =", "self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class ChunksGrid(object): def", "_remove_empty_food(self): for food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for animal in self.world.animals:", "0, size * col, self.world.height, brush) def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray):", "self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell =", "self._eat_distance_ellipse.Stroke = Brushes.Gray self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)", 
"Brushes.Gray self._eat_distance_ellipse.Height = 1 self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def", "@selected_animal.setter def selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if self._selected_animal: self._selected_animal.shape.body_brush", "value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if self._selected_animal: self._selected_animal.shape.body_brush = Brushes.Gold class", "food, renderer): self._food = food self._renderer = renderer self._create_shape() self._draw_smell = False self._draw_eat_distance", "self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush body_brush = property(fset=_set_body_brush)", "brush): for row in range(1, int(self.world.height / size)+1): self._create_line(0, size * row, self.world.width,", "self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self): return self._draw_smell", "from System.Windows.Shapes import * from System.Windows.Controls import Grid, Canvas from System.Windows.Media import Brushes,", "= 1 self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform =", "new_smell_size * 2 self._smell_ellipse.Width = new_smell_size * 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size", "0, 220, 20) color2 = Color.FromArgb(0, 0, 220, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2)", "@draw_eat_distance.setter def draw_eat_distance(self, value): 
self._draw_eat_distance = bool(value) if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas,", "value): self._draw_eat_distance = bool(value) if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas,", "-0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40,", "draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value): self._draw_eat_distance = bool(value) if value: _safe_add_to_canvas(self.canvas,", "self._smell_ellipse) @property def draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value): self._draw_eat_distance = bool(value)", "_create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height = 1 self._food_ellipse.Width = 1", "= property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size * 2 self._smell_ellipse.Width = new_smell_size", "self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform", "def _create_smell_shape(self): self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 0, 220, 20) color2 =", "import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color import math from animal import", "_safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def __init__(self, food, renderer): self._food = food self._renderer =", "__init__(self, animal, renderer): self._draw_smell = False 
self._animal = animal self._renderer = renderer self._create_shape()", "color2 = Color.FromArgb(0, 220, 0, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1", "self._animal.size)) tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def", "def _safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove): canvas.Children.Remove(element_to_remove) def _safe_add_to_canvas(canvas, element_to_add): if not canvas.Children.Contains(element_to_add): canvas.Children.Add(element_to_add)", "x2 ln.Y2 = y2 ln.StrokeThickness = 0.2 ln.Stroke = brush self.canvas.Children.Add(ln) class AnimalShape(object):", "else: self.body_brush = Brushes.Green def _create_angle_line(self): self._angle_line = Line() self._angle_line.X1 = 0.5 self._angle_line.Y1", "@property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if", "= Brushes.Green def _create_angle_line(self): self._angle_line = Line() self._angle_line.X1 = 0.5 self._angle_line.Y1 = 0.5", "world self.grid = ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell = False self.draw_food_smell = False", "color1 = Color.FromArgb(40, 0, 220, 20) color2 = Color.FromArgb(0, 0, 220, 20) self._smell_ellipse.Fill", "self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value):", "False self.draw_animal_smell = False self.draw_food_smell = False self.draw_eat_distance = False self.food_shapes = []", "food in self.world.food: if not hasattr(food, 'shape'): food.shape = FoodShape(food, self) 
self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape)", "y2, brush=Brushes.Gray): ln = Line() ln.X1 = x1 ln.Y1 = y1 ln.X2 =", "Brushes.DarkRed else: self.body_brush = Brushes.Green def _create_angle_line(self): self._angle_line = Line() self._angle_line.X1 = 0.5", "Canvas() self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 220, 0, 20) color2 = Color.FromArgb(0,", "/ size)+1): self._create_line(0, size * row, self.world.width, size * row, brush) for col", "self.food_shapes.remove(food.shape) def _draw_animals(self): for animal in self.world.animals: if not hasattr(animal, 'shape'): animal.shape =", "self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height =", "TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell =", "value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def __init__(self, food, renderer): self._food", "ln.X2 = x2 ln.Y2 = y2 ln.StrokeThickness = 0.2 ln.Stroke = brush self.canvas.Children.Add(ln)", "= self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas", "= Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height", "20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke = Brushes.Gray 
self._smell_ellipse.Height =", "self._body_canvas = Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill = Brushes.Gray", "Color.FromArgb(40, 0, 220, 20) color2 = Color.FromArgb(0, 0, 220, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1,", "@property def selected_animal(self): return self._selected_animal @selected_animal.setter def selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal", "1 self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5,", "= RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.1 self._smell_ellipse.Stroke = Brushes.Gray self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse)", "self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 0, 220, 20)", "self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance !=", "if self._animal.gender == Gender.FEMALE: self.body_brush = Brushes.DarkRed else: self.body_brush = Brushes.Green def _create_angle_line(self):", "self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for animal", "= renderer self._create_shape() self.update_state() def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() def _create_body_shape(self):", "[] self.animal_shapes = [] self._selected_animal = None def 
restart(self): self.canvas.Children.Clear() self.food_shapes = []", "self._draw_chunks @draw_chunks.setter def draw_chunks(self, value): self._draw_chunks = bool(value) if value: _safe_add_to_canvas(self.canvas, self.grid.canvas) else:", "= Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1", "self.canvas = Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def", "restart(self): self.canvas.Children.Clear() self.food_shapes = [] self.animal_shapes = [] def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals()", "= 1 self._eat_distance_ellipse.Width = 1 self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if", "new_brush body_brush = property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size * 2 self._smell_ellipse.Width", "self.smell_size = self._animal.smell_size self._smell_canvas.Children.Add(self._smell_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell", "self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property def draw_chunks(self): return self._draw_chunks @draw_chunks.setter def draw_chunks(self, value): self._draw_chunks", "self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self): for food in self.world.food: if not hasattr(food,", "value): self._draw_smell = bool(value) if value: 
_safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def", "brush=Brushes.Gray): ln = Line() ln.X1 = x1 ln.Y1 = y1 ln.X2 = x2", "= [] self.animal_shapes = [] def render(self): self._remove_dead_animals() self._remove_empty_food() self._draw_animals() self._draw_food() def _remove_dead_animals(self):", "def _draw_food(self): for food in self.world.food: if not hasattr(food, 'shape'): food.shape = FoodShape(food,", "canvas self.world = world self.grid = ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell = False", "self._create_smell_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas)", "update_state(self): if self.draw_smell != self._renderer.draw_animal_smell: self.draw_smell = self._renderer.draw_animal_smell tg = TransformGroup() tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size))", "= 0.5 self._angle_line.X2 = 1 self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke =", "self.grid = ChunksGrid(world) self._draw_chunks = False self.draw_animal_smell = False self.draw_food_smell = False self.draw_eat_distance", "= Canvas() self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 220, 0, 20) color2 =", "TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40,", "canvas, world): self.canvas = canvas self.world = world self.grid = ChunksGrid(world) self._draw_chunks =", "self._selected_animal.shape.body_brush = Brushes.Gold class ChunksGrid(object): def __init__(self, world): self.world = world self.canvas =", "class FoodShape(object): def __init__(self, food, renderer): self._food = 
food self._renderer = renderer self._create_shape()", "self.world.dead_animals: self.canvas.Children.Remove(animal.shape.canvas) self.animal_shapes.remove(animal.shape) def _remove_empty_food(self): for food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self):", "def _create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas) def", "_safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def selected_animal(self): return self._selected_animal @selected_animal.setter def selected_animal(self, value): if self._selected_animal:", "_safe_add_to_canvas(self._body_canvas, self._smell_ellipse) else: _safe_remove_from_canvas(self._body_canvas, self._smell_ellipse) @property def draw_eat_distance(self): return self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self,", "Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height =", "self._animal.y) def _set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush body_brush = property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size):", "= self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size) eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2", "False self._draw_eat_distance = False def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def", "new_brush): self._body_ellipse.Fill = new_brush body_brush = property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size", 
"self._body_canvas.Children.Add(self._food_ellipse) self._body_canvas.SetZIndex(self._food_ellipse, 1) def _create_smell_shape(self): self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 0, 220,", "220, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height", "2 self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse", "Color.FromArgb(40, 220, 0, 20) color2 = Color.FromArgb(0, 220, 0, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1,", "ln.X1 = x1 ln.Y1 = y1 ln.X2 = x2 ln.Y2 = y2 ln.StrokeThickness", "0.5 self._angle_line.X2 = 1 self._angle_line.Y2 = 0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke = Brushes.Black", "def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas = Canvas()", "self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width =", "= renderer self._create_shape() self._draw_smell = False self._draw_eat_distance = False def _create_shape(self): self.canvas =", "self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform =", "= new_brush body_brush = property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height = new_smell_size * 2", "else: _safe_remove_from_canvas(self.canvas, self.grid.canvas) @property def 
selected_animal(self): return self._selected_animal @selected_animal.setter def selected_animal(self, value): if", "_safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove): canvas.Children.Remove(element_to_remove) def _safe_add_to_canvas(canvas,", "/ size)+1): self._create_line(size * col, 0, size * col, self.world.height, brush) def _create_line(self,", "value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object):", "int(self.world.width / size)+1): self._create_line(size * col, 0, size * col, self.world.height, brush) def", "animal, renderer): self._draw_smell = False self._animal = animal self._renderer = renderer self._create_shape() self.update_state()", "in range(1, int(self.world.height / size)+1): self._create_line(0, size * row, self.world.width, size * row,", "= AnimalShape(animal, self) self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) animal.shape.update_state() def _draw_food(self): for food in", "self._draw_smell = False self._draw_eat_distance = False def _create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape()", "Ellipse() color1 = Color.FromArgb(40, 0, 220, 20) color2 = Color.FromArgb(0, 0, 220, 20)", "self._draw_chunks = False self.draw_animal_smell = False self.draw_food_smell = False self.draw_eat_distance = False self.food_shapes", "for food in self.world.empty_food: self.canvas.Children.Remove(food.shape.canvas) self.food_shapes.remove(food.shape) def _draw_animals(self): for animal in self.world.animals: if", "= TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self): return self._draw_smell 
@draw_smell.setter def draw_smell(self, value): self._draw_smell", "-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas() self._eat_distance_ellipse = Ellipse() self._eat_distance_ellipse.StrokeThickness =", "self._selected_animal @selected_animal.setter def selected_animal(self, value): if self._selected_animal: self._selected_animal.shape.set_default_body_brush() self._selected_animal = value if self._selected_animal:", "= y2 ln.StrokeThickness = 0.2 ln.Stroke = brush self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self,", "def _draw_animals(self): for animal in self.world.animals: if not hasattr(animal, 'shape'): animal.shape = AnimalShape(animal,", "self._create_body_shape() self._create_smell_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin = Point(0, 0)", "_create_angle_line(self): self._angle_line = Line() self._angle_line.X1 = 0.5 self._angle_line.Y1 = 0.5 self._angle_line.X2 = 1", "if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else: _safe_remove_from_canvas(self.canvas, self._eat_distance_canvas) def _safe_remove_from_canvas(canvas, element_to_remove): if canvas.Children.Contains(element_to_remove): canvas.Children.Remove(element_to_remove)", "self.world = world self.canvas = Canvas() self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red)", "= Canvas() self._create_body_shape() self._create_smell_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_body_ellipse() self._create_angle_line() self._body_canvas.RenderTransformOrigin =", "_create_shape(self): self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() 
self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse()", "= Brushes.Gold class ChunksGrid(object): def __init__(self, world): self.world = world self.canvas = Canvas()", "Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas = Canvas() self._smell_ellipse =", "food.shape = FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property def draw_chunks(self): return self._draw_chunks @draw_chunks.setter", "self.draw_eat_distance = False self.food_shapes = [] self.animal_shapes = [] self._selected_animal = None def", "ln = Line() ln.X1 = x1 ln.Y1 = y1 ln.X2 = x2 ln.Y2", "= Color.FromArgb(40, 220, 0, 20) color2 = Color.FromArgb(0, 220, 0, 20) self._smell_ellipse.Fill =", "brush self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self, animal, renderer): self._draw_smell = False self._animal =", "hasattr(food, 'shape'): food.shape = FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) food.shape.update_state() @property def draw_chunks(self): return", "y1, x2, y2, brush=Brushes.Gray): ln = Line() ln.X1 = x1 ln.Y1 = y1", "0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line) def _create_smell_shape(self): self._smell_canvas =", "self.canvas = Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas)", "in self.world.food: if not hasattr(food, 'shape'): food.shape = FoodShape(food, self) self.canvas.Children.Add(food.shape.canvas) self.food_shapes.append(food.shape) 
food.shape.update_state()", "draw_smell(self): return self._draw_smell @draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self._body_canvas,", "tg.Children.Add(RotateTransform(math.degrees(self._animal.angle))) self._body_canvas.RenderTransform = tg self.smell_size = self._animal.smell_size self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y) def _set_body_brush(self,", "_set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush body_brush = property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height =", "@draw_smell.setter def draw_smell(self, value): self._draw_smell = bool(value) if value: _safe_add_to_canvas(self.canvas, self._smell_canvas) else: _safe_remove_from_canvas(self.canvas,", "* 2 self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def _create_eat_distance_shape(self): self._eat_distance_canvas = Canvas()", "range(1, int(self.world.width / size)+1): self._create_line(size * col, 0, size * col, self.world.height, brush)", "= Ellipse() color1 = Color.FromArgb(40, 0, 220, 20) color2 = Color.FromArgb(0, 0, 220,", "RotateTransform, TransformGroup, RadialGradientBrush, Color import math from animal import Gender class Renderer(object): def", "_create_smell_shape(self): self._smell_ellipse = Ellipse() color1 = Color.FromArgb(40, 0, 220, 20) color2 = Color.FromArgb(0,", "self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO", "== Gender.FEMALE: self.body_brush = Brushes.DarkRed else: self.body_brush = Brushes.Green def _create_angle_line(self): self._angle_line =", "= Canvas() self._create_body_ellipse() self._create_angle_line() 
self._body_canvas.RenderTransformOrigin = Point(0, 0) self.canvas.Children.Add(self._body_canvas) def _create_body_ellipse(self): self._body_ellipse =", "_create_body_ellipse(self): self._body_ellipse = Ellipse() self.set_default_body_brush() self._body_ellipse.Height = 1 self._body_ellipse.Width = 1 self._body_ellipse.RenderTransform =", "@draw_chunks.setter def draw_chunks(self, value): self._draw_chunks = bool(value) if value: _safe_add_to_canvas(self.canvas, self.grid.canvas) else: _safe_remove_from_canvas(self.canvas,", "def _set_body_brush(self, new_brush): self._body_ellipse.Fill = new_brush body_brush = property(fset=_set_body_brush) def _set_smell_size(self, new_smell_size): self._smell_ellipse.Height", "self._animal.gender == Gender.FEMALE: self.body_brush = Brushes.DarkRed else: self.body_brush = Brushes.Green def _create_angle_line(self): self._angle_line", "0.03 self._smell_ellipse.Stroke = Brushes.Gray self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO *", "self.draw_food_smell = False self.draw_eat_distance = False self.food_shapes = [] self.animal_shapes = [] self._selected_animal", "self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size, brush): for row in", "= Canvas() self._create_body_shape() self._create_smell_shape() self._create_eat_distance_shape() def _create_body_shape(self): self._body_canvas = Canvas() self._create_food_ellipse() self.canvas.Children.Add(self._body_canvas) def", "= TranslateTransform(-0.5, -0.5) self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse) def update_state(self): if self.draw_smell != self._renderer.draw_food_smell: self.draw_smell = self._renderer.draw_food_smell", "= (self._renderer.world.constants.EATING_DISTANCE + 
self._food.size) * 2 self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x,", "2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform = TranslateTransform( -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO ) def", "self._draw_eat_distance @draw_eat_distance.setter def draw_eat_distance(self, value): self._draw_eat_distance = bool(value) if value: _safe_add_to_canvas(self.canvas, self._eat_distance_canvas) else:", "= Color.FromArgb(0, 0, 220, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) self._smell_ellipse.StrokeThickness = 0.03 self._smell_ellipse.Stroke", "from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color import math from", "self.draw_smell = self._renderer.draw_food_smell if self.draw_eat_distance != self._renderer.draw_eat_distance: self.draw_eat_distance = self._renderer.draw_eat_distance self._body_canvas.RenderTransform = ScaleTransform(self._food.size,", "self.canvas.Children.Add(self._body_canvas) def _create_food_ellipse(self): self._food_ellipse = Ellipse() self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height = 1 self._food_ellipse.Width", "self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2 self._smell_ellipse.RenderTransform = TranslateTransform(", "def __init__(self, canvas, world): self.canvas = canvas self.world = world self.grid = ChunksGrid(world)", "self.world.width, size * row, brush) for col in range(1, int(self.world.width / size)+1): self._create_line(size", "220, 20) color2 = Color.FromArgb(0, 0, 220, 20) self._smell_ellipse.Fill = RadialGradientBrush(color1, color2) 
self._smell_ellipse.StrokeThickness", "-0.5) self._body_canvas.Children.Add(self._body_ellipse) def set_default_body_brush(self): if self._animal.gender == Gender.FEMALE: self.body_brush = Brushes.DarkRed else: self.body_brush", "ln.Y1 = y1 ln.X2 = x2 ln.Y2 = y2 ln.StrokeThickness = 0.2 ln.Stroke", "self._smell_ellipse.Width = new_smell_size * 2 self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size) smell_size = property(fset=_set_smell_size) @property", "else: _safe_remove_from_canvas(self.canvas, self._smell_canvas) class FoodShape(object): def __init__(self, food, renderer): self._food = food self._renderer", "FoodShape(object): def __init__(self, food, renderer): self._food = food self._renderer = renderer self._create_shape() self._draw_smell", "ScaleTransform(eat_distance_size, eat_distance_size) self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y) @property def draw_smell(self): return self._draw_smell @draw_smell.setter def", "self._create_grids() def _create_grids(self): self._create_grid(self.world.female_chunk_size, Brushes.Gray) self._create_grid(self.world.food_chunk_size, Brushes.Red) self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen) def _create_grid(self, size, brush):", "* row, brush) for col in range(1, int(self.world.width / size)+1): self._create_line(size * col,", "= 0.5 self._angle_line.StrokeThickness = 0.1 self._angle_line.Stroke = Brushes.Black self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5) self._body_canvas.Children.Add(self._angle_line)", "int(self.world.height / size)+1): self._create_line(0, size * row, self.world.width, size * row, brush) for", "self._food_ellipse.Fill = Brushes.Gray self._food_ellipse.Height = 1 self._food_ellipse.Width = 1 self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)", "self.canvas.Children.Add(animal.shape.canvas) self.canvas.SetZIndex(animal.shape.canvas, 2) self.animal_shapes.append(animal.shape) 
animal.shape.update_state() def _draw_food(self): for food in self.world.food: if not", "= y1 ln.X2 = x2 ln.Y2 = y2 ln.StrokeThickness = 0.2 ln.Stroke =", "= 0.2 ln.Stroke = brush self.canvas.Children.Add(ln) class AnimalShape(object): def __init__(self, animal, renderer): self._draw_smell" ]
[ "url = f'http://{host}:{port}' ledger = Ledger(url) # ----------- Publish and Issue ----------- measurement_prod_key", "compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}' ledger = Ledger(url) # ----------- Publish and Issue", "is always 1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose: time.sleep(5) host =", "parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ),", "= 0 while True: status = ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED: break if", "True: status = ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED: break if status == BatchStatus.INVALID:", "= BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0)", "tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request", "import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix,", "# ----------- Publish and Issue ----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey())", "SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( 
address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] ) batch", "user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger,", "measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ]", "import BIP32Key from testcontainers.compose import DockerCompose from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType,", "user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose: time.sleep(5) host = compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api',", "ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412' ) batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request)", "Exception(\"INVALID\") i += 1 if i > 30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED)", "= SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart(", "if status == BatchStatus.INVALID: raise Exception(\"INVALID\") i += 1 if i > 30:", "TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle): i = 0 while True: status = ledger.get_batch_status(handle).status", "self.wait_for_commit(ledger, handle) # ----------- Retire GGO 
----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request =", "), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch = Batch(user_2_masterkey.PrivateKey()) batch.add_request(retire_request) handle", "ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412' ) batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch)", "sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address,", "bip32utils import BIP32Key from testcontainers.compose import DockerCompose from src.origin_ledger_sdk import Ledger, Batch, BatchStatus,", "handle) # ----------- Retire GGO ----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest(", "# Meatering points is always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always", "measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1,", "4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address =", "DockerCompose from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart,", "handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, 
handle) # ----------- Trade the GGO ----------- split_request =", "] ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # -----------", "RetireGGOPart class TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle): i = 0 while True: status", "measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc),", ") measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020,", "= TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle", "user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 )", "BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is", "# ----------- Retire GGO ----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address,", "GGO ----------- transfer_request = TransferGGORequest( 
source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch =", "Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class", "= ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED: break if status == BatchStatus.INVALID: raise Exception(\"INVALID\")", "ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(),", "----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020,", "is always 0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_2_meter_5", ") batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade", "transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request)", "settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, 
user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey()", "self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey", "class TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle): i = 0 while True: status =", "1 if i > 30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def", "= generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412' ) batch =", "user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4, 1, 12,", "end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey())", "BIP32Key from testcontainers.compose import DockerCompose from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest,", "import DockerCompose from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest,", "generate_address, AddressPrefix, RetireGGOPart class 
TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle): i = 0 while", "pytest import time from datetime import datetime, timezone from bip32utils import BIP32Key from", "src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address,", "== BatchStatus.COMMITTED: break if status == BatchStatus.INVALID: raise Exception(\"INVALID\") i += 1 if", "TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle):", "test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is", "time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode())", "compose: time.sleep(5) host = compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}'", "12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key =", "measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1,", "----------- Publish and Issue ----------- measurement_prod_key = 
user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request", "batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO", "source_address=ggo_issue_address, parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25", "MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase): def", "== BatchStatus.INVALID: raise Exception(\"INVALID\") i += 1 if i > 30: raise Exception(\"TIMEOUT\")", "always 1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose: time.sleep(5) host = compose.get_service_host('rest-api',", "user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch = Batch(user_2_masterkey.PrivateKey()) batch.add_request(retire_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger,", "is always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always 0.0 user_2_account =", "user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always 0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is", "user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] ) batch = Batch(user_1_masterkey.PrivateKey())", "private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( 
address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch = Batch(user_2_masterkey.PrivateKey()) batch.add_request(retire_request)", "address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch =", "= PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc),", "user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose: time.sleep(5) host = compose.get_service_host('rest-api', 8008) port", "parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] )", "PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1',", "8008) port = compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}' ledger = Ledger(url) # -----------", "user_2_account.ChildKey(1).PublicKey()), ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # -----------", "points is always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always 0.0 user_2_account", "wait_for_commit(self, ledger, handle): i = 0 while True: status = ledger.get_batch_status(handle).status if status", "Batch(issuer_key.PrivateKey()) 
batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the", "ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412' ) batch", "source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger,", "generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey()", "4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 )", "batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- transfer_request", "SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()),", "user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_2_meter_5 = 
user_2_masterkey.ChildKey(1).ChildKey(5) with", "time.sleep(5) host = compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}' ledger", "user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is", "handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- transfer_request =", "user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering", "0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5)", "= user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose: time.sleep(5) host = compose.get_service_host('rest-api', 8008) port =", "batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Retire GGO ----------- settlement_address =", "from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest,", "tech_type='T124124', fuel_type='F12412' ) batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger,", "while True: status = ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED: break if status ==", "host = compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}' ledger =", "----------- Retire GGO ----------- settlement_address = 
generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address,", "measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13,", "raise Exception(\"INVALID\") i += 1 if i > 30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status,", "8008) url = f'http://{host}:{port}' ledger = Ledger(url) # ----------- Publish and Issue -----------", "amount=25 ) ] ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle)", "amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ]", "= user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always 0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points", "address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] ) batch =", "----------- Trade the GGO ----------- split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts = [", "compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}' ledger = Ledger(url) #", "user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch = Batch(user_2_masterkey.PrivateKey())", 
"source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch)", "GGO ----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[", "@pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode())", "generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4,", "ledger, handle): i = 0 while True: status = ledger.get_batch_status(handle).status if status ==", "user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as", "source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()),", "private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch = Batch(user_2_masterkey.PrivateKey()) 
batch.add_request(retire_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle)", "from datetime import datetime, timezone from bip32utils import BIP32Key from testcontainers.compose import DockerCompose", "Accounts is always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n", "batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO -----------", "address=measurement_prod_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION,", "= user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4, 1,", "DockerCompose(\"./test\") as compose: time.sleep(5) host = compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api', 8008) url", "measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4,", "SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle): i =", "RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle): i = 0", "i += 1 if i > 30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest", "measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', 
fuel_type='F12412' ) batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle =", "), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] )", "points is always 1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose: time.sleep(5) host", "handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Retire GGO ----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT,", "status == BatchStatus.INVALID: raise Exception(\"INVALID\") i += 1 if i > 30: raise", "user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle)", "destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) #", "from bip32utils import BIP32Key from testcontainers.compose import DockerCompose from src.origin_ledger_sdk import Ledger, Batch,", "generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4,", "measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412' ) batch = 
Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request)", ") batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Retire", "Meatering points is always 1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose: time.sleep(5)", "address=measurement_con_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION,", "amount=100 ) measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address,", "handle): i = 0 while True: status = ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED:", "13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey())", "= ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- split_request = SplitGGORequest(", "def wait_for_commit(self, ledger, handle): i = 0 while True: status = ledger.get_batch_status(handle).status if", "4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 )", "measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc),", "# ----------- Trade the GGO ----------- split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), 
source_address=ggo_issue_address, parts =", "user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always 0.0 user_1_account =", "= user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts", "0 while True: status = ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED: break if status", "handle) # ----------- Trade the GGO ----------- transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()),", "Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO -----------", "= BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points", "Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey =", "@pytest.mark.trylast def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) #", "tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key = 
user_2_meter_5.ChildKey(26429040)", "batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Retire GGO", "timezone from bip32utils import BIP32Key from testcontainers.compose import DockerCompose from src.origin_ledger_sdk import Ledger,", "= generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020,", "generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412' ) batch = Batch(issuer_key.PrivateKey())", "BatchStatus.COMMITTED: break if status == BatchStatus.INVALID: raise Exception(\"INVALID\") i += 1 if i", "= f'http://{host}:{port}' ledger = Ledger(url) # ----------- Publish and Issue ----------- measurement_prod_key =", "begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100", "always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always 0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0)", "= user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\")", "= PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc),", "1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose: time.sleep(5) host = compose.get_service_host('rest-api', 8008)", "# Accounts is always 0.0 user_2_account = 
user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always", "import datetime, timezone from bip32utils import BIP32Key from testcontainers.compose import DockerCompose from src.origin_ledger_sdk", "BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase):", "= generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020,", "port = compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}' ledger = Ledger(url) # ----------- Publish", "handle) # ----------- Trade the GGO ----------- split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts", "status == BatchStatus.COMMITTED: break if status == BatchStatus.INVALID: raise Exception(\"INVALID\") i += 1", "if status == BatchStatus.COMMITTED: break if status == BatchStatus.INVALID: raise Exception(\"INVALID\") i +=", "the GGO ----------- transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch", "= RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO,", "type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key = 
user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest(", "import unittest import pytest import time from datetime import datetime, timezone from bip32utils", "measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() )", "batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- split_request", "+= 1 if i > 30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast", "def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts", "user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always 0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering", "end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address", "SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( 
address=generate_address(AddressPrefix.GGO,", "1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address", "RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()),", "i > 30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key", "AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle): i = 0 while True:", "BatchStatus.INVALID: raise Exception(\"INVALID\") i += 1 if i > 30: raise Exception(\"TIMEOUT\") time.sleep(1)", "= user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1,", "# Meatering points is always 1.n user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5) with DockerCompose(\"./test\") as compose:", "with DockerCompose(\"./test\") as compose: time.sleep(5) host = compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api', 8008)", "as compose: time.sleep(5) host = compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api', 8008) url =", "unittest import pytest import time from datetime import datetime, timezone from bip32utils import", "ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- split_request = 
SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(),", "datetime, timezone from bip32utils import BIP32Key from testcontainers.compose import DockerCompose from src.origin_ledger_sdk import", "import pytest import time from datetime import datetime, timezone from bip32utils import BIP32Key", "13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest(", "Retire GGO ----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(),", "----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart(", "= generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()),", "user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) #", "Trade the GGO ----------- transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), )", "# Accounts is 
always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always", "the GGO ----------- split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO,", "self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address,", "----------- split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50", "Publish and Issue ----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request =", "self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO,", "Issue ----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address,", "> 30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key =", "# ----------- Trade the GGO ----------- transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO,", 
"ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Retire GGO ----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request", "measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ),", "BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey =", "1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key", "begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50", "= Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO", "1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always 0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) #", "settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey()) retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO,", "RetireGGOPart( address=generate_address(AddressPrefix.GGO, 
user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch", "Ledger(url) # ----------- Publish and Issue ----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT,", "break if status == BatchStatus.INVALID: raise Exception(\"INVALID\") i += 1 if i >", "Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Retire GGO ----------- settlement_address", "fuel_type='F12412' ) batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle)", "= compose.get_service_host('rest-api', 8008) port = compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}' ledger = Ledger(url)", "SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle =", "datetime import datetime, timezone from bip32utils import BIP32Key from testcontainers.compose import DockerCompose from", "batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # -----------", "always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_1_meter_42 =", "GGO ----------- split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, 
parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()),", "= [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart(", "----------- transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch = Batch(user_1_masterkey.PrivateKey())", "user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest( address=measurement_prod_address, begin=datetime(2020, 4, 1, 12,", "address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch)", "4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request", "and Issue ----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey()) measurement_prod_request = PublishMeasurementRequest(", "tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address,", "status = ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED: break if status == 
BatchStatus.INVALID: raise", "= ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Retire GGO ----------- settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey())", "retire_request = RetireGGORequest( settlement_address=settlement_address, measurement_address=measurement_con_address, measurement_private_key=measurement_con_key.PrivateKey(), parts=[ RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), private_key=user_2_account.ChildKey(0).PrivateKey() ), RetireGGOPart(", "is always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_1_meter_42", "from testcontainers.compose import DockerCompose from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest,", "Accounts is always 0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n", "1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request =", "PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase): def wait_for_commit(self,", "0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42)", "30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode())", "ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED: break if status == BatchStatus.INVALID: raise 
Exception(\"INVALID\") i", ") ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412' )", "amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request)", "always 0.0 user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0) # Meatering points is always 1.n user_2_meter_5 =", "i = 0 while True: status = ledger.get_batch_status(handle).status if status == BatchStatus.COMMITTED: break", "IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger,", "= BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always 0.0", "12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address =", "type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124',", "sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request =", "PublishMeasurementRequest( address=measurement_con_address, 
begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1',", "= IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412' ) batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request)", "= Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade", "), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25 ) ] ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle", "RetireGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch = Batch(user_2_masterkey.PrivateKey()) batch.add_request(retire_request) handle =", "import time from datetime import datetime, timezone from bip32utils import BIP32Key from testcontainers.compose", "raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self): issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey", "Meatering points is always 1.n user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42) # Accounts is always 0.0", "amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey()) ggo_issue_request = IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, tech_type='T124124', fuel_type='F12412'", "IssueGGORequest( measurement_address=measurement_prod_address, ggo_address=ggo_issue_address, 
tech_type='T124124', fuel_type='F12412' ) batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle", "split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts = [ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ),", "= Ledger(url) # ----------- Publish and Issue ----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address =", ") ] ) batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) #", "tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.CONSUMPTION, amount=50 ) ggo_issue_address = generate_address(AddressPrefix.GGO,", "= Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Retire GGO -----------", "BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always 0.0 user_1_account", "1, 13, tzinfo=timezone.utc), sector='DK1', type=MeasurementType.PRODUCTION, amount=100 ) measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT,", "time from datetime import datetime, timezone from bip32utils import BIP32Key from testcontainers.compose import", "Trade the GGO ----------- split_request = SplitGGORequest( source_private_key=measurement_prod_key.PrivateKey(), source_address=ggo_issue_address, parts = [ SplitGGOPart(", 
"issuer_key = BIP32Key.fromEntropy(\"this_will_be_the_issuers_main_key_entropy\".encode()) user_1_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always", "address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()), amount=25", "BIP32Key.fromEntropy(\"this_will_be_user_one_who_has_the_production_device\".encode()) user_2_masterkey = BIP32Key.fromEntropy(\"this_will_be_user_two_who_has_the_production_device\".encode()) # Accounts is always 0.0 user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0) #", "batch = Batch(user_1_masterkey.PrivateKey()) batch.add_request(split_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the", "f'http://{host}:{port}' ledger = Ledger(url) # ----------- Publish and Issue ----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040)", "[ SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()), amount=50 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), amount=25 ), SplitGGOPart( address=generate_address(AddressPrefix.GGO,", "Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart", "if i > 30: raise Exception(\"TIMEOUT\") time.sleep(1) self.assertEqual(status, BatchStatus.COMMITTED) @pytest.mark.integrationtest @pytest.mark.trylast def test_integration(self):", "testcontainers.compose import DockerCompose from 
src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest,", "ledger = Ledger(url) # ----------- Publish and Issue ----------- measurement_prod_key = user_1_meter_42.ChildKey(26429040) measurement_prod_address", "SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart class TestIntegration(unittest.TestCase): def wait_for_commit(self, ledger, handle): i", "measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc), end=datetime(2020, 4, 1, 13,", "measurement_con_key = user_2_meter_5.ChildKey(26429040) measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey()) measurement_con_request = PublishMeasurementRequest( address=measurement_con_address, begin=datetime(2020, 4,", "= ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) # ----------- Trade the GGO ----------- transfer_request = TransferGGORequest(", "address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), private_key=user_2_account.ChildKey(1).PrivateKey() ) ] ) batch = Batch(user_2_masterkey.PrivateKey()) batch.add_request(retire_request) handle = ledger.execute_batch(batch)", "= compose.get_service_port('rest-api', 8008) url = f'http://{host}:{port}' ledger = Ledger(url) # ----------- Publish and", ") batch = Batch(issuer_key.PrivateKey()) batch.add_request(measurement_prod_request) batch.add_request(measurement_con_request) batch.add_request(ggo_issue_request) handle = ledger.execute_batch(batch) self.wait_for_commit(ledger, handle) #", "TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()), ) batch = 
Batch(user_1_masterkey.PrivateKey()) batch.add_request(transfer_request) handle =", "----------- Trade the GGO ----------- transfer_request = TransferGGORequest( source_private_key=user_1_account.ChildKey(1).PrivateKey(), source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()), destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey())," ]
[ "json loading to halt # for long time. super().__init__() if not isinstance(delta, int):", "up resources (closing files, joining threads, removing dirs etc) # pass pass def", "Init # is called when json is read. A big init will cause", "A big init will cause json loading to halt # for long time.", "very minimal. Init # is called when json is read. A big init", "# if event_type == EventType.START_RUN: # # Create all major components here. #", "= model_dir self._sleep_time = sleep_time self._train_task_name = train_task_name self._submit_model_task_name = submit_model_task_name def handle_event(self,", "components here. # pass # elif event_type == EventType.END_RUN: # # Clean up", "shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal, ) -> Shareable: # Any kind of", "delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init functions of components should", "of components should be very minimal. Init # is called when json is", "make sure they abort any running processes here. Returns: Shareable: Shareable with return_code.", "pass def execute( self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal, )", "-> Shareable: \"\"\"Abort execution. This is used if abort_signal is triggered. 
Users should", "interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self) -> Shareable: \"\"\"Abort execution.", "import time from nvflare.apis.executor import Executor from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import", "check abort_signal regularly count, interval = 0, 0.5 while count < self._sleep_time: if", "count, interval = 0, 0.5 while count < self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable()", "self._get_exception_shareable() time.sleep(interval) count += interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self)", "is called when json is read. A big init will cause json loading", "major components here. # pass # elif event_type == EventType.END_RUN: # # Clean", "interval = 0, 0.5 while count < self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval)", "Init functions of components should be very minimal. Init # is called when", "event_type: str, fl_ctx: FLContext): # if event_type == EventType.START_RUN: # # Create all", "count += interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self) -> Shareable:", "raise TypeError(\"\") self._delta = delta self._model_name = model_name self._model_dir = model_dir self._sleep_time =", "handle_event(self, event_type: str, fl_ctx: FLContext): # if event_type == EventType.START_RUN: # # Create", "FLContext, abort_signal: Signal, ) -> Shareable: # Any kind of tasks waiting should", "any running processes here. Returns: Shareable: Shareable with return_code. 
\"\"\" shareable = Shareable()", "-> Shareable: # Any kind of tasks waiting should check abort_signal regularly count,", "Shareable: # Any kind of tasks waiting should check abort_signal regularly count, interval", "nvflare.apis.signal import Signal from nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor): def __init__( self, delta=1,", "time. super().__init__() if not isinstance(delta, int): raise TypeError(\"\") self._delta = delta self._model_name =", "< self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count += interval shareable = Shareable()", "Create all major components here. # pass # elif event_type == EventType.END_RUN: #", "regularly count, interval = 0, 0.5 while count < self._sleep_time: if abort_signal.triggered: return", "they abort any running processes here. Returns: Shareable: Shareable with return_code. \"\"\" shareable", "isinstance(delta, int): raise TypeError(\"\") self._delta = delta self._model_name = model_name self._model_dir = model_dir", "nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal from nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor):", "will cause json loading to halt # for long time. super().__init__() if not", "import ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from nvflare.apis.signal import", "0.5 while count < self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count += interval", "be very minimal. Init # is called when json is read. 
A big", "int): raise TypeError(\"\") self._delta = delta self._model_name = model_name self._model_dir = model_dir self._sleep_time", "nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor): def __init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\",", "processes here. Returns: Shareable: Shareable with return_code. \"\"\" shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return", "elif event_type == EventType.END_RUN: # # Clean up resources (closing files, joining threads,", "= submit_model_task_name def handle_event(self, event_type: str, fl_ctx: FLContext): # if event_type == EventType.START_RUN:", "from nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor): def __init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,", "model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init functions of components should be very minimal. Init", "read. A big init will cause json loading to halt # for long", "_get_exception_shareable(self) -> Shareable: \"\"\"Abort execution. This is used if abort_signal is triggered. Users", "model_dir=\"model\", ): # Init functions of components should be very minimal. 
Init #", "import Executor from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import", "# pass # elif event_type == EventType.END_RUN: # # Clean up resources (closing", "files, joining threads, removing dirs etc) # pass pass def execute( self, task_name:", "+= interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self) -> Shareable: \"\"\"Abort", "Signal from nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor): def __init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN,", "pass pass def execute( self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal,", "self._delta = delta self._model_name = model_name self._model_dir = model_dir self._sleep_time = sleep_time self._train_task_name", "event_type == EventType.END_RUN: # # Clean up resources (closing files, joining threads, removing", "train_task_name self._submit_model_task_name = submit_model_task_name def handle_event(self, event_type: str, fl_ctx: FLContext): # if event_type", "sure they abort any running processes here. Returns: Shareable: Shareable with return_code. \"\"\"", "class NPTrainer(Executor): def __init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): #", "self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count += interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION)", "Shareable, fl_ctx: FLContext, abort_signal: Signal, ) -> Shareable: # Any kind of tasks", "# # Clean up resources (closing files, joining threads, removing dirs etc) #", "here. Returns: Shareable: Shareable with return_code. 
\"\"\" shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable", "from nvflare.apis.executor import Executor from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext from", "abort_signal: Signal, ) -> Shareable: # Any kind of tasks waiting should check", "event_type == EventType.START_RUN: # # Create all major components here. # pass #", "self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init functions of components", "to halt # for long time. super().__init__() if not isinstance(delta, int): raise TypeError(\"\")", "here. # pass # elif event_type == EventType.END_RUN: # # Clean up resources", "resources (closing files, joining threads, removing dirs etc) # pass pass def execute(", "Any kind of tasks waiting should check abort_signal regularly count, interval = 0,", "= 0, 0.5 while count < self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count", "abort_signal is triggered. Users should make sure they abort any running processes here.", "should be very minimal. Init # is called when json is read. 
A", "threads, removing dirs etc) # pass pass def execute( self, task_name: str, shareable:", "removing dirs etc) # pass pass def execute( self, task_name: str, shareable: Shareable,", "nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from nvflare.apis.signal", "train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init functions of components should be very", "return self._get_exception_shareable() time.sleep(interval) count += interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def", "called when json is read. A big init will cause json loading to", "str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal, ) -> Shareable: # Any kind", "shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self) -> Shareable: \"\"\"Abort execution. This", "execute( self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal, ) -> Shareable:", "shareable def _get_exception_shareable(self) -> Shareable: \"\"\"Abort execution. This is used if abort_signal is", "joining threads, removing dirs etc) # pass pass def execute( self, task_name: str,", "# elif event_type == EventType.END_RUN: # # Clean up resources (closing files, joining", "= train_task_name self._submit_model_task_name = submit_model_task_name def handle_event(self, event_type: str, fl_ctx: FLContext): # if", "minimal. Init # is called when json is read. A big init will", "if not isinstance(delta, int): raise TypeError(\"\") self._delta = delta self._model_name = model_name self._model_dir", "Users should make sure they abort any running processes here. 
Returns: Shareable: Shareable", "TypeError(\"\") self._delta = delta self._model_name = model_name self._model_dir = model_dir self._sleep_time = sleep_time", "): # Init functions of components should be very minimal. Init # is", "running processes here. Returns: Shareable: Shareable with return_code. \"\"\" shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION)", "self._submit_model_task_name = submit_model_task_name def handle_event(self, event_type: str, fl_ctx: FLContext): # if event_type ==", "# Clean up resources (closing files, joining threads, removing dirs etc) # pass", "sleep_time self._train_task_name = train_task_name self._submit_model_task_name = submit_model_task_name def handle_event(self, event_type: str, fl_ctx: FLContext):", "import FLContext from nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal from nvflare.app_common.app_constant import", "__init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init functions of", "Shareable from nvflare.apis.signal import Signal from nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor): def __init__(", "big init will cause json loading to halt # for long time. super().__init__()", "etc) # pass pass def execute( self, task_name: str, shareable: Shareable, fl_ctx: FLContext,", "self._model_name = model_name self._model_dir = model_dir self._sleep_time = sleep_time self._train_task_name = train_task_name self._submit_model_task_name", "is triggered. Users should make sure they abort any running processes here. 
Returns:", "str, fl_ctx: FLContext): # if event_type == EventType.START_RUN: # # Create all major", "super().__init__() if not isinstance(delta, int): raise TypeError(\"\") self._delta = delta self._model_name = model_name", "task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal, ) -> Shareable: # Any", "execution. This is used if abort_signal is triggered. Users should make sure they", "import Shareable from nvflare.apis.signal import Signal from nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor): def", "# for long time. super().__init__() if not isinstance(delta, int): raise TypeError(\"\") self._delta =", "Clean up resources (closing files, joining threads, removing dirs etc) # pass pass", "AppConstants class NPTrainer(Executor): def __init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ):", "abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count += interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable", "def _get_exception_shareable(self) -> Shareable: \"\"\"Abort execution. This is used if abort_signal is triggered.", "from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal from", "NPTrainer(Executor): def __init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init", "nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal from nvflare.app_common.app_constant", "for long time. 
super().__init__() if not isinstance(delta, int): raise TypeError(\"\") self._delta = delta", "import AppConstants class NPTrainer(Executor): def __init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\",", "should make sure they abort any running processes here. Returns: Shareable: Shareable with", "loading to halt # for long time. super().__init__() if not isinstance(delta, int): raise", "all major components here. # pass # elif event_type == EventType.END_RUN: # #", "# pass pass def execute( self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal:", "when json is read. A big init will cause json loading to halt", "# Create all major components here. # pass # elif event_type == EventType.END_RUN:", "while count < self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count += interval shareable", "# # Create all major components here. # pass # elif event_type ==", "EventType.START_RUN: # # Create all major components here. # pass # elif event_type", "(closing files, joining threads, removing dirs etc) # pass pass def execute( self,", "== EventType.END_RUN: # # Clean up resources (closing files, joining threads, removing dirs", "waiting should check abort_signal regularly count, interval = 0, 0.5 while count <", "submit_model_task_name def handle_event(self, event_type: str, fl_ctx: FLContext): # if event_type == EventType.START_RUN: #", "is read. A big init will cause json loading to halt # for", "Shareable: \"\"\"Abort execution. This is used if abort_signal is triggered. 
Users should make", "def __init__( self, delta=1, sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init functions", "should check abort_signal regularly count, interval = 0, 0.5 while count < self._sleep_time:", "\"\"\"Abort execution. This is used if abort_signal is triggered. Users should make sure", "= delta self._model_name = model_name self._model_dir = model_dir self._sleep_time = sleep_time self._train_task_name =", "cause json loading to halt # for long time. super().__init__() if not isinstance(delta,", "abort any running processes here. Returns: Shareable: Shareable with return_code. \"\"\" shareable =", "halt # for long time. super().__init__() if not isinstance(delta, int): raise TypeError(\"\") self._delta", "triggered. Users should make sure they abort any running processes here. Returns: Shareable:", "model_name self._model_dir = model_dir self._sleep_time = sleep_time self._train_task_name = train_task_name self._submit_model_task_name = submit_model_task_name", "if event_type == EventType.START_RUN: # # Create all major components here. # pass", "if abort_signal is triggered. Users should make sure they abort any running processes", "self._train_task_name = train_task_name self._submit_model_task_name = submit_model_task_name def handle_event(self, event_type: str, fl_ctx: FLContext): #", "nvflare.apis.executor import Executor from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable", "def handle_event(self, event_type: str, fl_ctx: FLContext): # if event_type == EventType.START_RUN: # #", "This is used if abort_signal is triggered. 
Users should make sure they abort", "fl_ctx: FLContext, abort_signal: Signal, ) -> Shareable: # Any kind of tasks waiting", "submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init functions of components should be very minimal.", "# Init functions of components should be very minimal. Init # is called", "import Signal from nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor): def __init__( self, delta=1, sleep_time=0,", "self._model_dir = model_dir self._sleep_time = sleep_time self._train_task_name = train_task_name self._submit_model_task_name = submit_model_task_name def", "delta self._model_name = model_name self._model_dir = model_dir self._sleep_time = sleep_time self._train_task_name = train_task_name", "sleep_time=0, train_task_name=AppConstants.TASK_TRAIN, submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, model_name=\"best_numpy.npy\", model_dir=\"model\", ): # Init functions of components should be", "# is called when json is read. A big init will cause json", "return shareable def _get_exception_shareable(self) -> Shareable: \"\"\"Abort execution. This is used if abort_signal", "ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal", "= sleep_time self._train_task_name = train_task_name self._submit_model_task_name = submit_model_task_name def handle_event(self, event_type: str, fl_ctx:", "== EventType.START_RUN: # # Create all major components here. 
# pass # elif", "dirs etc) # pass pass def execute( self, task_name: str, shareable: Shareable, fl_ctx:", "time from nvflare.apis.executor import Executor from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext", "of tasks waiting should check abort_signal regularly count, interval = 0, 0.5 while", "from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from", "from nvflare.apis.signal import Signal from nvflare.app_common.app_constant import AppConstants class NPTrainer(Executor): def __init__( self,", "json is read. A big init will cause json loading to halt #", "is used if abort_signal is triggered. Users should make sure they abort any", ") -> Shareable: # Any kind of tasks waiting should check abort_signal regularly", "init will cause json loading to halt # for long time. super().__init__() if", "components should be very minimal. Init # is called when json is read.", "fl_ctx: FLContext): # if event_type == EventType.START_RUN: # # Create all major components", "self._sleep_time = sleep_time self._train_task_name = train_task_name self._submit_model_task_name = submit_model_task_name def handle_event(self, event_type: str,", "tasks waiting should check abort_signal regularly count, interval = 0, 0.5 while count", "def execute( self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal, ) ->", "= model_name self._model_dir = model_dir self._sleep_time = sleep_time self._train_task_name = train_task_name self._submit_model_task_name =", "abort_signal regularly count, interval = 0, 0.5 while count < self._sleep_time: if abort_signal.triggered:", "Signal, ) -> Shareable: # Any kind of tasks waiting should check abort_signal", "= Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self) -> Shareable: \"\"\"Abort execution. 
This is", "shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self) -> Shareable: \"\"\"Abort execution. This is used if", "long time. super().__init__() if not isinstance(delta, int): raise TypeError(\"\") self._delta = delta self._model_name", "Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self) -> Shareable: \"\"\"Abort execution. This is used", "not isinstance(delta, int): raise TypeError(\"\") self._delta = delta self._model_name = model_name self._model_dir =", "count < self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count += interval shareable =", "Executor from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable", "used if abort_signal is triggered. Users should make sure they abort any running", "FLContext from nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal from nvflare.app_common.app_constant import AppConstants", "FLContext): # if event_type == EventType.START_RUN: # # Create all major components here.", "from nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal from nvflare.app_common.app_constant import AppConstants class", "if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count += interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return", "pass # elif event_type == EventType.END_RUN: # # Clean up resources (closing files,", "EventType.END_RUN: # # Clean up resources (closing files, joining threads, removing dirs etc)", "functions of components should be very minimal. 
Init # is called when json", "0, 0.5 while count < self._sleep_time: if abort_signal.triggered: return self._get_exception_shareable() time.sleep(interval) count +=", "self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal, ) -> Shareable: #", "time.sleep(interval) count += interval shareable = Shareable() shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION) return shareable def _get_exception_shareable(self) ->", "# Any kind of tasks waiting should check abort_signal regularly count, interval =", "kind of tasks waiting should check abort_signal regularly count, interval = 0, 0.5", "model_dir self._sleep_time = sleep_time self._train_task_name = train_task_name self._submit_model_task_name = submit_model_task_name def handle_event(self, event_type:" ]
[ "\"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\", \"Target\", ] def __init__(self, data): bytes = str(data) #", "in range(numOutputs): # Read the file information. self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:] else: self.outputs", "bytes): (self.device, self.inode, self.mode, self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec,", "self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:] else: self.kind = \"Invalid\" # The next item", "is the signature, if used. if self.hasCommandSignature: self.signature = struct.unpack(\"<Q\", bytes[:8])[0] bytes =", "# FIXME: This is a manually Python translation of the C++ # llbuild::buildsystem::BuildValue", "self.signature if self.outputs is not None: output += \", outputs=%r\" % self.outputs if", "+= \")\" return output class FileInfo(object): def __init__(self, bytes): (self.device, self.inode, self.mode, self.size,", "stringData[-1] == '\\0' self.strings = stringData[:-1].split(\"\\0\") else: self.strings = None assert len(bytes) ==", "str(data) # The first byte is the kind. 
if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\",", "self.strings = [] else: stringData = bytes[:stringsLength] bytes = bytes[stringsLength:] assert len(stringData) ==", "not None: output += \", strings=%r\" % self.strings output += \")\" return output", "= struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec, modTimeNano) def __repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r,", "= self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:] else: self.kind = \"Invalid\" # The next", "primary_key=True) name = Column('key', String, nullable=False) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__,", "\", signature=%0x\" % self.signature if self.outputs is not None: output += \", outputs=%r\"", "Declaration Base = declarative_base() class KeyName(Base): __tablename__ = \"key_names\" id = Column(Integer, nullable=False,", "self.outputs is not None: output += \", outputs=%r\" % self.outputs if self.strings is", "self.__class__.__name__, (self.id, self.name)) class RuleResult(Base): __tablename__ = \"rule_results\" id = Column(Integer, nullable=False, primary_key=True)", "stringData = bytes[:stringsLength] bytes = bytes[stringsLength:] assert len(stringData) == stringsLength assert stringData[-1] ==", "if used. if self.hasCommandSignature: self.signature = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] else: self.signature", "else: self.outputs = None # The strings follow, if used. 
class KeyName(Base):
    """ORM mapping for the llbuild database's ``key_names`` table.

    Each row associates an integer rule ID with its (string) rule key;
    other tables refer to keys by this ID.
    """

    __tablename__ = "key_names"

    # Integer primary key; referenced by RuleResult.key_id.
    id = Column(Integer, nullable=False, primary_key=True)
    # The rule key string; the underlying column is named 'key'.
    name = Column('key', String, nullable=False)

    def __repr__(self):
        # e.g. KeyName(1, 'some-key')
        return "%s%r" % (
            self.__class__.__name__, (self.id, self.name))
\"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\",", "key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\", Binary, nullable=False) built_at = Column(Integer,", "follow, if used. if self.hasOutputInfo: numOutputs = struct.unpack(\"<I\", bytes[:4])[0] bytes = bytes[4:] self.outputs", "modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec, modTimeNano) def __repr__(self): return \"FileInfo(device=%r,", "bytes[8:] if stringsLength == 0: self.strings = [] else: stringData = bytes[:stringsLength] bytes", "declarative_base() class KeyName(Base): __tablename__ = \"key_names\" id = Column(Integer, nullable=False, primary_key=True) name =", "relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary, nullable=True) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__,", "signature, if used. if self.hasCommandSignature: self.signature = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] else:", "self.strings = None assert len(bytes) == 0 @property def hasCommandSignature(self): return self.kind in", "used. 
if self.hasCommandSignature: self.signature = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] else: self.signature =", "self.kind if self.signature is not None: output += \", signature=%0x\" % self.signature if", "self.signature is not None: output += \", signature=%0x\" % self.signature if self.outputs is", "bytes[1:] else: self.kind = \"Invalid\" # The next item is the signature, if", "BuildValue(object): # FIXME: This is a manually Python translation of the C++ #", "nullable=False) value_bytes = Column(\"value\", Binary, nullable=False) built_at = Column(Integer, nullable=False) computed_at = Column(Integer,", "struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec, modTimeNano) def __repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r,", "else : num_dependencies = len(self.dependencies_bytes) / 8 return struct.unpack(\"<\" + str(num_dependencies) + \"Q\",", "= Column(Integer, nullable=False, primary_key=True) name = Column('key', String, nullable=False) def __repr__(self): return \"%s%r\"", "= Column(Integer, nullable=False) computed_at = Column(Integer, nullable=False) key = relation(KeyName) dependencies_bytes = Column(\"dependencies\",", "self.computed_at)) @property def value(self): return BuildValue(self.value_bytes) @property def dependencies(self): if self.dependencies_bytes is None:", "directly yet. 
kinds = [ \"Invalid\", \"VirtualInput\", \"ExistingInput\", \"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\",", "This is a manually Python translation of the C++ # llbuild::buildsystem::BuildValue type, which", "kinds = [ \"Invalid\", \"VirtualInput\", \"ExistingInput\", \"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\",", "if self.hasOutputInfo: numOutputs = struct.unpack(\"<I\", bytes[:4])[0] bytes = bytes[4:] self.outputs = [] for", "FIXME: This is a manually Python translation of the C++ # llbuild::buildsystem::BuildValue type,", "assert len(bytes) == 0 @property def hasCommandSignature(self): return self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property", "from sqlalchemy.orm import relation, relationship from sqlalchemy.ext.declarative import declarative_base # DB Declaration Base", "/ 8 return struct.unpack(\"<\" + str(num_dependencies) + \"Q\", self.dependencies_bytes) ### class BuildValue(object): #", "# The outputs follow, if used. if self.hasOutputInfo: numOutputs = struct.unpack(\"<I\", bytes[:4])[0] bytes", "return [] else : num_dependencies = len(self.dependencies_bytes) / 8 return struct.unpack(\"<\" + str(num_dependencies)", "else: self.kind = \"Invalid\" # The next item is the signature, if used.", "= stringData[:-1].split(\"\\0\") else: self.strings = None assert len(bytes) == 0 @property def hasCommandSignature(self):", "\"%s%r\" % ( self.__class__.__name__, (self.id, self.name)) class RuleResult(Base): __tablename__ = \"rule_results\" id =", "used. 
if self.hasStringList: stringsLength = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] if stringsLength ==", "if self.hasStringList: stringsLength = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] if stringsLength == 0:", "if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:] else: self.kind = \"Invalid\"", "class BuildValue(object): # FIXME: This is a manually Python translation of the C++", "return output class FileInfo(object): def __init__(self, bytes): (self.device, self.inode, self.mode, self.size, modTimeSec, modTimeNano)", "relation, relationship from sqlalchemy.ext.declarative import declarative_base # DB Declaration Base = declarative_base() class", "in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self): return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property def", "of the C++ # llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't #", "# llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't # available via an", "nullable=True) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.key, self.value, self.built_at, self.computed_at))", "i in range(numOutputs): # Read the file information. self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:] else:", "self.name)) class RuleResult(Base): __tablename__ = \"rule_results\" id = Column(Integer, nullable=False, primary_key=True) key_id =", "first byte is the kind. if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes =", "\"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\", \"Target\", ] def __init__(self, data): bytes = str(data)", "= None # The strings follow, if used. 
if self.hasStringList: stringsLength = struct.unpack(\"<Q\",", "% self.kind if self.signature is not None: output += \", signature=%0x\" % self.signature", "self.inode, self.mode, self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec, modTimeNano) def", "llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't # available via an API", "bytes = bytes[4:] self.outputs = [] for i in range(numOutputs): # Read the", "= Column('key', String, nullable=False) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.name))", "self.outputs if self.strings is not None: output += \", strings=%r\" % self.strings output", "is not None: output += \", strings=%r\" % self.strings output += \")\" return", "= [] for i in range(numOutputs): # Read the file information. self.outputs.append(FileInfo(bytes[:48])) bytes", "self.hasStringList: stringsLength = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] if stringsLength == 0: self.strings", "return self.kind in (\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self): output = \"BuildValue(kind=%r\" % self.kind", "== '\\0' self.strings = stringData[:-1].split(\"\\0\") else: self.strings = None assert len(bytes) == 0", "\"StaleFileRemoval\") @property def hasOutputInfo(self): return self.kind in (\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self): output", "[] else: stringData = bytes[:stringsLength] bytes = bytes[stringsLength:] assert len(stringData) == stringsLength assert", "bytes = bytes[1:] else: self.kind = \"Invalid\" # The next item is the", "ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\", Binary, nullable=False) built_at = Column(Integer, nullable=False) computed_at =", "KeyName(Base): __tablename__ = \"key_names\" id = Column(Integer, nullable=False, primary_key=True) name = Column('key', String,", "yet. 
kinds = [ \"Invalid\", \"VirtualInput\", \"ExistingInput\", \"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\",", "is the kind. if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:] else:", "not None: output += \", signature=%0x\" % self.signature if self.outputs is not None:", "byte is the kind. if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:]", "assert stringData[-1] == '\\0' self.strings = stringData[:-1].split(\"\\0\") else: self.strings = None assert len(bytes)", "\", strings=%r\" % self.strings output += \")\" return output class FileInfo(object): def __init__(self,", "return self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self): return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\")", "in (\"DirectoryContents\", \"StaleFileRemoval\") @property def hasOutputInfo(self): return self.kind in (\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\") def", "__init__(self, bytes): (self.device, self.inode, self.mode, self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime =", "FileInfo(object): def __init__(self, bytes): (self.device, self.inode, self.mode, self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes)", "self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec, modTimeNano) def __repr__(self): return", "def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.name)) class RuleResult(Base): __tablename__ =", "== 0 @property def hasCommandSignature(self): return self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self):", "+ str(num_dependencies) + \"Q\", self.dependencies_bytes) ### class BuildValue(object): # FIXME: This is a", "hasCommandSignature(self): return self.kind in 
(\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self): return self.kind in (\"DirectoryContents\",", "str(num_dependencies) + \"Q\", self.dependencies_bytes) ### class BuildValue(object): # FIXME: This is a manually", "bytes[:4])[0] bytes = bytes[4:] self.outputs = [] for i in range(numOutputs): # Read", "% ( self.__class__.__name__, (self.id, self.name)) class RuleResult(Base): __tablename__ = \"rule_results\" id = Column(Integer,", "# available via an API we can access directly yet. kinds = [", "self.outputs = None # The strings follow, if used. if self.hasStringList: stringsLength =", "return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property def hasOutputInfo(self): return self.kind in (\"ExistingInput\", \"SuccessfulCommand\",", "which is unfortunate, but it isn't # available via an API we can", "= bytes[48:] else: self.outputs = None # The strings follow, if used. if", "\"DirectoryTreeSignature\") @property def hasStringList(self): return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property def hasOutputInfo(self): return", "= bytes[:stringsLength] bytes = bytes[stringsLength:] assert len(stringData) == stringsLength assert stringData[-1] == '\\0'", "\"ExistingInput\", \"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\", \"Target\",", "@property def value(self): return BuildValue(self.value_bytes) @property def dependencies(self): if self.dependencies_bytes is None: return", "data): bytes = str(data) # The first byte is the kind. 
if bytes:", "sqlalchemy.orm import relation, relationship from sqlalchemy.ext.declarative import declarative_base # DB Declaration Base =", "bytes[:8])[0] bytes = bytes[8:] else: self.signature = None # The outputs follow, if", "\"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))\" % ( self.device, self.inode, self.mode, self.size, self.modTime[0],", "return BuildValue(self.value_bytes) @property def dependencies(self): if self.dependencies_bytes is None: return [] else :", "return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))\" % ( self.device, self.inode, self.mode, self.size,", "not None: output += \", outputs=%r\" % self.outputs if self.strings is not None:", "= \"key_names\" id = Column(Integer, nullable=False, primary_key=True) name = Column('key', String, nullable=False) def", "built_at = Column(Integer, nullable=False) computed_at = Column(Integer, nullable=False) key = relation(KeyName) dependencies_bytes =", "# DB Declaration Base = declarative_base() class KeyName(Base): __tablename__ = \"key_names\" id =", "num_dependencies = len(self.dependencies_bytes) / 8 return struct.unpack(\"<\" + str(num_dependencies) + \"Q\", self.dependencies_bytes) ###", "key = relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary, nullable=True) def __repr__(self): return \"%s%r\" %", "nullable=False, primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\", Binary, nullable=False) built_at", "bytes[:stringsLength] bytes = bytes[stringsLength:] assert len(stringData) == stringsLength assert stringData[-1] == '\\0' self.strings", "def hasCommandSignature(self): return self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self): return self.kind in", "(\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self): return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property def 
hasOutputInfo(self):", "return struct.unpack(\"<\" + str(num_dependencies) + \"Q\", self.dependencies_bytes) ### class BuildValue(object): # FIXME: This", "self.key, self.value, self.built_at, self.computed_at)) @property def value(self): return BuildValue(self.value_bytes) @property def dependencies(self): if", "but it isn't # available via an API we can access directly yet.", "strings=%r\" % self.strings output += \")\" return output class FileInfo(object): def __init__(self, bytes):", "def hasOutputInfo(self): return self.kind in (\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self): output = \"BuildValue(kind=%r\"", "0 @property def hasCommandSignature(self): return self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self): return", "\"CancelledCommand\", \"SkippedCommand\", \"Target\", ] def __init__(self, data): bytes = str(data) # The first", "\"DirectoryContents\") def __repr__(self): output = \"BuildValue(kind=%r\" % self.kind if self.signature is not None:", "\"rule_results\" id = Column(Integer, nullable=False, primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes =", "String, nullable=False) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.name)) class RuleResult(Base):", "is unfortunate, but it isn't # available via an API we can access", "= Column(Integer, nullable=False, primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\", Binary,", "class RuleResult(Base): __tablename__ = \"rule_results\" id = Column(Integer, nullable=False, primary_key=True) key_id = Column(Integer,", "bytes = bytes[stringsLength:] assert len(stringData) == stringsLength assert stringData[-1] == '\\0' self.strings =", "strings follow, if used. 
if self.hasStringList: stringsLength = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:]", "self.signature = None # The outputs follow, if used. if self.hasOutputInfo: numOutputs =", "primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\", Binary, nullable=False) built_at =", "stringsLength = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] if stringsLength == 0: self.strings =", "self.__class__.__name__, (self.id, self.key, self.value, self.built_at, self.computed_at)) @property def value(self): return BuildValue(self.value_bytes) @property def", "id = Column(Integer, nullable=False, primary_key=True) name = Column('key', String, nullable=False) def __repr__(self): return", "for i in range(numOutputs): # Read the file information. self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:]", "outputs=%r\" % self.outputs if self.strings is not None: output += \", strings=%r\" %", "Python translation of the C++ # llbuild::buildsystem::BuildValue type, which is unfortunate, but it", "class KeyName(Base): __tablename__ = \"key_names\" id = Column(Integer, nullable=False, primary_key=True) name = Column('key',", "relationship from sqlalchemy.ext.declarative import declarative_base # DB Declaration Base = declarative_base() class KeyName(Base):", "( self.__class__.__name__, (self.id, self.name)) class RuleResult(Base): __tablename__ = \"rule_results\" id = Column(Integer, nullable=False,", "output += \")\" return output class FileInfo(object): def __init__(self, bytes): (self.device, self.inode, self.mode,", "stringData[:-1].split(\"\\0\") else: self.strings = None assert len(bytes) == 0 @property def hasCommandSignature(self): return", "__tablename__ = \"key_names\" id = Column(Integer, nullable=False, primary_key=True) name = Column('key', String, nullable=False)", "= struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] if stringsLength == 0: self.strings = []", "an API we can access directly yet. 
kinds = [ \"Invalid\", \"VirtualInput\", \"ExistingInput\",", "Column(Integer, nullable=False) computed_at = Column(Integer, nullable=False) key = relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary,", "@property def hasOutputInfo(self): return self.kind in (\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self): output =", "# Read the file information. self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:] else: self.outputs = None", "[] for i in range(numOutputs): # Read the file information. self.outputs.append(FileInfo(bytes[:48])) bytes =", "BuildValue(self.value_bytes) @property def dependencies(self): if self.dependencies_bytes is None: return [] else : num_dependencies", "None assert len(bytes) == 0 @property def hasCommandSignature(self): return self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\")", "+= \", strings=%r\" % self.strings output += \")\" return output class FileInfo(object): def", "= Column(Integer, nullable=False) key = relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary, nullable=True) def __repr__(self):", "= bytes[1:] else: self.kind = \"Invalid\" # The next item is the signature,", "Column(\"value\", Binary, nullable=False) built_at = Column(Integer, nullable=False) computed_at = Column(Integer, nullable=False) key =", "bytes) self.modTime = (modTimeSec, modTimeNano) def __repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r,", "id = Column(Integer, nullable=False, primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\",", "name = Column('key', String, nullable=False) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id,", "self.value, self.built_at, self.computed_at)) @property def value(self): return BuildValue(self.value_bytes) @property def dependencies(self): if self.dependencies_bytes", "__repr__(self): return \"%s%r\" % ( 
self.__class__.__name__, (self.id, self.key, self.value, self.built_at, self.computed_at)) @property def", "computed_at = Column(Integer, nullable=False) key = relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary, nullable=True) def", "struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] else: self.signature = None # The outputs follow,", "dependencies(self): if self.dependencies_bytes is None: return [] else : num_dependencies = len(self.dependencies_bytes) /", "* from sqlalchemy.orm import relation, relationship from sqlalchemy.ext.declarative import declarative_base # DB Declaration", "The first byte is the kind. if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes", "(\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self): output = \"BuildValue(kind=%r\" % self.kind if self.signature is", "output = \"BuildValue(kind=%r\" % self.kind if self.signature is not None: output += \",", "if self.dependencies_bytes is None: return [] else : num_dependencies = len(self.dependencies_bytes) / 8", "= [ \"Invalid\", \"VirtualInput\", \"ExistingInput\", \"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\", \"FailedCommand\",", "is not None: output += \", signature=%0x\" % self.signature if self.outputs is not", "via an API we can access directly yet. kinds = [ \"Invalid\", \"VirtualInput\",", "if self.signature is not None: output += \", signature=%0x\" % self.signature if self.outputs", "(modTimeSec, modTimeNano) def __repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))\" % (", "def __repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))\" % ( self.device, self.inode,", "self.kind = \"Invalid\" # The next item is the signature, if used. if", "# The first byte is the kind. 
if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]]", "output += \", outputs=%r\" % self.outputs if self.strings is not None: output +=", "if stringsLength == 0: self.strings = [] else: stringData = bytes[:stringsLength] bytes =", "= \"Invalid\" # The next item is the signature, if used. if self.hasCommandSignature:", "\"VirtualInput\", \"ExistingInput\", \"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\",", "__tablename__ = \"rule_results\" id = Column(Integer, nullable=False, primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False)", "+= \", outputs=%r\" % self.outputs if self.strings is not None: output += \",", "bytes[8:] else: self.signature = None # The outputs follow, if used. if self.hasOutputInfo:", "Column(\"dependencies\", Binary, nullable=True) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.key, self.value,", "if self.hasCommandSignature: self.signature = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] else: self.signature = None", "RuleResult(Base): __tablename__ = \"rule_results\" id = Column(Integer, nullable=False, primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id),", "self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:] else: self.kind = \"Invalid\" # The", "stringsLength assert stringData[-1] == '\\0' self.strings = stringData[:-1].split(\"\\0\") else: self.strings = None assert", "= Column(\"dependencies\", Binary, nullable=True) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.key,", "'\\0' self.strings = stringData[:-1].split(\"\\0\") else: self.strings = None assert len(bytes) == 0 @property", "= bytes[stringsLength:] assert len(stringData) == stringsLength assert stringData[-1] == '\\0' 
self.strings = stringData[:-1].split(\"\\0\")", "output class FileInfo(object): def __init__(self, bytes): (self.device, self.inode, self.mode, self.size, modTimeSec, modTimeNano) =", "def dependencies(self): if self.dependencies_bytes is None: return [] else : num_dependencies = len(self.dependencies_bytes)", "bytes = bytes[8:] if stringsLength == 0: self.strings = [] else: stringData =", "the signature, if used. if self.hasCommandSignature: self.signature = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:]", "Read the file information. self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:] else: self.outputs = None #", "\"Invalid\", \"VirtualInput\", \"ExistingInput\", \"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\",", "Column(Integer, nullable=False) key = relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary, nullable=True) def __repr__(self): return", "the C++ # llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't # available", "DB Declaration Base = declarative_base() class KeyName(Base): __tablename__ = \"key_names\" id = Column(Integer,", "\"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\", \"Target\", ]", "0: self.strings = [] else: stringData = bytes[:stringsLength] bytes = bytes[stringsLength:] assert len(stringData)", "% ( self.__class__.__name__, (self.id, self.key, self.value, self.built_at, self.computed_at)) @property def value(self): return BuildValue(self.value_bytes)", "self.modTime = (modTimeSec, modTimeNano) def __repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))\"", "in (\"ExistingInput\", 
\"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self): output = \"BuildValue(kind=%r\" % self.kind if self.signature", "None: return [] else : num_dependencies = len(self.dependencies_bytes) / 8 return struct.unpack(\"<\" +", "% self.outputs if self.strings is not None: output += \", strings=%r\" % self.strings", "None # The strings follow, if used. if self.hasStringList: stringsLength = struct.unpack(\"<Q\", bytes[:8])[0]", "\"DirectoryContents\", \"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\", \"Target\", ] def", "= bytes[8:] else: self.signature = None # The outputs follow, if used. if", "def __init__(self, bytes): (self.device, self.inode, self.mode, self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime", "available via an API we can access directly yet. kinds = [ \"Invalid\",", "sqlalchemy.ext.declarative import declarative_base # DB Declaration Base = declarative_base() class KeyName(Base): __tablename__ =", "len(bytes) == 0 @property def hasCommandSignature(self): return self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def", "def hasStringList(self): return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property def hasOutputInfo(self): return self.kind in", "__repr__(self): output = \"BuildValue(kind=%r\" % self.kind if self.signature is not None: output +=", "[] else : num_dependencies = len(self.dependencies_bytes) / 8 return struct.unpack(\"<\" + str(num_dependencies) +", "= (modTimeSec, modTimeNano) def __repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))\" %", "value(self): return BuildValue(self.value_bytes) @property def dependencies(self): if self.dependencies_bytes is None: return [] else", "self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:] else: self.outputs = None # The 
strings follow, if", "import relation, relationship from sqlalchemy.ext.declarative import declarative_base # DB Declaration Base = declarative_base()", "self.kind in (\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self): output = \"BuildValue(kind=%r\" % self.kind if", "self.strings is not None: output += \", strings=%r\" % self.strings output += \")\"", "bytes[:8])[0] bytes = bytes[8:] if stringsLength == 0: self.strings = [] else: stringData", "struct.unpack(\"<I\", bytes[:4])[0] bytes = bytes[4:] self.outputs = [] for i in range(numOutputs): #", "import * from sqlalchemy.orm import relation, relationship from sqlalchemy.ext.declarative import declarative_base # DB", "The outputs follow, if used. if self.hasOutputInfo: numOutputs = struct.unpack(\"<I\", bytes[:4])[0] bytes =", "= struct.unpack(\"<I\", bytes[:4])[0] bytes = bytes[4:] self.outputs = [] for i in range(numOutputs):", "follow, if used. if self.hasStringList: stringsLength = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] if", "(\"DirectoryContents\", \"StaleFileRemoval\") @property def hasOutputInfo(self): return self.kind in (\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self):", "isn't # available via an API we can access directly yet. 
kinds =", "self.strings = stringData[:-1].split(\"\\0\") else: self.strings = None assert len(bytes) == 0 @property def", "\", outputs=%r\" % self.outputs if self.strings is not None: output += \", strings=%r\"", "struct from sqlalchemy import * from sqlalchemy.orm import relation, relationship from sqlalchemy.ext.declarative import", "bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:] else: self.kind = \"Invalid\" #", "bytes[0])[0]] bytes = bytes[1:] else: self.kind = \"Invalid\" # The next item is", "modTimeNano) def __repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))\" % ( self.device,", "is None: return [] else : num_dependencies = len(self.dependencies_bytes) / 8 return struct.unpack(\"<\"", "bytes = bytes[48:] else: self.outputs = None # The strings follow, if used.", "else: self.strings = None assert len(bytes) == 0 @property def hasCommandSignature(self): return self.kind", "modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec, modTimeNano) def __repr__(self): return \"FileInfo(device=%r, inode=%#0x,", "self.hasOutputInfo: numOutputs = struct.unpack(\"<I\", bytes[:4])[0] bytes = bytes[4:] self.outputs = [] for i", "% self.strings output += \")\" return output class FileInfo(object): def __init__(self, bytes): (self.device,", "translation of the C++ # llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't", "output += \", signature=%0x\" % self.signature if self.outputs is not None: output +=", "None: output += \", outputs=%r\" % self.outputs if self.strings is not None: output", "(self.device, self.inode, self.mode, self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec, modTimeNano)", "API we can access directly yet. 
kinds = [ \"Invalid\", \"VirtualInput\", \"ExistingInput\", \"MissingInput\",", "== 0: self.strings = [] else: stringData = bytes[:stringsLength] bytes = bytes[stringsLength:] assert", "\"SuccessfulCommand\", \"DirectoryContents\") def __repr__(self): output = \"BuildValue(kind=%r\" % self.kind if self.signature is not", "else: self.signature = None # The outputs follow, if used. if self.hasOutputInfo: numOutputs", "self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property def hasOutputInfo(self): return self.kind in (\"ExistingInput\", \"SuccessfulCommand\", \"DirectoryContents\")", "= struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] else: self.signature = None # The outputs", "Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\", Binary, nullable=False) built_at = Column(Integer, nullable=False) computed_at", "declarative_base # DB Declaration Base = declarative_base() class KeyName(Base): __tablename__ = \"key_names\" id", "bytes = bytes[8:] else: self.signature = None # The outputs follow, if used.", "nullable=False) built_at = Column(Integer, nullable=False) computed_at = Column(Integer, nullable=False) key = relation(KeyName) dependencies_bytes", "else: stringData = bytes[:stringsLength] bytes = bytes[stringsLength:] assert len(stringData) == stringsLength assert stringData[-1]", "# The next item is the signature, if used. if self.hasCommandSignature: self.signature =", "@property def dependencies(self): if self.dependencies_bytes is None: return [] else : num_dependencies =", "\"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\", \"Target\", ] def __init__(self, data):", ": num_dependencies = len(self.dependencies_bytes) / 8 return struct.unpack(\"<\" + str(num_dependencies) + \"Q\", self.dependencies_bytes)", "range(numOutputs): # Read the file information. 
self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:] else: self.outputs =", "bytes[4:] self.outputs = [] for i in range(numOutputs): # Read the file information.", "import declarative_base # DB Declaration Base = declarative_base() class KeyName(Base): __tablename__ = \"key_names\"", "= Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\", Binary, nullable=False) built_at = Column(Integer, nullable=False)", "if self.outputs is not None: output += \", outputs=%r\" % self.outputs if self.strings", "= str(data) # The first byte is the kind. if bytes: self.kind =", "we can access directly yet. kinds = [ \"Invalid\", \"VirtualInput\", \"ExistingInput\", \"MissingInput\", \"DirectoryContents\",", "% self.signature if self.outputs is not None: output += \", outputs=%r\" % self.outputs", "the kind. if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:] else: self.kind", "( self.__class__.__name__, (self.id, self.key, self.value, self.built_at, self.computed_at)) @property def value(self): return BuildValue(self.value_bytes) @property", "bytes[stringsLength:] assert len(stringData) == stringsLength assert stringData[-1] == '\\0' self.strings = stringData[:-1].split(\"\\0\") else:", "manually Python translation of the C++ # llbuild::buildsystem::BuildValue type, which is unfortunate, but", "__repr__(self): return \"FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))\" % ( self.device, self.inode, self.mode,", "is a manually Python translation of the C++ # llbuild::buildsystem::BuildValue type, which is", "\"DirectoryTreeSignature\", \"StaleFileRemoval\", \"MissingOutput\", \"FailedInput\", \"SuccessfulCommand\", \"FailedCommand\", \"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\", \"Target\", ] def __init__(self,", "item is the signature, if used. 
if self.hasCommandSignature: self.signature = struct.unpack(\"<Q\", bytes[:8])[0] bytes", "\"Invalid\" # The next item is the signature, if used. if self.hasCommandSignature: self.signature", "None: output += \", strings=%r\" % self.strings output += \")\" return output class", "import struct from sqlalchemy import * from sqlalchemy.orm import relation, relationship from sqlalchemy.ext.declarative", "if used. if self.hasStringList: stringsLength = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] if stringsLength", "__repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.name)) class RuleResult(Base): __tablename__ = \"rule_results\"", "### class BuildValue(object): # FIXME: This is a manually Python translation of the", "type, which is unfortunate, but it isn't # available via an API we", "self.hasCommandSignature: self.signature = struct.unpack(\"<Q\", bytes[:8])[0] bytes = bytes[8:] else: self.signature = None #", "@property def hasCommandSignature(self): return self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self): return self.kind", "8 return struct.unpack(\"<\" + str(num_dependencies) + \"Q\", self.dependencies_bytes) ### class BuildValue(object): # FIXME:", "+= \", signature=%0x\" % self.signature if self.outputs is not None: output += \",", "= declarative_base() class KeyName(Base): __tablename__ = \"key_names\" id = Column(Integer, nullable=False, primary_key=True) name", "file information. self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:] else: self.outputs = None # The strings", "self.outputs = [] for i in range(numOutputs): # Read the file information. 
self.outputs.append(FileInfo(bytes[:48]))", "= [] else: stringData = bytes[:stringsLength] bytes = bytes[stringsLength:] assert len(stringData) == stringsLength", "len(stringData) == stringsLength assert stringData[-1] == '\\0' self.strings = stringData[:-1].split(\"\\0\") else: self.strings =", "\"PropagatedFailureCommand\", \"CancelledCommand\", \"SkippedCommand\", \"Target\", ] def __init__(self, data): bytes = str(data) # The", "\"SkippedCommand\", \"Target\", ] def __init__(self, data): bytes = str(data) # The first byte", "return \"%s%r\" % ( self.__class__.__name__, (self.id, self.key, self.value, self.built_at, self.computed_at)) @property def value(self):", "struct.unpack(\"<\" + str(num_dependencies) + \"Q\", self.dependencies_bytes) ### class BuildValue(object): # FIXME: This is", "len(self.dependencies_bytes) / 8 return struct.unpack(\"<\" + str(num_dependencies) + \"Q\", self.dependencies_bytes) ### class BuildValue(object):", "= \"rule_results\" id = Column(Integer, nullable=False, primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes", "The next item is the signature, if used. if self.hasCommandSignature: self.signature = struct.unpack(\"<Q\",", "value_bytes = Column(\"value\", Binary, nullable=False) built_at = Column(Integer, nullable=False) computed_at = Column(Integer, nullable=False)", "\"Q\", self.dependencies_bytes) ### class BuildValue(object): # FIXME: This is a manually Python translation", "hasStringList(self): return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property def hasOutputInfo(self): return self.kind in (\"ExistingInput\",", "is not None: output += \", outputs=%r\" % self.outputs if self.strings is not", "nullable=False) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.name)) class RuleResult(Base): __tablename__", "# The strings follow, if used. 
if self.hasStringList: stringsLength = struct.unpack(\"<Q\", bytes[:8])[0] bytes", "] def __init__(self, data): bytes = str(data) # The first byte is the", "outputs follow, if used. if self.hasOutputInfo: numOutputs = struct.unpack(\"<I\", bytes[:4])[0] bytes = bytes[4:]", "class FileInfo(object): def __init__(self, bytes): (self.device, self.inode, self.mode, self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\",", "can access directly yet. kinds = [ \"Invalid\", \"VirtualInput\", \"ExistingInput\", \"MissingInput\", \"DirectoryContents\", \"DirectoryTreeSignature\",", "\"Target\", ] def __init__(self, data): bytes = str(data) # The first byte is", "nullable=False) key = relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary, nullable=True) def __repr__(self): return \"%s%r\"", "Column('key', String, nullable=False) def __repr__(self): return \"%s%r\" % ( self.__class__.__name__, (self.id, self.name)) class", "self.kind in (\"SuccessfulCommand\", \"DirectoryTreeSignature\") @property def hasStringList(self): return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property", "\"BuildValue(kind=%r\" % self.kind if self.signature is not None: output += \", signature=%0x\" %", "= \"BuildValue(kind=%r\" % self.kind if self.signature is not None: output += \", signature=%0x\"", "= relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary, nullable=True) def __repr__(self): return \"%s%r\" % (", "sqlalchemy import * from sqlalchemy.orm import relation, relationship from sqlalchemy.ext.declarative import declarative_base #", "bytes[48:] else: self.outputs = None # The strings follow, if used. 
if self.hasStringList:", "@property def hasStringList(self): return self.kind in (\"DirectoryContents\", \"StaleFileRemoval\") @property def hasOutputInfo(self): return self.kind", "self.strings output += \")\" return output class FileInfo(object): def __init__(self, bytes): (self.device, self.inode,", "== stringsLength assert stringData[-1] == '\\0' self.strings = stringData[:-1].split(\"\\0\") else: self.strings = None", "bytes = str(data) # The first byte is the kind. if bytes: self.kind", "def value(self): return BuildValue(self.value_bytes) @property def dependencies(self): if self.dependencies_bytes is None: return []", "self.built_at, self.computed_at)) @property def value(self): return BuildValue(self.value_bytes) @property def dependencies(self): if self.dependencies_bytes is", "= None # The outputs follow, if used. if self.hasOutputInfo: numOutputs = struct.unpack(\"<I\",", "None: output += \", signature=%0x\" % self.signature if self.outputs is not None: output", "information. self.outputs.append(FileInfo(bytes[:48])) bytes = bytes[48:] else: self.outputs = None # The strings follow,", "(self.id, self.key, self.value, self.built_at, self.computed_at)) @property def value(self): return BuildValue(self.value_bytes) @property def dependencies(self):", "it isn't # available via an API we can access directly yet. kinds", "__init__(self, data): bytes = str(data) # The first byte is the kind. 
if", "self.mode, self.size, modTimeSec, modTimeNano) = struct.unpack(\"<QQQQQQ\", bytes) self.modTime = (modTimeSec, modTimeNano) def __repr__(self):", "def __repr__(self): output = \"BuildValue(kind=%r\" % self.kind if self.signature is not None: output", "Column(Integer, nullable=False, primary_key=True) key_id = Column(Integer, ForeignKey(KeyName.id), nullable=False) value_bytes = Column(\"value\", Binary, nullable=False)", "nullable=False) computed_at = Column(Integer, nullable=False) key = relation(KeyName) dependencies_bytes = Column(\"dependencies\", Binary, nullable=True)", "kind. if bytes: self.kind = self.__class__.kinds[struct.unpack(\"<B\", bytes[0])[0]] bytes = bytes[1:] else: self.kind =", "\")\" return output class FileInfo(object): def __init__(self, bytes): (self.device, self.inode, self.mode, self.size, modTimeSec," ]
[ "in range(n): x=int(input()) arr.append(x) v=int(input('Enter the value to be searched: ')) pos=binarySearch(arr,v) if", "array import* a=array('i',[]) def binarySearch(a, val): lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if a[mid]==", "a[mid]>val: ub=mid-1 elif a[mid]<val: lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter array size: ',)) print('Enter',n,'sorted", "value to be searched: ')) pos=binarySearch(arr,v) if pos!=-1: print('position of',v,'is:',pos) else: print('Value not", "to be searched: ')) pos=binarySearch(arr,v) if pos!=-1: print('position of',v,'is:',pos) else: print('Value not found')", "elements') for i in range(n): x=int(input()) arr.append(x) v=int(input('Enter the value to be searched:", "n=int(input('Enter array size: ',)) print('Enter',n,'sorted array elements') for i in range(n): x=int(input()) arr.append(x)", "from array import* a=array('i',[]) def binarySearch(a, val): lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if", "a=array('i',[]) def binarySearch(a, val): lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if a[mid]== val: return", "elif a[mid]<val: lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter array size: ',)) print('Enter',n,'sorted array elements')", "arr.append(x) v=int(input('Enter the value to be searched: ')) pos=binarySearch(arr,v) if pos!=-1: print('position of',v,'is:',pos)", "v=int(input('Enter the value to be searched: ')) pos=binarySearch(arr,v) if pos!=-1: print('position of',v,'is:',pos) else:", "lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if a[mid]== val: return mid elif a[mid]>val: ub=mid-1", "range(n): x=int(input()) arr.append(x) v=int(input('Enter the value to be searched: ')) pos=binarySearch(arr,v) if pos!=-1:", "if a[mid]== val: return mid elif a[mid]>val: ub=mid-1 elif a[mid]<val: lb=mid+1 return -1", "a[mid]== val: return mid elif a[mid]>val: ub=mid-1 elif a[mid]<val: lb=mid+1 return -1 arr=array('i',[])", "ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if a[mid]== val: return mid elif a[mid]>val: 
ub=mid-1 elif", "print('Enter',n,'sorted array elements') for i in range(n): x=int(input()) arr.append(x) v=int(input('Enter the value to", "a[mid]<val: lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter array size: ',)) print('Enter',n,'sorted array elements') for", "ub=mid-1 elif a[mid]<val: lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter array size: ',)) print('Enter',n,'sorted array", "val): lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if a[mid]== val: return mid elif a[mid]>val:", "-1 arr=array('i',[]) n=int(input('Enter array size: ',)) print('Enter',n,'sorted array elements') for i in range(n):", "def binarySearch(a, val): lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if a[mid]== val: return mid", "while lb<=ub: mid=(lb+ub)//2 if a[mid]== val: return mid elif a[mid]>val: ub=mid-1 elif a[mid]<val:", "<reponame>KishalayB18/Mini-Python-Projects<filename>3. BinarySearch.py from array import* a=array('i',[]) def binarySearch(a, val): lb=0 ub=len(a)-1 while lb<=ub:", "the value to be searched: ')) pos=binarySearch(arr,v) if pos!=-1: print('position of',v,'is:',pos) else: print('Value", "size: ',)) print('Enter',n,'sorted array elements') for i in range(n): x=int(input()) arr.append(x) v=int(input('Enter the", "mid=(lb+ub)//2 if a[mid]== val: return mid elif a[mid]>val: ub=mid-1 elif a[mid]<val: lb=mid+1 return", "binarySearch(a, val): lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if a[mid]== val: return mid elif", "return -1 arr=array('i',[]) n=int(input('Enter array size: ',)) print('Enter',n,'sorted array elements') for i in", "x=int(input()) arr.append(x) v=int(input('Enter the value to be searched: ')) pos=binarySearch(arr,v) if pos!=-1: print('position", "BinarySearch.py from array import* a=array('i',[]) def binarySearch(a, val): lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2", "',)) print('Enter',n,'sorted array elements') for i in range(n): x=int(input()) arr.append(x) v=int(input('Enter the value", "mid elif a[mid]>val: ub=mid-1 elif 
a[mid]<val: lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter array size:", "return mid elif a[mid]>val: ub=mid-1 elif a[mid]<val: lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter array", "array size: ',)) print('Enter',n,'sorted array elements') for i in range(n): x=int(input()) arr.append(x) v=int(input('Enter", "array elements') for i in range(n): x=int(input()) arr.append(x) v=int(input('Enter the value to be", "i in range(n): x=int(input()) arr.append(x) v=int(input('Enter the value to be searched: ')) pos=binarySearch(arr,v)", "lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter array size: ',)) print('Enter',n,'sorted array elements') for i", "elif a[mid]>val: ub=mid-1 elif a[mid]<val: lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter array size: ',))", "for i in range(n): x=int(input()) arr.append(x) v=int(input('Enter the value to be searched: '))", "val: return mid elif a[mid]>val: ub=mid-1 elif a[mid]<val: lb=mid+1 return -1 arr=array('i',[]) n=int(input('Enter", "lb<=ub: mid=(lb+ub)//2 if a[mid]== val: return mid elif a[mid]>val: ub=mid-1 elif a[mid]<val: lb=mid+1", "arr=array('i',[]) n=int(input('Enter array size: ',)) print('Enter',n,'sorted array elements') for i in range(n): x=int(input())", "import* a=array('i',[]) def binarySearch(a, val): lb=0 ub=len(a)-1 while lb<=ub: mid=(lb+ub)//2 if a[mid]== val:" ]
[ "import ConfigRenderer from .profile import Profile, UserConfig, PROFILES_DIR from .project import Project from", ".renderer import ConfigRenderer from .profile import Profile, UserConfig, PROFILES_DIR from .project import Project", "from .profile import Profile, UserConfig, PROFILES_DIR from .project import Project from .runtime import", ".profile import Profile, UserConfig, PROFILES_DIR from .project import Project from .runtime import RuntimeConfig", "from .renderer import ConfigRenderer from .profile import Profile, UserConfig, PROFILES_DIR from .project import", "ConfigRenderer from .profile import Profile, UserConfig, PROFILES_DIR from .project import Project from .runtime", "<gh_stars>0 from .renderer import ConfigRenderer from .profile import Profile, UserConfig, PROFILES_DIR from .project" ]
[ "key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer =", "@swagger_auto_schema( operation_description=\"Test of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400: \"Bad request\",", "201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", }, ) def post(self, request): \"\"\"Add", "serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data ) ) dataprovider =", "coming from the front-end Return : response (Response) : the response. GET request", "DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, ) def get(self, request): \"\"\"Send the list of DataProvider", "the request coming from the front-end Return : response (Response) : the response.", "def get(self, request): \"\"\"Send the list of DataProvider in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"):", "Equipment, FieldObject from openCMMS.settings import BASE_DIR from utils.data_provider import ( DataProviderException, add_job, scheduler,", "return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update or delete an equipment.\"\"\"", "DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will be", "ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider 
{dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return", "request, pk): \"\"\"Delete the DataProvider corresponding to the given key.\"\"\" try: dataprovider =", "status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update or delete an", "query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, ) def get(self, request): \"\"\"Send the", "import BASE_DIR from utils.data_provider import ( DataProviderException, add_job, scheduler, test_dataprovider_configuration, ) from utils.models", "key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider", "update or delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider corresponding to the given", "endpoints for our utilities.\"\"\" import logging import os from drf_yasg.utils import swagger_auto_schema from", "if serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data ) ) dataprovider", "\"Bad request\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def put(self, request, pk):", "the permissions, it will send HTTP 401. 
- The request must contain the", "request\", 401: \"Unhauthorized\", 501: \"Not implemented\" }, ) def post(self, request): \"\"\"Test of", "configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response =", "404: \"Not found\", }, ) def put(self, request, pk): \"\"\"Update the DataProvider corresponding", "will send HTTP 401. - The request must contain the python file name", "200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def", "= serializer.data.copy() dict_res['python_files'] = python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider", "DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", }, ) def post(self, request): \"\"\"Add a", "}, ) def get(self, request, pk): \"\"\"Send the dataprovider corresponding to the given", "or create a new one. 
Parameter : request (HttpRequest) : the request coming", "from maintenancemanagement.models import Equipment, FieldObject from openCMMS.settings import BASE_DIR from utils.data_provider import (", "request, pk): \"\"\"Send the dataprovider corresponding to the given key.\"\"\" try: equipment =", "DataProviderUpdateSerializer, ) from django.core.exceptions import ObjectDoesNotExist from rest_framework import status from rest_framework.response import", "equipments, 'data_providers': data_providers } ) dict_res = serializer.data.copy() dict_res['python_files'] = python_files return Response(dict_res)", "DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the", "Response(response, status=status.HTTP_200_OK) try: if not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value", "{param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors,", "401: \"Unhauthorized\", 404: \"Not found\", }, ) def get(self, request, pk): \"\"\"Send the", "or delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider corresponding to the given key.',", "corresponding to the given key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not", "FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return 
Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider", "Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will be our endpoint for testing the config of", "scheduler, test_dataprovider_configuration, ) from utils.models import DataProvider from utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer,", "DataProviderList(APIView): r\"\"\"\\n# List all dataproviders or create a new one. Parameter : request", "return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False),", "Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update or delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider", "post(self, request): \"\"\"Test of data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer =", "send HTTP 400. 
- If the user doesn't have the permissions, it will", "DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id)", "response = {\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK) try: if not request.data['port']: value =", "provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400: \"Bad request\", 401: \"Unhauthorized\", 501: \"Not", "status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will be our endpoint for testing the", "the response. GET request : list all dataproviders and return the data POST", "class DataProviderDetail(APIView): \"\"\"Retrieve, update or delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider corresponding", "user doesn't have the permissions, it will send HTTP 401. 
- The request", "Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider corresponding to the given key.', query_serializer=DataProviderUpdateSerializer(many=False),", "pk): \"\"\"Send the dataprovider corresponding to the given key.\"\"\" try: equipment = DataProvider.objects.get(pk=pk)", "Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will be our endpoint for testing", "of data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if not", "address, the reccurence and the concerned \\ equipment and field. \"\"\" @swagger_auto_schema( operation_description='Send", "query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", }, ) def post(self,", "dataprovider corresponding to the given key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404:", "= DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers': data_providers } ) dict_res = serializer.data.copy() dict_res['python_files']", "pk): \"\"\"Update the DataProvider corresponding to the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk)", ": - create a new dataprovider, send HTTP 201. \\ If the request", "= DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider corresponding to the", ": the response. 
GET request : list all dataproviders and return the data", "\"Unhauthorized\", 404: \"Not found\", }, ) def get(self, request, pk): \"\"\"Send the dataprovider", "to the given key.', query_serializer=None, responses={ 204: \"No content\", 401: \"Unhauthorized\", 404: \"Not", "else: value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response", "200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not found\", }, ) def get(self, request, pk):", "send HTTP 201. \\ If the request is not valid, send HTTP 400.", "found\", }, ) def get(self, request, pk): \"\"\"Send the dataprovider corresponding to the", "return Response(response, status=status.HTTP_200_OK) try: if not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else:", "HTTP 400. - If the user doesn't have the permissions, it will send", "a DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401:", "import ( DataProviderException, add_job, scheduler, test_dataprovider_configuration, ) from utils.models import DataProvider from utils.serializers", "if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider", "{data}\".format( dataprovider=repr(dataprovider), data=request.data ) ) dataprovider = serializer.save() if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id)", "from drf_yasg.utils import swagger_auto_schema from maintenancemanagement.models import Equipment, FieldObject from openCMMS.settings import BASE_DIR", "to the given 
key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\",", "responses={ 200: 'OK', 400: \"Bad request\", 401: \"Unhauthorized\", 501: \"Not implemented\" }, )", "the DataProvider corresponding to the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist:", "@swagger_auto_schema( operation_description='Update the DataProvider corresponding to the given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False),", "\"\"\"Delete the DataProvider corresponding to the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except", "new dataprovider, send HTTP 201. \\ If the request is not valid, send", "operation_description='Delete the DataProvider corresponding to the given key.', query_serializer=None, responses={ 204: \"No content\",", "( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions import ObjectDoesNotExist from rest_framework import", "'equipments': equipments, 'data_providers': data_providers } ) dict_res = serializer.data.copy() dict_res['python_files'] = python_files return", "DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update", "}, ) def put(self, request, pk): \"\"\"Update the DataProvider corresponding to the given", "200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, ) def get(self, request): \"\"\"Send the list of", "Response from rest_framework.views import APIView logger = logging.getLogger(__name__) class 
DataProviderList(APIView): r\"\"\"\\n# List all", "rest_framework.views import APIView logger = logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n# List all dataproviders or", "file to provide our endpoints for our utilities.\"\"\" import logging import os from", "DataProvider.objects.all() equipments = Equipment.objects.all() serializer = DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers': data_providers }", "DataProvider corresponding to the given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\",", "dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider, data=request.data,", "DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data) return", "doesn't have the permissions, it will send HTTP 401. 
- The request must", "partial=True) if serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data ) )", "Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400:", "delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider corresponding to the given key.', query_serializer=None,", "our utilities.\"\"\" import logging import os from drf_yasg.utils import swagger_auto_schema from maintenancemanagement.models import", "utils.models import DataProvider from utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from", ") logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response = {\"data\": value} return Response(response, status=status.HTTP_200_OK) except", "Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update or delete", "200: 'OK', 400: \"Bad request\", 401: \"Unhauthorized\", 501: \"Not implemented\" }, ) def", "given key.\"\"\" try: equipment = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer", "\"\"\"Retrieve, update or delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider corresponding to the", "the concerned \\ equipment and field. 
\"\"\" @swagger_auto_schema( operation_description='Send the list of DataProvider", "404: \"Not found\", }, ) def delete(self, request, pk): \"\"\"Delete the DataProvider corresponding", "Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will be our endpoint", "class DataProviderList(APIView): r\"\"\"\\n# List all dataproviders or create a new one. Parameter :", "Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider corresponding to the given key.', query_serializer=None, responses={ 204:", "given key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not found\", }, )", "logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response = {\"data\": value} return Response(response, status=status.HTTP_200_OK) except DataProviderException", "it will send HTTP 401. 
- The request must contain the python file", "dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors,", ") def delete(self, request, pk): \"\"\"Delete the DataProvider corresponding to the given key.\"\"\"", "serializer.errors} return Response(response, status=status.HTTP_200_OK) try: if not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502)", "= DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer", "pk): \"\"\"Delete the DataProvider corresponding to the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk)", "with {data}\".format(data=request.data)) response = {\"data\": value} return Response(response, status=status.HTTP_200_OK) except DataProviderException as e:", "401: \"Unhauthorized\", 501: \"Not implemented\" }, ) def post(self, request): \"\"\"Test of data", "if not serializer.is_valid(): response = {\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK) try: if not", "scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED)", "test_dataprovider_configuration, ) from utils.models import DataProvider from utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, 
DataProviderRequirementsSerializer,", "python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files: python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all()", "\"Not implemented\" }, ) def post(self, request): \"\"\"Test of data provider's configuration.\"\"\" if", "{\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK) try: if not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'],", "swagger_auto_schema from maintenancemanagement.models import Equipment, FieldObject from openCMMS.settings import BASE_DIR from utils.data_provider import", "return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete", "to provide our endpoints for our utilities.\"\"\" import logging import os from drf_yasg.utils", "from rest_framework.views import APIView logger = logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n# List all dataproviders", "dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if", "= DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id:", "Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): 
serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid(): logger.info( \"UPDATED DataProvider", "return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update or delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the", "request.data['ip_address'], 502) else: value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider with", "from rest_framework.response import Response from rest_framework.views import APIView logger = logging.getLogger(__name__) class DataProviderList(APIView):", "the DataProvider corresponding to the given key.', query_serializer=None, responses={ 204: \"No content\", 401:", "corresponding to the given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401:", "the given key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not found\", },", "ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid(): logger.info(", "dataprovider=repr(dataprovider), data=request.data ) ) dataprovider = serializer.save() if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else:", "- create a new dataprovider, send HTTP 201. 
\\ If the request is", "request (HttpRequest) : the request coming from the front-end Return : response (Response)", "to the given key.\"\"\" try: equipment = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if", "the python file name of the dataprovider,\\ the targeted IP address, the reccurence", "provide our endpoints for our utilities.\"\"\" import logging import os from drf_yasg.utils import", "query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400: \"Bad request\", 401: \"Unhauthorized\", 501: \"Not implemented\" },", "serializer = DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers': data_providers } ) dict_res = serializer.data.copy()", ") from utils.models import DataProvider from utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer,", "import ObjectDoesNotExist from rest_framework import status from rest_framework.response import Response from rest_framework.views import", "@swagger_auto_schema( operation_description='Send the list of DataProvider in the database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False),", "def put(self, request, pk): \"\"\"Update the DataProvider corresponding to the given key.\"\"\" try:", "or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response = {\"error\": serializer.errors} return", "request): \"\"\"Add a DataProvider into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except", "be our endpoint for testing the config of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of", "the given key.\"\"\" try: 
dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"):", "into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", },", "- If the user doesn't have the permissions, it will send HTTP 401.", "TestDataProvider(APIView): \"\"\"This will be our endpoint for testing the config of a dataprovider.\"\"\"", "DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions import ObjectDoesNotExist from rest_framework import status", "\"Not found\", }, ) def delete(self, request, pk): \"\"\"Delete the DataProvider corresponding to", "given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED", "400. 
- If the user doesn't have the permissions, it will send HTTP", "} ) dict_res = serializer.data.copy() dict_res['python_files'] = python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema(", "openCMMS.settings import BASE_DIR from utils.data_provider import ( DataProviderException, add_job, scheduler, test_dataprovider_configuration, ) from", "dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "import DataProvider from utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions", "\"Unhauthorized\", 404: \"Not found\", }, ) def put(self, request, pk): \"\"\"Update the DataProvider", "if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED)", "and return the data POST request : - create a new dataprovider, send", "the list of DataProvider in the database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\",", "serializer = DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response = {\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK)", "DataProvider in the database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, ) def", "'OK', 400: \"Bad request\", 401: 
\"Unhauthorized\", 501: \"Not implemented\" }, ) def post(self,", "return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will be our endpoint for", "400: \"Bad request\", 401: \"Unhauthorized\", 501: \"Not implemented\" }, ) def post(self, request):", "query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", 404: \"Not found\", },", "if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider)", "data POST request : - create a new dataprovider, send HTTP 201. \\", "logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update", "the targeted IP address, the reccurence and the concerned \\ equipment and field.", "to the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if", "request : - create a new dataprovider, send HTTP 201. 
\\ If the", "dataprovider,\\ the targeted IP address, the reccurence and the concerned \\ equipment and", "\"Bad request\", 401: \"Unhauthorized\", 501: \"Not implemented\" }, ) def post(self, request): \"\"\"Test", "\"\"\"Test of data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if", "list of DataProvider in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py'))", "DataProviderException, add_job, scheduler, test_dataprovider_configuration, ) from utils.models import DataProvider from utils.serializers import (", "list of DataProvider in the database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", },", "(HttpRequest) : the request coming from the front-end Return : response (Response) :", "in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in", "serializer.save() if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data)", "\"Unhauthorized\", }, ) def get(self, request): \"\"\"Send the list of DataProvider in the", "python file name of the dataprovider,\\ the targeted IP address, the reccurence and", "DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response = {\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK) try: if", "drf_yasg.utils import swagger_auto_schema from 
maintenancemanagement.models import Equipment, FieldObject from openCMMS.settings import BASE_DIR from", "data_providers = DataProvider.objects.all() equipments = Equipment.objects.all() serializer = DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers':", "501: \"Not implemented\" }, ) def post(self, request): \"\"\"Test of data provider's configuration.\"\"\"", "def post(self, request): \"\"\"Add a DataProvider into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\"))", "reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not found\", }, ) def get(self, request,", "corresponding to the given key.\"\"\" try: equipment = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND)", "request must contain the python file name of the dataprovider,\\ the targeted IP", "= DataProvider.objects.all() equipments = Equipment.objects.all() serializer = DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers': data_providers", "permissions, it will send HTTP 401. - The request must contain the python", "def delete(self, request, pk): \"\"\"Delete the DataProvider corresponding to the given key.\"\"\" try:", "request, pk): \"\"\"Update the DataProvider corresponding to the given key.\"\"\" try: dataprovider =", "with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return", "query_serializer=None, responses={ 204: \"No content\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def", "one. Parameter : request (HttpRequest) : the request coming from the front-end Return", "new one. 
Parameter : request (HttpRequest) : the request coming from the front-end", "found\", }, ) def put(self, request, pk): \"\"\"Update the DataProvider corresponding to the", "request): \"\"\"Test of data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data)", ": request (HttpRequest) : the request coming from the front-end Return : response", "implemented\" }, ) def post(self, request): \"\"\"Test of data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\")", "try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider,", "database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", }, ) def", "def post(self, request): \"\"\"Test of data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer", "request.data['port'] ) logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response = {\"data\": value} return Response(response, status=status.HTTP_200_OK)", "\"\"\"Send the list of DataProvider in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR,", "serializer.is_valid(): response = {\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK) try: if not request.data['port']: value", "DataProvider with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return 
Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED)", "return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will be our endpoint for testing the config", "ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with {param}\".format(param=request.data)) dataprovider", "post(self, request): \"\"\"Add a DataProvider into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\"))", "the given key.\"\"\" try: equipment = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"):", "return the data POST request : - create a new dataprovider, send HTTP", "a new dataprovider, send HTTP 201. \\ If the request is not valid,", "\"Unhauthorized\", }, ) def post(self, request): \"\"\"Add a DataProvider into the database.\"\"\" if", "ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema(", "send HTTP 401. - The request must contain the python file name of", "corresponding to the given key.', query_serializer=None, responses={ 204: \"No content\", 401: \"Unhauthorized\", 404:", "If the user doesn't have the permissions, it will send HTTP 401. -", "serializer.data.copy() dict_res['python_files'] = python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider into", "field. 
\"\"\" @swagger_auto_schema( operation_description='Send the list of DataProvider in the database.', query_serializer=None, responses={", "responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", }, ) def post(self, request):", "dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider corresponding to", "test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider", ") def get(self, request): \"\"\"Send the list of DataProvider in the database.\"\"\" if", "all dataproviders and return the data POST request : - create a new", "have the permissions, it will send HTTP 401. 
- The request must contain", "The request must contain the python file name of the dataprovider,\\ the targeted", "401: \"Unhauthorized\", 404: \"Not found\", }, ) def delete(self, request, pk): \"\"\"Delete the", "the config of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={", "from openCMMS.settings import BASE_DIR from utils.data_provider import ( DataProviderException, add_job, scheduler, test_dataprovider_configuration, )", "\"\"\" @swagger_auto_schema( operation_description='Send the list of DataProvider in the database.', query_serializer=None, responses={ 200:", "provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response", "Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return", "request : list all dataproviders and return the data POST request : -", "\"Bad request\", 401: \"Unhauthorized\", }, ) def post(self, request): \"\"\"Add a DataProvider into", "utilities.\"\"\" import logging import os from drf_yasg.utils import swagger_auto_schema from maintenancemanagement.models import Equipment,", "Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201:", "valid, send HTTP 400. 
- If the user doesn't have the permissions, it", "the given key.', query_serializer=None, responses={ 204: \"No content\", 401: \"Unhauthorized\", 404: \"Not found\",", "request\", 401: \"Unhauthorized\", }, ) def post(self, request): \"\"\"Add a DataProvider into the", "BASE_DIR from utils.data_provider import ( DataProviderException, add_job, scheduler, test_dataprovider_configuration, ) from utils.models import", "If the request is not valid, send HTTP 400. - If the user", "import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions import ObjectDoesNotExist from rest_framework", "\"UPDATED DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data ) ) dataprovider = serializer.save() if", "database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data)", "request coming from the front-end Return : response (Response) : the response. 
GET", "the dataprovider corresponding to the given key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\",", "try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED", "database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files: python_files.pop(python_files.index('__pycache__'))", "= os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files: python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() equipments", "response = {\"data\": value} return Response(response, status=status.HTTP_200_OK) except DataProviderException as e: response =", "the front-end Return : response (Response) : the response. 
GET request : list", "return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={", "status=status.HTTP_200_OK) try: if not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value =", "test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response = {\"data\": value}", "responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", 404: \"Not found\", }, )", "equipments = Equipment.objects.all() serializer = DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers': data_providers } )", "the given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", 404:", "}, ) def delete(self, request, pk): \"\"\"Delete the DataProvider corresponding to the given", "DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if", "response (Response) : the response. 
GET request : list all dataproviders and return", "is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", ") def post(self, request): \"\"\"Test of data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"):", "Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider corresponding to the given key.', query_serializer=None,", "logger.info(\"CREATED DataProvider with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data,", "DataProvider corresponding to the given key.', query_serializer=None, responses={ 204: \"No content\", 401: \"Unhauthorized\",", "except DataProviderException as e: response = {\"error\": str(e)} return Response(response, status=status.HTTP_200_OK) return Response(status=status.HTTP_401_UNAUTHORIZED)", "( DataProviderException, add_job, scheduler, test_dataprovider_configuration, ) from utils.models import DataProvider from utils.serializers import", "@swagger_auto_schema( operation_description='Send the dataprovider corresponding to the given key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False),", "from the front-end Return : response (Response) : the response. 
GET request :", "502) else: value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider with {data}\".format(data=request.data))", "data_providers } ) dict_res = serializer.data.copy() dict_res['python_files'] = python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED)", "if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return", "utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions import ObjectDoesNotExist from", "DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data", "request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema(", "= DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data)", "equipment = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return", "from utils.models 
import DataProvider from utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, )", "except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED)", "request): \"\"\"Send the list of DataProvider in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files =", "{dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider", "DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def put(self,", "401. 
- The request must contain the python file name of the dataprovider,\\", "try: equipment = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment)", ") ) dataprovider = serializer.save() if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer", "FieldObject from openCMMS.settings import BASE_DIR from utils.data_provider import ( DataProviderException, add_job, scheduler, test_dataprovider_configuration,", "import APIView logger = logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n# List all dataproviders or create", "responses={ 204: \"No content\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def delete(self,", "given key.', query_serializer=None, responses={ 204: \"No content\", 401: \"Unhauthorized\", 404: \"Not found\", },", "class TestDataProvider(APIView): \"\"\"This will be our endpoint for testing the config of a", "logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n# List all dataproviders or create a new one. 
Parameter", "dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED)", "logger = logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n# List all dataproviders or create a new", "the list of DataProvider in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers'))", "operation_description='Send the list of DataProvider in the database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401:", "DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data ) ) dataprovider = serializer.save() if dataprovider.is_activated", "query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not found\", }, ) def get(self,", ") dataprovider = serializer.save() if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer =", ") def get(self, request, pk): \"\"\"Send the dataprovider corresponding to the given key.\"\"\"", "request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response = {\"error\": serializer.errors}", "operation_description=\"Test of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400: \"Bad request\", 401:", "reccurence and the concerned \\ equipment and field. 
\"\"\" @swagger_auto_schema( operation_description='Send the list", "and field. \"\"\" @swagger_auto_schema( operation_description='Send the list of DataProvider in the database.', query_serializer=None,", "is not valid, send HTTP 400. - If the user doesn't have the", "return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider corresponding to the given key.',", "the dataprovider corresponding to the given key.\"\"\" try: equipment = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist:", "= Equipment.objects.all() serializer = DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers': data_providers } ) dict_res", "request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider corresponding", "if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files: python_files.pop(python_files.index('__pycache__')) data_providers", "the request is not valid, send HTTP 400. 
- If the user doesn't", ") def put(self, request, pk): \"\"\"Update the DataProvider corresponding to the given key.\"\"\"", "will be our endpoint for testing the config of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test", "\"Not found\", }, ) def get(self, request, pk): \"\"\"Send the dataprovider corresponding to", "value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] )", "our endpoint for testing the config of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data", "request is not valid, send HTTP 400. - If the user doesn't have", "python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False),", "front-end Return : response (Response) : the response. 
GET request : list all", "= DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will", "in the database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, ) def get(self,", "except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with {param}\".format(param=request.data))", "dict_res = serializer.data.copy() dict_res['python_files'] = python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a", "= DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response = {\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK) try:", "return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid(): logger.info( \"UPDATED", "status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update or delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send", "{\"data\": value} return Response(response, status=status.HTTP_200_OK) except DataProviderException as e: response = {\"error\": str(e)}", "the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"):", "dataprovider, send HTTP 201. 
\\ If the request is not valid, send HTTP", "dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400: \"Bad", "DataProvider with {data}\".format(data=request.data)) response = {\"data\": value} return Response(response, status=status.HTTP_200_OK) except DataProviderException as", "get(self, request): \"\"\"Send the list of DataProvider in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files", "of DataProvider in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if", "value} return Response(response, status=status.HTTP_200_OK) except DataProviderException as e: response = {\"error\": str(e)} return", "if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider corresponding", "HTTP 401. 
- The request must contain the python file name of the", "operation_description='Send the dataprovider corresponding to the given key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401:", "Equipment.objects.all() serializer = DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers': data_providers } ) dict_res =", "401: \"Unhauthorized\", }, ) def post(self, request): \"\"\"Add a DataProvider into the database.\"\"\"", "dict_res['python_files'] = python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider into the", "given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer", "key.', query_serializer=None, responses={ 204: \"No content\", 401: \"Unhauthorized\", 404: \"Not found\", }, )", "201. \\ If the request is not valid, send HTTP 400. - If", "import Equipment, FieldObject from openCMMS.settings import BASE_DIR from utils.data_provider import ( DataProviderException, add_job,", "401: \"Unhauthorized\", }, ) def get(self, request): \"\"\"Send the list of DataProvider in", "import logging import os from drf_yasg.utils import swagger_auto_schema from maintenancemanagement.models import Equipment, FieldObject", "POST request : - create a new dataprovider, send HTTP 201. 
\\ If", "= {\"data\": value} return Response(response, status=status.HTTP_200_OK) except DataProviderException as e: response = {\"error\":", "DataProviderRequirementsSerializer( { 'equipments': equipments, 'data_providers': data_providers } ) dict_res = serializer.data.copy() dict_res['python_files'] =", "\"\"\"This is our file to provide our endpoints for our utilities.\"\"\" import logging", ") dict_res = serializer.data.copy() dict_res['python_files'] = python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add", "found\", }, ) def delete(self, request, pk): \"\"\"Delete the DataProvider corresponding to the", "DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not found\", }, ) def get(self, request, pk): \"\"\"Send", "configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400: \"Bad request\", 401: \"Unhauthorized\", 501: \"Not implemented\"", "'data_providers': data_providers } ) dict_res = serializer.data.copy() dict_res['python_files'] = python_files return Response(dict_res) return", "python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files: python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() equipments = Equipment.objects.all() serializer", "endpoint for testing the config of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data provider's", "a new one. 
Parameter : request (HttpRequest) : the request coming from the", "add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class", "ObjectDoesNotExist from rest_framework import status from rest_framework.response import Response from rest_framework.views import APIView", "= dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return", "{ 'equipments': equipments, 'data_providers': data_providers } ) dict_res = serializer.data.copy() dict_res['python_files'] = python_files", "Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider corresponding to the given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200:", "dataprovider = serializer.save() if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider)", "404: \"Not found\", }, ) def get(self, request, pk): \"\"\"Send the dataprovider corresponding", "to the given key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not found\",", "logging import os from drf_yasg.utils import swagger_auto_schema from maintenancemanagement.models import Equipment, FieldObject from", "'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files: 
python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() equipments = Equipment.objects.all()", "import swagger_auto_schema from maintenancemanagement.models import Equipment, FieldObject from openCMMS.settings import BASE_DIR from utils.data_provider", "if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if", "= DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider),", "return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This will be our", "return Response(response, status=status.HTTP_200_OK) except DataProviderException as e: response = {\"error\": str(e)} return Response(response,", "dataproviders and return the data POST request : - create a new dataprovider,", "equipment and field. \"\"\" @swagger_auto_schema( operation_description='Send the list of DataProvider in the database.',", "IP address, the reccurence and the concerned \\ equipment and field. 
\"\"\" @swagger_auto_schema(", "request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files: python_files.pop(python_files.index('__pycache__')) data_providers =", "the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer =", "the dataprovider,\\ the targeted IP address, the reccurence and the concerned \\ equipment", "except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete()", "data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if not serializer.is_valid():", "Response(response, status=status.HTTP_200_OK) except DataProviderException as e: response = {\"error\": str(e)} return Response(response, status=status.HTTP_200_OK)", "\\ equipment and field. 
\"\"\" @swagger_auto_schema( operation_description='Send the list of DataProvider in the", "except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid():", "of DataProvider in the database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, )", "{data}\".format(data=request.data)) response = {\"data\": value} return Response(response, status=status.HTTP_200_OK) except DataProviderException as e: response", "list all dataproviders and return the data POST request : - create a", "- The request must contain the python file name of the dataprovider,\\ the", "DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider corresponding to the given", "return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider corresponding to the given key.', query_serializer=None, responses={", "List all dataproviders or create a new one. 
Parameter : request (HttpRequest) :", "for testing the config of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data provider's configuration.\",", "return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider corresponding to the given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={", "\"\"\"Update the DataProvider corresponding to the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except", "dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView): \"\"\"This", "import status from rest_framework.response import Response from rest_framework.views import APIView logger = logging.getLogger(__name__)", "if '__pycache__' in python_files: python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() equipments = Equipment.objects.all() serializer =", "from utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions import ObjectDoesNotExist", "of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK',", "serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider} with {data}\".format(", "import Response from rest_framework.views import APIView logger = logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n# List", "dataprovider corresponding to the given key.\"\"\" try: equipment = 
DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return", "data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400: \"Bad request\", 401: \"Unhauthorized\", 501:", "DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions import ObjectDoesNotExist from rest_framework import status from rest_framework.response", "the reccurence and the concerned \\ equipment and field. \"\"\" @swagger_auto_schema( operation_description='Send the", "serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the DataProvider corresponding to", "data=request.data, partial=True) if serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data )", "data=request.data ) ) dataprovider = serializer.save() if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id)", "dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider corresponding to the given", "DataProvider corresponding to the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return", "the database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, ) def get(self, request):", "the user doesn't have the permissions, it will send HTTP 401. 
- The", "return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update or", "of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400: \"Bad request\", 401: \"Unhauthorized\",", "dataproviders or create a new one. Parameter : request (HttpRequest) : the request", "400: \"Bad request\", 401: \"Unhauthorized\", }, ) def post(self, request): \"\"\"Add a DataProvider", "django.core.exceptions import ObjectDoesNotExist from rest_framework import status from rest_framework.response import Response from rest_framework.views", "testing the config of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False),", "rest_framework import status from rest_framework.response import Response from rest_framework.views import APIView logger =", "from rest_framework import status from rest_framework.response import Response from rest_framework.views import APIView logger", "create a new dataprovider, send HTTP 201. 
\\ If the request is not", "Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save()", "DataProvider in the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__'", "logger.info( \"UPDATED DataProvider {dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data ) ) dataprovider = serializer.save()", "scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider corresponding to the", "os from drf_yasg.utils import swagger_auto_schema from maintenancemanagement.models import Equipment, FieldObject from openCMMS.settings import", "scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class TestDataProvider(APIView):", "return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider))) if dataprovider.job_id: scheduler.remove_job(dataprovider.job_id) dataprovider.delete() return Response(status=status.HTTP_204_NO_CONTENT)", "try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.delete_dataprovider\"): 
logger.info(\"DELETED DataProvider {dataprovider}\".format(dataprovider=repr(dataprovider)))", "\"\"\"This will be our endpoint for testing the config of a dataprovider.\"\"\" @swagger_auto_schema(", "concerned \\ equipment and field. \"\"\" @swagger_auto_schema( operation_description='Send the list of DataProvider in", "DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer =", "GET request : list all dataproviders and return the data POST request :", "the database.\"\"\" if request.user.has_perm(\"utils.view_dataprovider\"): python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files:", "if not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value = test_dataprovider_configuration( request.data['file_name'],", "Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve, update or delete an equipment.\"\"\" @swagger_auto_schema(", "= test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response = {\"data\":", "os.listdir(os.path.join(BASE_DIR, 'utils/data_providers')) python_files.pop(python_files.index('__init__.py')) if '__pycache__' in python_files: python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() equipments =", "from django.core.exceptions import ObjectDoesNotExist from rest_framework import status from rest_framework.response import Response from", ") from django.core.exceptions 
import ObjectDoesNotExist from rest_framework import status from rest_framework.response import Response", "the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", }, )", "not valid, send HTTP 400. - If the user doesn't have the permissions,", "\"\"\"Send the dataprovider corresponding to the given key.\"\"\" try: equipment = DataProvider.objects.get(pk=pk) except", "put(self, request, pk): \"\"\"Update the DataProvider corresponding to the given key.\"\"\" try: dataprovider", "request\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def put(self, request, pk): \"\"\"Update", "key.\"\"\" try: equipment = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer =", "= test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED", "responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, ) def get(self, request): \"\"\"Send the list", "r\"\"\"\\n# List all dataproviders or create a new one. 
Parameter : request (HttpRequest)", "}, ) def get(self, request): \"\"\"Send the list of DataProvider in the database.\"\"\"", "our endpoints for our utilities.\"\"\" import logging import os from drf_yasg.utils import swagger_auto_schema", "our file to provide our endpoints for our utilities.\"\"\" import logging import os", "}, ) def post(self, request): \"\"\"Test of data provider's configuration.\"\"\" if request.user.has_perm(\"utils.change_dataprovider\") or", "key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", 404: \"Not found\",", "DataProviderDetail(APIView): \"\"\"Retrieve, update or delete an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider corresponding to", "204: \"No content\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def delete(self, request,", "Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.view_dataprovider\"): serializer = DataProviderDetailsSerializer(equipment) return Response(serializer.data) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Delete the", "a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: 'OK', 400:", "value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response =", "def get(self, request, pk): \"\"\"Send the dataprovider corresponding to the given key.\"\"\" try:", "equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider corresponding to the given key.', query_serializer=None, reponses={ 200:", "python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() 
equipments = Equipment.objects.all() serializer = DataProviderRequirementsSerializer( { 'equipments': equipments,", "delete(self, request, pk): \"\"\"Delete the DataProvider corresponding to the given key.\"\"\" try: dataprovider", "all dataproviders or create a new one. Parameter : request (HttpRequest) : the", "dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView):", "DataProvider into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST)", ": response (Response) : the response. GET request : list all dataproviders and", "must contain the python file name of the dataprovider,\\ the targeted IP address,", "dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return", "\\ If the request is not valid, send HTTP 400. 
- If the", "{dataprovider} with {data}\".format( dataprovider=repr(dataprovider), data=request.data ) ) dataprovider = serializer.save() if dataprovider.is_activated is", "in python_files: python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() equipments = Equipment.objects.all() serializer = DataProviderRequirementsSerializer( {", "utils.data_provider import ( DataProviderException, add_job, scheduler, test_dataprovider_configuration, ) from utils.models import DataProvider from", "request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response = {\"error\": serializer.errors} return Response(response,", "request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider} with", "from utils.data_provider import ( DataProviderException, add_job, scheduler, test_dataprovider_configuration, ) from utils.models import DataProvider", "= DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True)", "try: if not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value = test_dataprovider_configuration(", "is our file to provide our endpoints for our utilities.\"\"\" import logging import", "the data POST request : - create a new dataprovider, send HTTP 201.", "request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'], request.data['port']", "HTTP 201. 
\\ If the request is not valid, send HTTP 400. -", "status=status.HTTP_200_OK) except DataProviderException as e: response = {\"error\": str(e)} return Response(response, status=status.HTTP_200_OK) return", "operation_description='Add a DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\",", "database.', query_serializer=None, responses={ 200: DataProviderRequirementsSerializer(many=False), 401: \"Unhauthorized\", }, ) def get(self, request): \"\"\"Send", "request.data['file_name'], request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response = {\"data\": value} return", "401: \"Unhauthorized\", 404: \"Not found\", }, ) def put(self, request, pk): \"\"\"Update the", ") def post(self, request): \"\"\"Add a DataProvider into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try:", "create a new one. 
Parameter : request (HttpRequest) : the request coming from", "Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with", "a DataProvider into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return", "of the dataprovider,\\ the targeted IP address, the reccurence and the concerned \\", "import os from drf_yasg.utils import swagger_auto_schema from maintenancemanagement.models import Equipment, FieldObject from openCMMS.settings", "APIView logger = logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n# List all dataproviders or create a", "DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\",", "the DataProvider corresponding to the given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad", "\"\"\"Add a DataProvider into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist:", "corresponding to the given key.\"\"\" try: dataprovider = DataProvider.objects.get(pk=pk) except ObjectDoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND)", "return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Update the DataProvider corresponding to the given key.',", 
"status from rest_framework.response import Response from rest_framework.views import APIView logger = logging.getLogger(__name__) class", "= logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n# List all dataproviders or create a new one.", "with {data}\".format( dataprovider=repr(dataprovider), data=request.data ) ) dataprovider = serializer.save() if dataprovider.is_activated is False:", "python_files: python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() equipments = Equipment.objects.all() serializer = DataProviderRequirementsSerializer( { 'equipments':", "= {\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK) try: if not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'],", "an equipment.\"\"\" @swagger_auto_schema( operation_description='Send the dataprovider corresponding to the given key.', query_serializer=None, reponses={", "if request.user.has_perm(\"utils.change_dataprovider\") or request.user.has_perm(\"utils.add_dataprovider\"): serializer = DataProviderCreateSerializer(data=request.data) if not serializer.is_valid(): response = {\"error\":", "given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400: \"Bad request\", 401: \"Unhauthorized\", 404: \"Not", "for our utilities.\"\"\" import logging import os from drf_yasg.utils import swagger_auto_schema from maintenancemanagement.models", "\"Unhauthorized\", 404: \"Not found\", }, ) def delete(self, request, pk): \"\"\"Delete the DataProvider", "file name of the dataprovider,\\ the targeted IP address, the reccurence and the", "Parameter : request (HttpRequest) : the request coming from the front-end Return :", "return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED 
DataProvider with {param}\".format(param=request.data)) dataprovider =", "}, ) def post(self, request): \"\"\"Add a DataProvider into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'):", "DataProvider from utils.serializers import ( DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions import", "response. GET request : list all dataproviders and return the data POST request", "@swagger_auto_schema( operation_description='Add a DataProvider into the database.', query_serializer=DataProviderCreateSerializer(many=False), responses={ 201: DataProviderDetailsSerializer(many=False), 400: \"Bad", "@swagger_auto_schema( operation_description='Delete the DataProvider corresponding to the given key.', query_serializer=None, responses={ 204: \"No", "add_job, scheduler, test_dataprovider_configuration, ) from utils.models import DataProvider from utils.serializers import ( DataProviderCreateSerializer,", "and the concerned \\ equipment and field. 
\"\"\" @swagger_auto_schema( operation_description='Send the list of", "content\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def delete(self, request, pk): \"\"\"Delete", "operation_description='Update the DataProvider corresponding to the given key.', query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200: DataProviderDetailsSerializer(many=False), 400:", "else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class", "if request.user.has_perm(\"utils.change_dataprovider\"): serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True) if serializer.is_valid(): logger.info( \"UPDATED DataProvider {dataprovider}", "request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid():", "maintenancemanagement.models import Equipment, FieldObject from openCMMS.settings import BASE_DIR from utils.data_provider import ( DataProviderException,", "False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return", "(Response) : the response. 
GET request : list all dataproviders and return the", "key.', query_serializer=None, reponses={ 200: DataProviderDetailsSerializer(many=False), 401: \"Unhauthorized\", 404: \"Not found\", }, ) def", "dataprovider_serializer = DataProviderCreateSerializer(data=request.data) if dataprovider_serializer.is_valid(): logger.info(\"CREATED DataProvider with {param}\".format(param=request.data)) dataprovider = dataprovider_serializer.save() add_job(dataprovider)", "\"Unhauthorized\", 501: \"Not implemented\" }, ) def post(self, request): \"\"\"Test of data provider's", "= DataProviderDetailsSerializer(dataprovider) return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED) return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_401_UNAUTHORIZED) class DataProviderDetail(APIView): \"\"\"Retrieve,", "'__pycache__' in python_files: python_files.pop(python_files.index('__pycache__')) data_providers = DataProvider.objects.all() equipments = Equipment.objects.all() serializer = DataProviderRequirementsSerializer(", "into the database.\"\"\" if request.user.has_perm('utils.add_dataprovider'): try: FieldObject.objects.get(id=request.data.get(\"field_object\")) Equipment.objects.get(id=request.data.get(\"equipment\")) except ObjectDoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) dataprovider_serializer", "request.data['ip_address'], request.data['port'] ) logger.info(\"TESTED DataProvider with {data}\".format(data=request.data)) response = {\"data\": value} return Response(response,", "= serializer.save() if dataprovider.is_activated is False: scheduler.pause_job(dataprovider.job_id) else: scheduler.resume_job(dataprovider.job_id) dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider) return", "not serializer.is_valid(): response = {\"error\": serializer.errors} return Response(response, status=status.HTTP_200_OK) try: if not request.data['port']:", 
"\"No content\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def delete(self, request, pk):", "name of the dataprovider,\\ the targeted IP address, the reccurence and the concerned", "Return : response (Response) : the response. GET request : list all dataproviders", "contain the python file name of the dataprovider,\\ the targeted IP address, the", "config of a dataprovider.\"\"\" @swagger_auto_schema( operation_description=\"Test of data provider's configuration.\", query_serializer=DataProviderUpdateSerializer(many=False), responses={ 200:", "get(self, request, pk): \"\"\"Send the dataprovider corresponding to the given key.\"\"\" try: equipment", ": the request coming from the front-end Return : response (Response) : the", "400: \"Bad request\", 401: \"Unhauthorized\", 404: \"Not found\", }, ) def put(self, request,", "rest_framework.response import Response from rest_framework.views import APIView logger = logging.getLogger(__name__) class DataProviderList(APIView): r\"\"\"\\n#", "targeted IP address, the reccurence and the concerned \\ equipment and field. \"\"\"", "= python_files return Response(dict_res) return Response(status=status.HTTP_401_UNAUTHORIZED) @swagger_auto_schema( operation_description='Add a DataProvider into the database.',", "DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer, ) from django.core.exceptions import ObjectDoesNotExist from rest_framework import status from", "not request.data['port']: value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502) else: value = test_dataprovider_configuration( request.data['file_name'], request.data['ip_address'],", "\"Not found\", }, ) def put(self, request, pk): \"\"\"Update the DataProvider corresponding to", ": list all dataproviders and return the data POST request : - create" ]
[ "y_logits = tf.math.log(y_pred / (1 - y_pred)) loss = tf.nn.weighted_cross_entropy_with_logits(y_true, y_logits, pos_weight=beta) return", "200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of epochs to train. default: 1e-3') parser.add_argument('--batch_size', type=int,", "help='number of filters in the conv layers. default: [4, 8, 16]') return parser", "self._epoch_val_accs = [], [] for X in self._train_data: self._train_on_batch(X) for X in self._val_data:", "@staticmethod def get_arguments(): \"\"\"static method for parsing the arguments before instantiating a CAEtrainer\"\"\"", "def _train_on_batch(self, X): \"\"\"carries out a gradient step on a mini-batch.\"\"\" with tf.GradientTape()", "val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) # setting up shuffleing for training", "args.train_shuffle # whether to shuffle or not during training. self._model_dir = args.model_dir #", "best_val_loss = 1e6 best_val_acc = 0 # Training Loop # print('-' * 5", "into a list of np.arrays: workspace_list = [] for i in range(num_to_read): path", "type=str, default='max', help='pooling type of the CAE. default: max') parser.add_argument('--latent_dim', type=int, default=16, help='latent", "save the best model during training. # workspace related self._gen_workspace = args.gen_workspace #", "def __init__(self, CAE, optimizer, loss_func, args): \"\"\"Initializing a CAE trainer object. 
Args: -", "epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss / Val loss :", "shape (batch_size, height, width, channel_size) # batch size will be added by the", "accuracy: {}'.format(best_val_acc)) # loading the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self, X):", "self._obj_size_avg = args.obj_size_avg # average size of the objects in the workspace def", "= files[i] # loading and adding an extra dimension to the numpy array.", "Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size +", "if it is needed: if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static", "the workspace. default: 32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number of objects in the", "for training. default: 1000') parser.add_argument('--grid_size', type=int, default=32, help='number of grid points in the", "= args.epochs # number of training epochs self._batch_size = args.batch_size # batch size", "with tf.GradientTape() as tape: out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X,", "not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name", "- CAE: a Convolutional Autoencoder. 
An instance of hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers", "path = os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated {} workspaces and saved them into", "self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X): \"\"\"carries out a validation step on a", "directory: files = [os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)] num_of_files = len(files) #", "workspaces are used. default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where the generated workspaces", "width, channel_size) # batch size will be added by the tf.data.Dataset object. workspace", "a Convolutional Autoencoder. An instance of hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers instance -", "training if it is needed: if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments():", "type=bool, default=False, help='If gen_workspace==False, saved workspaces are used. default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces',", "_load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" # list of file names in the workspace directory:", "self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" # creating the workspace saving folder if", "_validate_on_batch(self, X): \"\"\"carries out a validation step on a mini-batch.\"\"\" out = self._CAE(X)", "and saved them into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" # list", "of workspaces to use for training. default: 1000') parser.add_argument('--grid_size', type=int, default=32, help='number of", "def _save_model(self): \"\"\"checking whether the path where the model has to be saved", "CAE. 
default: 16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16], help='number of filters in", "def _validate_on_batch(self, X): \"\"\"carries out a validation step on a mini-batch.\"\"\" out =", "# getting logits from sigmoid output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon()))", "path = files[i] # loading and adding an extra dimension to the numpy", "self._gen_workspace = args.gen_workspace # whether to newly generate workspaces (True) or use saved", "self._num_workspaces or num_of_files number of workspaces, whichewer is smaller: num_to_read = num_of_files if", "self._optimizer = optimizer self._loss_func = loss_func self._train_losses, self._val_losses = [], [] self._train_accs, self._val_accs", "self._obj_size_avg) file_name = 'ws_' + str(i) + '.csv' path = os.path.join(self._workspace_dir, file_name) np.savetxt(path,", "saved workspaces are used. default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where the generated", "int(self._num_workspaces * 0.2) test_size = int(self._num_workspaces * 0.2) train_size = self._num_workspaces - val_size", "them into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" # list of file", "self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss / Val loss : {} /", "CAE related: parser.add_argument('--pooling', type=str, default='max', help='pooling type of the CAE. default: max') parser.add_argument('--latent_dim',", "self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X, out): \"\"\"calculates the accuracy for a mini-batch.\"\"\" #", "or not during training. 
default: True') #parser.add_argument('--pos_weight', type=float, default=2, # help='weight for positive", "gradient step on a mini-batch.\"\"\" with tf.GradientTape() as tape: out = self._CAE(X) loss", "or not during training. self._model_dir = args.model_dir # directory to save the best", "parser.add_argument('--obj_size_avg', type=int, default=8, help='average size of the objects in the workspace. default: 8')", "* 5) print('best validation loss: {}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc)) # loading the", "np import argparse import os import matplotlib.pyplot as plt from tensorflow.data import Dataset", "some variables from the parsed arguments.\"\"\" # training related: self._epochs = args.epochs #", "average size of the objects in the workspace def weighted_cross_entropy(beta): \"\"\"returns a weighted", "objects in the workspace def weighted_cross_entropy(beta): \"\"\"returns a weighted cross entropy loss function", "self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X):", "Datasets from the list: val_size = int(self._num_workspaces * 0.2) test_size = int(self._num_workspaces *", "smaller: num_to_read = num_of_files if num_of_files < self._num_workspaces else self._num_workspaces # reading in", "maximum number of objects in the workspace self._obj_size_avg = args.obj_size_avg # average size", "type=bool, default=True, help='Whether to shuffle or not during training. 
default: True') #parser.add_argument('--pos_weight', type=float,", "tensorflow.keras.optimizers as opt import numpy as np import argparse import os import matplotlib.pyplot", "os.makedirs(self._model_dir) file_name = 'model.h5' path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was saved to", "* 0.2) train_size = self._num_workspaces - val_size - test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size)", "the accuracy for a mini-batch.\"\"\" # if an entry is bigger than 0.5,", "= [], [] self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], []", "if an entry is bigger than 0.5, it is considered as 1: out_rounded", "import BinaryCrossentropy from hwr.cae.cae import CAE from hwr.random_workspace import random_workspace class CAEtrainer(): \"\"\"A", "workspaces into a list of np.arrays: workspace_list = [] for i in range(num_to_read):", "parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False, saved workspaces are used. default: False') parser.add_argument('--workspace_dir', type=str,", "tf.float32) metric = tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded) return metric.result().numpy() def _save_model(self): \"\"\"checking", "to the numpy array. # neede because the Conv2D layer waits for shape", "= len(files) # read in either self._num_workspaces or num_of_files number of workspaces, whichewer", "model has to be saved exists or not and sace the model.\"\"\" if", "creating the Datasets from the list: val_size = int(self._num_workspaces * 0.2) test_size =", "to train on self._grid_size = args.grid_size # number of grid points in the", "argparse import os import matplotlib.pyplot as plt from tensorflow.data import Dataset from tensorflow.keras.losses", "it is considered as 1: out_rounded = tf.cast(out >= 0.5, tf.float32) metric =", "cross entropy loss. 
default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to save the best", "5 + 'TRAINING HAS ENDED' + '-' * 5) print('best validation loss: {}'.format(best_val_loss))", "import Dataset from tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae import CAE from hwr.random_workspace import", "best_val_acc = self._val_accs[-1] #self._save_model() print('-' * 5 + 'TRAINING HAS ENDED' + '-'", "- tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred / (1 - y_pred)) loss = tf.nn.weighted_cross_entropy_with_logits(y_true, y_logits,", "< best_val_loss: best_val_loss = self._val_losses[-1] self._save_model() if self._val_accs[-1] >= best_val_acc: best_val_acc = self._val_accs[-1]", "optimizer, loss_func, args): \"\"\"Initializing a CAE trainer object. Args: - CAE: a Convolutional", "def _calc_accuracy(self, X, out): \"\"\"calculates the accuracy for a mini-batch.\"\"\" # if an", "an entry is bigger than 0.5, it is considered as 1: out_rounded =", "plt from tensorflow.data import Dataset from tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae import CAE", "'TRAINING HAS STARTED' + '-' * 5) for epoch in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses", "0.2) test_size = int(self._num_workspaces * 0.2) train_size = self._num_workspaces - val_size - test_size", "of objects in the workspace self._obj_size_avg = args.obj_size_avg # average size of the", "= args.num_obj_max # maximum number of objects in the workspace self._obj_size_avg = args.obj_size_avg", "self._train_data: self._train_on_batch(X) for X in self._val_data: self._validate_on_batch(X) # losses and accuracy of the", "help='number of grid points in the workspace. 
default: 32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum", "\"\"\"calculates the accuracy for a mini-batch.\"\"\" # if an entry is bigger than", "parser.add_argument('--num_workspaces', type=int, default=1000, help='number of workspaces to use for training. default: 1000') parser.add_argument('--grid_size',", "grid points in the workspace. default: 32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number of", "help='average size of the objects in the workspace. default: 8') # CAE related:", "in the workspace self._obj_size_avg = args.obj_size_avg # average size of the objects in", "# loading the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self, X): \"\"\"carries out", "for shape (batch_size, height, width, channel_size) # batch size will be added by", "not during training. self._model_dir = args.model_dir # directory to save the best model", "= Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) #", "arguments.\"\"\" # training related: self._epochs = args.epochs # number of training epochs self._batch_size", "save the best trained model. 
default: ../models/cae') # workspace related parser.add_argument('--gen_workspace', type=bool, default=False,", "related self._gen_workspace = args.gen_workspace # whether to newly generate workspaces (True) or use", "number of objects in the workspace self._obj_size_avg = args.obj_size_avg # average size of", "yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max,", "args.obj_size_avg # average size of the objects in the workspace def weighted_cross_entropy(beta): \"\"\"returns", "[4, 8, 16]') return parser def _set_up_from_args(self, args): \"\"\"setting up some variables from", "- test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size)", "{}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc / Val acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1])) #", "type=int, default=16, help='latent dimension of the CAE. default: 16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4,", "default=5, help='maximum number of objects in the workspace. default: 5') parser.add_argument('--obj_size_avg', type=int, default=8,", "X in self._train_data: self._train_on_batch(X) for X in self._val_data: self._validate_on_batch(X) # losses and accuracy", "self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss / Val loss : {}", "default: ../models/cae') # workspace related parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False, saved workspaces are", "to train. 
default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of epochs to train. default:", "up some variables from the parsed arguments.\"\"\" # training related: self._epochs = args.epochs", "[] for i in range(num_to_read): path = files[i] # loading and adding an", "self._num_workspaces - val_size - test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size :", "whichewer is smaller: num_to_read = num_of_files if num_of_files < self._num_workspaces else self._num_workspaces #", "is needed: if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static method for", "/ {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc / Val acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1]))", "training. # workspace related self._gen_workspace = args.gen_workspace # whether to newly generate workspaces", "of filters in the conv layers. default: [4, 8, 16]') return parser def", "1000') parser.add_argument('--grid_size', type=int, default=32, help='number of grid points in the workspace. default: 32')", "optimizer self._loss_func = loss_func self._train_losses, self._val_losses = [], [] self._train_accs, self._val_accs = [],", "loss. default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to save the best trained model.", "metric.result().numpy() def _save_model(self): \"\"\"checking whether the path where the model has to be", "It first either loads pre-generated or generates workspaces to train on. 
Then it", "[] for X in self._train_data: self._train_on_batch(X) for X in self._val_data: self._validate_on_batch(X) # losses", "= self._CAE(X) loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss, self._CAE.trainable_weights)", "workspace def weighted_cross_entropy(beta): \"\"\"returns a weighted cross entropy loss function weighted by beta.", "# maximum number of objects in the workspace self._obj_size_avg = args.obj_size_avg # average", "saving the model, if it is the best so far: if self._val_losses[-1] <", "parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where the generated workspaces are stored. default: ../workspaces') parser.add_argument('--num_workspaces',", "in the workspace directory: files = [os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)] num_of_files", "tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred / (1 - y_pred)) loss = tf.nn.weighted_cross_entropy_with_logits(y_true, y_logits, pos_weight=beta)", "Conv2D layer waits for shape (batch_size, height, width, channel_size) # batch size will", "points in the workspace self._num_obj_max = args.num_obj_max # maximum number of objects in", "an extra dimension to the numpy array. # neede because the Conv2D layer", "args): \"\"\"setting up some variables from the parsed arguments.\"\"\" # training related: self._epochs", "= self._CAE(X) loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X, out):", "loaded self._num_workspaces = args.num_workspaces # numbr of worksapces to train on self._grid_size =", "\"\"\"returns a weighted cross entropy loss function weighted by beta. 
\"\"\" def loss(y_true,", "# print('-' * 5 + 'TRAINING HAS STARTED' + '-' * 5) for", "{}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the model, if it is the best so far:", "parser.add_argument('--batch_size', type=int, default=32, help='batch size. default: 32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to shuffle", "= self._num_workspaces - val_size - test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size", "_ = metric.update_state(X, out_rounded) return metric.result().numpy() def _save_model(self): \"\"\"checking whether the path where", "of np.arrays: workspace_list = [] for i in range(num_to_read): path = files[i] #", "stored. default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number of workspaces to use for training.", "instance \"\"\" self._CAE = CAE self._optimizer = optimizer self._loss_func = loss_func self._train_losses, self._val_losses", "loss / Val loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc / Val", "the generated workspaces are stored. default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number of workspaces", "CAE trainer object. Args: - CAE: a Convolutional Autoencoder. An instance of hwr.cae.cae.CAE", "default=1000, help='number of workspaces to use for training. 
default: 1000') parser.add_argument('--grid_size', type=int, default=32,", "= args.gen_workspace # whether to newly generate workspaces (True) or use saved ones", "as 1: out_rounded = tf.cast(out >= 0.5, tf.float32) metric = tf.keras.metrics.Accuracy() _ =", "and sace the model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5' path =", "+ val_size): ]).batch(self._batch_size) # setting up shuffleing for training if it is needed:", "Val acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the model, if it", "saved exists or not and sace the model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name", "'.csv' path = os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated {} workspaces and saved them", "of grid points in the workspace. default: 32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number", "of epochs to train. default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of epochs to", "from hwr.cae.cae import CAE from hwr.random_workspace import random_workspace class CAEtrainer(): \"\"\"A trainer class", "it trains the CAE. \"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6 best_val_acc", "default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where the generated workspaces are stored. default:", "shuffle or not during training. default: True') #parser.add_argument('--pos_weight', type=float, default=2, # help='weight for", "workspaces.\"\"\" # creating the workspace saving folder if it does not exist yet:", "workspace related parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False, saved workspaces are used. 
default: False')", "args.num_obj_max # maximum number of objects in the workspace self._obj_size_avg = args.obj_size_avg #", "def get_arguments(): \"\"\"static method for parsing the arguments before instantiating a CAEtrainer\"\"\" parser", "Autoencoder. An instance of hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers instance - lass_func: A", "args.model_dir # directory to save the best model during training. # workspace related", "the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self, X): \"\"\"carries out a gradient", "in the workspace. default: 5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average size of the objects", "# batch size will be added by the tf.data.Dataset object. workspace = np.expand_dims(np.loadtxt(path),", ": {} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc / Val acc : {} /", "[] self._epoch_train_accs, self._epoch_val_accs = [], [] for X in self._train_data: self._train_on_batch(X) for X", "train_size = self._num_workspaces - val_size - test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data =", "= 1e6 best_val_acc = 0 # Training Loop # print('-' * 5 +", "self._val_accs[-1] #self._save_model() print('-' * 5 + 'TRAINING HAS ENDED' + '-' * 5)", "not exist yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in range(self._num_workspaces): workspace =", "and accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss", "= [] for i in range(num_to_read): path = files[i] # loading and adding", "read in either self._num_workspaces or num_of_files number of workspaces, whichewer is 
smaller: num_to_read", "numpy array. # neede because the Conv2D layer waits for shape (batch_size, height,", "* 0.2) test_size = int(self._num_workspaces * 0.2) train_size = self._num_workspaces - val_size -", "the objects in the workspace def weighted_cross_entropy(beta): \"\"\"returns a weighted cross entropy loss", "model, if it is the best so far: if self._val_losses[-1] < best_val_loss: best_val_loss", "default: 5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average size of the objects in the workspace.", "16], help='number of filters in the conv layers. default: [4, 8, 16]') return", "class CAEtrainer(): \"\"\"A trainer class for training a Convolutional Autoencoder.\"\"\" def __init__(self, CAE,", "CAE, optimizer, loss_func, args): \"\"\"Initializing a CAE trainer object. Args: - CAE: a", "in os.listdir(self._workspace_dir)] num_of_files = len(files) # read in either self._num_workspaces or num_of_files number", "self._val_losses[-1])) print('Train acc / Val acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving", "hwr.random_workspace import random_workspace class CAEtrainer(): \"\"\"A trainer class for training a Convolutional Autoencoder.\"\"\"", "default: max') parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension of the CAE. default: 16') parser.add_argument('--conv_filters',", "Val loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc / Val acc :", "\"\"\"static method for parsing the arguments before instantiating a CAEtrainer\"\"\" parser = argparse.ArgumentParser()", "training. 
default: True') #parser.add_argument('--pos_weight', type=float, default=2, # help='weight for positive weighting in cross", "self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static method for parsing the arguments before instantiating a", "= num_of_files if num_of_files < self._num_workspaces else self._num_workspaces # reading in the workspaces", "[], [] self._epoch_train_accs, self._epoch_val_accs = [], [] self._set_up_from_args(args) def __call__(self): \"\"\"Training loop for", "type of the CAE. default: max') parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension of the", "\"\"\"checking whether the path where the model has to be saved exists or", "train. default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of epochs to train. default: 1e-3')", "default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of epochs to train. default: 1e-3') parser.add_argument('--batch_size',", "in either self._num_workspaces or num_of_files number of workspaces, whichewer is smaller: num_to_read =", "if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5' path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model", "gen_workspace==False, saved workspaces are used. default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where the", "the path where the model has to be saved exists or not and", "# workspace related parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False, saved workspaces are used. default:", "parser.add_argument('--grid_size', type=int, default=32, help='number of grid points in the workspace. 
default: 32') parser.add_argument('--num_obj_max',", "# saving the model, if it is the best so far: if self._val_losses[-1]", "exists or not and sace the model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name =", "i in range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name = 'ws_' + str(i)", "size will be added by the tf.data.Dataset object. workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace)", "in cross entropy loss. default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to save the", "so far: if self._val_losses[-1] < best_val_loss: best_val_loss = self._val_losses[-1] self._save_model() if self._val_accs[-1] >=", "= 'ws_' + str(i) + '.csv' path = os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated", "if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6 best_val_acc = 0 # Training Loop", "out a gradient step on a mini-batch.\"\"\" with tf.GradientTape() as tape: out =", "considered as 1: out_rounded = tf.cast(out >= 0.5, tf.float32) metric = tf.keras.metrics.Accuracy() _", "from tensorflow.data import Dataset from tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae import CAE from", "of hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers instance - lass_func: A tensorflow.keras.losses instance \"\"\"", "can be loaded self._num_workspaces = args.num_workspaces # numbr of worksapces to train on", "from which saved workspaces can be loaded self._num_workspaces = args.num_workspaces # numbr of", "variables from the parsed arguments.\"\"\" # training related: self._epochs = args.epochs # number", "print('Train loss / Val loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc /", "= self._val_accs[-1] #self._save_model() print('-' * 5 
+ 'TRAINING HAS ENDED' + '-' *", "parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train. default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3,", "best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model() print('-' * 5 + 'TRAINING HAS ENDED' +", "tensorflow.data import Dataset from tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae import CAE from hwr.random_workspace", "self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X, out): \"\"\"calculates the accuracy for", "default=1e-3, help='number of epochs to train. default: 1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch size.", "Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) # setting", "best_val_loss = self._val_losses[-1] self._save_model() if self._val_accs[-1] >= best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model() print('-'", "model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5' path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path)", "= self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X, out): \"\"\"calculates the accuracy", "tensorflow as tf import tensorflow.keras.optimizers as opt import numpy as np import argparse", "tf.cast(out >= 0.5, tf.float32) metric = tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded) return metric.result().numpy()", "related: parser.add_argument('--pooling', type=str, default='max', help='pooling type of the CAE. 
default: max') parser.add_argument('--latent_dim', type=int,", "# if an entry is bigger than 0.5, it is considered as 1:", "_set_up_from_args(self, args): \"\"\"setting up some variables from the parsed arguments.\"\"\" # training related:", "# directory to save the best model during training. # workspace related self._gen_workspace", "self._train_losses, self._val_losses = [], [] self._train_accs, self._val_accs = [], [] self._epoch_train_losses, self._epoch_val_losses =", "self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6 best_val_acc = 0 # Training Loop # print('-'", "help='number of workspaces to use for training. default: 1000') parser.add_argument('--grid_size', type=int, default=32, help='number", "during training. self._model_dir = args.model_dir # directory to save the best model during", "def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" # creating the workspace saving folder if it", "training related parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train. default: 200') parser.add_argument('--learning_rate',", "first either loads pre-generated or generates workspaces to train on. 
Then it trains", "print('best validation loss: {}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc)) # loading the best model:", "getting logits from sigmoid output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits", "list of file names in the workspace directory: files = [os.path.join(self._workspace_dir, name) for", "lass_func: A tensorflow.keras.losses instance \"\"\" self._CAE = CAE self._optimizer = optimizer self._loss_func =", "sace the model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5' path = os.path.join(self._model_dir,", "to be saved exists or not and sace the model.\"\"\" if not os.path.exists(self._model_dir):", "self._num_workspaces else self._num_workspaces # reading in the workspaces into a list of np.arrays:", "bigger than 0.5, it is considered as 1: out_rounded = tf.cast(out >= 0.5,", "the arguments before instantiating a CAEtrainer\"\"\" parser = argparse.ArgumentParser() # training related parser.add_argument('--epochs',", "type=str, default='../models/cae', help='directory to save the best trained model. default: ../models/cae') # workspace", "nargs='+', default=[4, 8, 16], help='number of filters in the conv layers. 
default: [4,", "# workspace related self._gen_workspace = args.gen_workspace # whether to newly generate workspaces (True)", "print('-' * 5 + 'TRAINING HAS STARTED' + '-' * 5) for epoch", "file_name) np.savetxt(path, workspace) print('generated {} workspaces and saved them into {}'.format(self._num_workspaces, self._workspace_dir)) def", "= Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size", "os.listdir(self._workspace_dir)] num_of_files = len(files) # read in either self._num_workspaces or num_of_files number of", "default='../models/cae', help='directory to save the best trained model. default: ../models/cae') # workspace related", "in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] for", "loading and adding an extra dimension to the numpy array. # neede because", "from the list: val_size = int(self._num_workspaces * 0.2) test_size = int(self._num_workspaces * 0.2)", "_save_model(self): \"\"\"checking whether the path where the model has to be saved exists", "on. Then it trains the CAE. 
\"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss =", "0.2) train_size = self._num_workspaces - val_size - test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data", "folder if it does not exist yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i", "the workspace def weighted_cross_entropy(beta): \"\"\"returns a weighted cross entropy loss function weighted by", "= self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function", "= np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating the Datasets from the list: val_size =", "loss_func self._train_losses, self._val_losses = [], [] self._train_accs, self._val_accs = [], [] self._epoch_train_losses, self._epoch_val_losses", "help='number of epochs to train. default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of epochs", "= random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name = 'ws_' + str(i) + '.csv' path =", "trained model. default: ../models/cae') # workspace related parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False, saved", "[], [] self._epoch_train_accs, self._epoch_val_accs = [], [] for X in self._train_data: self._train_on_batch(X) for", "X): \"\"\"carries out a gradient step on a mini-batch.\"\"\" with tf.GradientTape() as tape:", "for i in range(num_to_read): path = files[i] # loading and adding an extra", "used. 
default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where the generated workspaces are stored.", "workspace related self._gen_workspace = args.gen_workspace # whether to newly generate workspaces (True) or", "workspace_list = [] for i in range(num_to_read): path = files[i] # loading and", "BinaryCrossentropy from hwr.cae.cae import CAE from hwr.random_workspace import random_workspace class CAEtrainer(): \"\"\"A trainer", "self._train_accs, self._val_accs = [], [] self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs =", "setting up shuffleing for training if it is needed: if self._train_shuffle: self._train_data =", "\"\"\"Generating new workspaces.\"\"\" # creating the workspace saving folder if it does not", "not and sace the model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5' path", "in the workspaces into a list of np.arrays: workspace_list = [] for i", "best trained model. default: ../models/cae') # workspace related parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False,", "trainer object. Args: - CAE: a Convolutional Autoencoder. An instance of hwr.cae.cae.CAE -", "the Conv2D layer waits for shape (batch_size, height, width, channel_size) # batch size", "parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of epochs to train. default: 1e-3') parser.add_argument('--batch_size', type=int, default=32,", "workspace. 
default: 5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average size of the objects in the", "# batch size self._train_shuffle = args.train_shuffle # whether to shuffle or not during", "list of np.arrays: workspace_list = [] for i in range(num_to_read): path = files[i]", "\"\"\" def loss(y_true, y_pred): # getting logits from sigmoid output: y_pred = tf.clip_by_value(y_pred,", "1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch size. default: 32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to", "tf.math.log(y_pred / (1 - y_pred)) loss = tf.nn.weighted_cross_entropy_with_logits(y_true, y_logits, pos_weight=beta) return tf.reduce_mean(loss) return", "out)) grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X): \"\"\"carries out", "for training a Convolutional Autoencoder.\"\"\" def __init__(self, CAE, optimizer, loss_func, args): \"\"\"Initializing a", "= os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was saved to ' + self._model_dir) def _generate_new_workspaces(self):", "in the conv layers. 
default: [4, 8, 16]') return parser def _set_up_from_args(self, args):", "= [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] for X in self._train_data: self._train_on_batch(X)", "= [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] self._set_up_from_args(args) def __call__(self): \"\"\"Training loop", "_calc_accuracy(self, X, out): \"\"\"calculates the accuracy for a mini-batch.\"\"\" # if an entry", "os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was saved to ' + self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating", "is the best so far: if self._val_losses[-1] < best_val_loss: best_val_loss = self._val_losses[-1] self._save_model()", "cross entropy loss function weighted by beta. \"\"\" def loss(y_true, y_pred): # getting", "the model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5' path = os.path.join(self._model_dir, file_name)", "method for parsing the arguments before instantiating a CAEtrainer\"\"\" parser = argparse.ArgumentParser() #", "shuffle or not during training. 
self._model_dir = args.model_dir # directory to save the", "loading the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self, X): \"\"\"carries out a", "return metric.result().numpy() def _save_model(self): \"\"\"checking whether the path where the model has to", "# read in either self._num_workspaces or num_of_files number of workspaces, whichewer is smaller:", "in the workspace def weighted_cross_entropy(beta): \"\"\"returns a weighted cross entropy loss function weighted", "on a mini-batch.\"\"\" with tf.GradientTape() as tape: out = self._CAE(X) loss = self._loss_func(X,", "= 'model.h5' path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was saved to ' +", "step on a mini-batch.\"\"\" with tf.GradientTape() as tape: out = self._CAE(X) loss =", "a mini-batch.\"\"\" out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def", "of the objects in the workspace def weighted_cross_entropy(beta): \"\"\"returns a weighted cross entropy", "a Convolutional Autoencoder.\"\"\" def __init__(self, CAE, optimizer, loss_func, args): \"\"\"Initializing a CAE trainer", "the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss / Val loss", "either self._num_workspaces or num_of_files number of workspaces, whichewer is smaller: num_to_read = num_of_files", "= self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static method for parsing the arguments before instantiating", "A tensorflow.keras.losses instance \"\"\" self._CAE = CAE self._optimizer = optimizer self._loss_func = loss_func", "epoch in range(self._epochs): 
self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], []", "tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae import CAE from hwr.random_workspace import random_workspace class CAEtrainer():", "default: 1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch size. default: 32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether", "for CAE. It first either loads pre-generated or generates workspaces to train on.", "either loads pre-generated or generates workspaces to train on. Then it trains the", "the workspace. default: 5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average size of the objects in", "get_arguments(): \"\"\"static method for parsing the arguments before instantiating a CAEtrainer\"\"\" parser =", "the model has to be saved exists or not and sace the model.\"\"\"", "sigmoid output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred /", "self._train_on_batch(X) for X in self._val_data: self._validate_on_batch(X) # losses and accuracy of the epoch:", "[] self._set_up_from_args(args) def __call__(self): \"\"\"Training loop for CAE. It first either loads pre-generated", "workspaces to use for training. default: 1000') parser.add_argument('--grid_size', type=int, default=32, help='number of grid", "'TRAINING HAS ENDED' + '-' * 5) print('best validation loss: {}'.format(best_val_loss)) print('best validation", "will be added by the tf.data.Dataset object. workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) #", "layers. 
default: [4, 8, 16]') return parser def _set_up_from_args(self, args): \"\"\"setting up some", "is considered as 1: out_rounded = tf.cast(out >= 0.5, tf.float32) metric = tf.keras.metrics.Accuracy()", "trainer class for training a Convolutional Autoencoder.\"\"\" def __init__(self, CAE, optimizer, loss_func, args):", "self._CAE = CAE self._optimizer = optimizer self._loss_func = loss_func self._train_losses, self._val_losses = [],", "new workspaces.\"\"\" # creating the workspace saving folder if it does not exist", "a gradient step on a mini-batch.\"\"\" with tf.GradientTape() as tape: out = self._CAE(X)", "= args.grid_size # number of grid points in the workspace self._num_obj_max = args.num_obj_max", "= tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded) return metric.result().numpy() def _save_model(self): \"\"\"checking whether the", "workspace saving folder if it does not exist yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir)", "not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5' path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was", "8, 16], help='number of filters in the conv layers. default: [4, 8, 16]')", "training. default: 1000') parser.add_argument('--grid_size', type=int, default=32, help='number of grid points in the workspace.", "output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred / (1", "of worksapces to train on self._grid_size = args.grid_size # number of grid points", "out)) def _calc_accuracy(self, X, out): \"\"\"calculates the accuracy for a mini-batch.\"\"\" # if", "help='number of epochs to train. default: 1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch size. 
default:", "'-' * 5) for epoch in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs,", "os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name =", "\"\"\" self._CAE = CAE self._optimizer = optimizer self._loss_func = loss_func self._train_losses, self._val_losses =", "type=int, default=1000, help='number of workspaces to use for training. default: 1000') parser.add_argument('--grid_size', type=int,", "workspaces.\"\"\" # list of file names in the workspace directory: files = [os.path.join(self._workspace_dir,", "size self._train_shuffle = args.train_shuffle # whether to shuffle or not during training. self._model_dir", "(False) self._workspace_dir = args.workspace_dir # folder from which saved workspaces can be loaded", "self._epochs = args.epochs # number of training epochs self._batch_size = args.batch_size # batch", "for name in os.listdir(self._workspace_dir)] num_of_files = len(files) # read in either self._num_workspaces or", "to use for training. default: 1000') parser.add_argument('--grid_size', type=int, default=32, help='number of grid points", "# CAE related: parser.add_argument('--pooling', type=str, default='max', help='pooling type of the CAE. default: max')", "num_of_files number of workspaces, whichewer is smaller: num_to_read = num_of_files if num_of_files <", "self._model_dir = args.model_dir # directory to save the best model during training. #", "# folder from which saved workspaces can be loaded self._num_workspaces = args.num_workspaces #", "CAEtrainer(): \"\"\"A trainer class for training a Convolutional Autoencoder.\"\"\" def __init__(self, CAE, optimizer,", "trains the CAE. 
\"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6 best_val_acc =", "the model, if it is the best so far: if self._val_losses[-1] < best_val_loss:", "len(files) # read in either self._num_workspaces or num_of_files number of workspaces, whichewer is", "= [], [] self._train_accs, self._val_accs = [], [] self._epoch_train_losses, self._epoch_val_losses = [], []", "__call__(self): \"\"\"Training loop for CAE. It first either loads pre-generated or generates workspaces", "number of training epochs self._batch_size = args.batch_size # batch size self._train_shuffle = args.train_shuffle", "<filename>hwr/cae/cae_trainer.py<gh_stars>1-10 import tensorflow as tf import tensorflow.keras.optimizers as opt import numpy as np", "are used. default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where the generated workspaces are", "[], [] for X in self._train_data: self._train_on_batch(X) for X in self._val_data: self._validate_on_batch(X) #", "workspace_list.append(workspace) # creating the Datasets from the list: val_size = int(self._num_workspaces * 0.2)", "names in the workspace directory: files = [os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)]", "{}'.format(epoch)) print('Train loss / Val loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc", "print('EPOCH {}'.format(epoch)) print('Train loss / Val loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train", "test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size) self._test_data", "import numpy as np import argparse import os import matplotlib.pyplot as plt from", "{} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc / Val acc 
: {} / {}'.format(self._train_accs[-1],", "path where the model has to be saved exists or not and sace", "self._epoch_train_accs, self._epoch_val_accs = [], [] self._set_up_from_args(args) def __call__(self): \"\"\"Training loop for CAE. It", "import CAE from hwr.random_workspace import random_workspace class CAEtrainer(): \"\"\"A trainer class for training", "as tf import tensorflow.keras.optimizers as opt import numpy as np import argparse import", "__init__(self, CAE, optimizer, loss_func, args): \"\"\"Initializing a CAE trainer object. Args: - CAE:", "os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated {} workspaces and saved them into {}'.format(self._num_workspaces, self._workspace_dir))", "layer waits for shape (batch_size, height, width, channel_size) # batch size will be", "optimizer: A tensorflow.keras.optimizers instance - lass_func: A tensorflow.keras.losses instance \"\"\" self._CAE = CAE", "because the Conv2D layer waits for shape (batch_size, height, width, channel_size) # batch", "file_name = 'model.h5' path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was saved to '", "_train_on_batch(self, X): \"\"\"carries out a gradient step on a mini-batch.\"\"\" with tf.GradientTape() as", "# reading in the workspaces into a list of np.arrays: workspace_list = []", "range(num_to_read): path = files[i] # loading and adding an extra dimension to the", "directory to save the best model during training. 
# workspace related self._gen_workspace =", "0.5, tf.float32) metric = tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded) return metric.result().numpy() def _save_model(self):", "= [os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)] num_of_files = len(files) # read in", "number of grid points in the workspace self._num_obj_max = args.num_obj_max # maximum number", "self._grid_size = args.grid_size # number of grid points in the workspace self._num_obj_max =", "(train_size + val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) # setting up shuffleing", "# setting up shuffleing for training if it is needed: if self._train_shuffle: self._train_data", "use saved ones (False) self._workspace_dir = args.workspace_dir # folder from which saved workspaces", "opt import numpy as np import argparse import os import matplotlib.pyplot as plt", "# creating the Datasets from the list: val_size = int(self._num_workspaces * 0.2) test_size", "if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg)", "self._num_workspaces = args.num_workspaces # numbr of worksapces to train on self._grid_size = args.grid_size", "args.num_workspaces # numbr of worksapces to train on self._grid_size = args.grid_size # number", "the workspace self._obj_size_avg = args.obj_size_avg # average size of the objects in the", "accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss /", "self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" # list of 
file names in the", "pre-generated or generates workspaces to train on. Then it trains the CAE. \"\"\"", "Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) # setting up shuffleing for training if it is", "self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss / Val loss : {} / {}'.format(self._train_losses[-1],", "32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number of objects in the workspace. default: 5')", "validation step on a mini-batch.\"\"\" out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy())", "hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers instance - lass_func: A tensorflow.keras.losses instance \"\"\" self._CAE", "loop for CAE. It first either loads pre-generated or generates workspaces to train", "# loading and adding an extra dimension to the numpy array. # neede", "= metric.update_state(X, out_rounded) return metric.result().numpy() def _save_model(self): \"\"\"checking whether the path where the", "default=[4, 8, 16], help='number of filters in the conv layers. default: [4, 8,", "parser = argparse.ArgumentParser() # training related parser.add_argument('--epochs', type=int, default=200, help='number of epochs to", "CAE: a Convolutional Autoencoder. An instance of hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers instance", "acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the model, if it is", "Autoencoder.\"\"\" def __init__(self, CAE, optimizer, loss_func, args): \"\"\"Initializing a CAE trainer object. 
Args:", "for training if it is needed: if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def", "[], [] self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] self._set_up_from_args(args)", "import matplotlib.pyplot as plt from tensorflow.data import Dataset from tensorflow.keras.losses import BinaryCrossentropy from", "for i in range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name = 'ws_' +", "default: 32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to shuffle or not during training. default:", "default='max', help='pooling type of the CAE. default: max') parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension", "help='If gen_workspace==False, saved workspaces are used. default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where", "matplotlib.pyplot as plt from tensorflow.data import Dataset from tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae", "tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X): \"\"\"carries out a validation step", "a mini-batch.\"\"\" # if an entry is bigger than 0.5, it is considered", "import os import matplotlib.pyplot as plt from tensorflow.data import Dataset from tensorflow.keras.losses import", "dimension to the numpy array. 
# neede because the Conv2D layer waits for", ":train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size):", "self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static method for parsing the arguments before", "type=int, default=32, help='number of grid points in the workspace. default: 32') parser.add_argument('--num_obj_max', type=int,", "train on. Then it trains the CAE. \"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss", "on self._grid_size = args.grid_size # number of grid points in the workspace self._num_obj_max", "workspace directory: files = [os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)] num_of_files = len(files)", "= argparse.ArgumentParser() # training related parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train.", "objects in the workspace. default: 5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average size of the", "32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to shuffle or not during training. default: True')", "help='folder where the generated workspaces are stored. default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number", "help='maximum number of objects in the workspace. default: 5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average", "Args: - CAE: a Convolutional Autoencoder. An instance of hwr.cae.cae.CAE - optimizer: A", "the workspaces into a list of np.arrays: workspace_list = [] for i in", "5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average size of the objects in the workspace. 
default:", "type=int, nargs='+', default=[4, 8, 16], help='number of filters in the conv layers. default:", "/ (1 - y_pred)) loss = tf.nn.weighted_cross_entropy_with_logits(y_true, y_logits, pos_weight=beta) return tf.reduce_mean(loss) return loss", "or not and sace the model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5'", "the CAE. default: max') parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension of the CAE. default:", "as opt import numpy as np import argparse import os import matplotlib.pyplot as", "for epoch in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [],", "Convolutional Autoencoder.\"\"\" def __init__(self, CAE, optimizer, loss_func, args): \"\"\"Initializing a CAE trainer object.", "to train on. Then it trains the CAE. \"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces()", "entropy loss function weighted by beta. \"\"\" def loss(y_true, y_pred): # getting logits", "on a mini-batch.\"\"\" out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out))", "the workspace. 
default: 8') # CAE related: parser.add_argument('--pooling', type=str, default='max', help='pooling type of", "else self._num_workspaces # reading in the workspaces into a list of np.arrays: workspace_list", "to newly generate workspaces (True) or use saved ones (False) self._workspace_dir = args.workspace_dir", "as np import argparse import os import matplotlib.pyplot as plt from tensorflow.data import", "y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred / (1 -", "+ '.csv' path = os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated {} workspaces and saved", "parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension of the CAE. default: 16') parser.add_argument('--conv_filters', type=int, nargs='+',", "from the parsed arguments.\"\"\" # training related: self._epochs = args.epochs # number of", "related: self._epochs = args.epochs # number of training epochs self._batch_size = args.batch_size #", "use for training. default: 1000') parser.add_argument('--grid_size', type=int, default=32, help='number of grid points in", "False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder where the generated workspaces are stored. default: ../workspaces')", "of the CAE. default: 16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16], help='number of", "out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss,", "argparse.ArgumentParser() # training related parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train. default:", "import argparse import os import matplotlib.pyplot as plt from tensorflow.data import Dataset from", "for a mini-batch.\"\"\" # if an entry is bigger than 0.5, it is", "function weighted by beta. 
\"\"\" def loss(y_true, y_pred): # getting logits from sigmoid", "8, 16]') return parser def _set_up_from_args(self, args): \"\"\"setting up some variables from the", "Dataset from tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae import CAE from hwr.random_workspace import random_workspace", "as plt from tensorflow.data import Dataset from tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae import", "be loaded self._num_workspaces = args.num_workspaces # numbr of worksapces to train on self._grid_size", "of file names in the workspace directory: files = [os.path.join(self._workspace_dir, name) for name", "channel_size) # batch size will be added by the tf.data.Dataset object. workspace =", "default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number of workspaces to use for training. default:", "= args.obj_size_avg # average size of the objects in the workspace def weighted_cross_entropy(beta):", "self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X): \"\"\"carries out a validation step on", "+ self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" # creating the workspace saving folder", "number of objects in the workspace. default: 5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average size", "X, out): \"\"\"calculates the accuracy for a mini-batch.\"\"\" # if an entry is", "default=32, help='batch size. 
default: 32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to shuffle or not", "{}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc)) # loading the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function", "loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X, out): \"\"\"calculates the", "the objects in the workspace. default: 8') # CAE related: parser.add_argument('--pooling', type=str, default='max',", "in range(num_to_read): path = files[i] # loading and adding an extra dimension to", "default: 32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number of objects in the workspace. default:", "default: True') #parser.add_argument('--pos_weight', type=float, default=2, # help='weight for positive weighting in cross entropy", "STARTED' + '-' * 5) for epoch in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses = [],", "mini-batch.\"\"\" # if an entry is bigger than 0.5, it is considered as", "filters in the conv layers. 
default: [4, 8, 16]') return parser def _set_up_from_args(self,", "default: [4, 8, 16]') return parser def _set_up_from_args(self, args): \"\"\"setting up some variables", "from hwr.random_workspace import random_workspace class CAEtrainer(): \"\"\"A trainer class for training a Convolutional", "def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" # list of file names in the workspace", "(True) or use saved ones (False) self._workspace_dir = args.workspace_dir # folder from which", "self._val_losses = [], [] self._train_accs, self._val_accs = [], [] self._epoch_train_losses, self._epoch_val_losses = [],", "+ '-' * 5) for epoch in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses = [], []", "\"\"\"carries out a validation step on a mini-batch.\"\"\" out = self._CAE(X) loss =", "by beta. \"\"\" def loss(y_true, y_pred): # getting logits from sigmoid output: y_pred", "default='../workspaces', help='folder where the generated workspaces are stored. default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000,", "entry is bigger than 0.5, it is considered as 1: out_rounded = tf.cast(out", "shuffleing for training if it is needed: if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod", "True') #parser.add_argument('--pos_weight', type=float, default=2, # help='weight for positive weighting in cross entropy loss.", "def loss(y_true, y_pred): # getting logits from sigmoid output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(),", "# help='weight for positive weighting in cross entropy loss. 
default: 2') parser.add_argument('--model_dir', type=str,", "print('best validation accuracy: {}'.format(best_val_acc)) # loading the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def", "tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded) return metric.result().numpy() def _save_model(self): \"\"\"checking whether the path", "self._epoch_train_accs, self._epoch_val_accs = [], [] for X in self._train_data: self._train_on_batch(X) for X in", "the workspace saving folder if it does not exist yet: if not os.path.exists(self._workspace_dir):", "An instance of hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers instance - lass_func: A tensorflow.keras.losses", "epochs self._batch_size = args.batch_size # batch size self._train_shuffle = args.train_shuffle # whether to", "# whether to shuffle or not during training. self._model_dir = args.model_dir # directory", "self._train_shuffle = args.train_shuffle # whether to shuffle or not during training. self._model_dir =", "= int(self._num_workspaces * 0.2) test_size = int(self._num_workspaces * 0.2) train_size = self._num_workspaces -", "needed: if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static method for parsing", "print('model was saved to ' + self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" #", "/ {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the model, if it is the best so", "[] self._epoch_train_accs, self._epoch_val_accs = [], [] self._set_up_from_args(args) def __call__(self): \"\"\"Training loop for CAE.", "the tf.data.Dataset object. 
workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating the Datasets from", ">= 0.5, tf.float32) metric = tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded) return metric.result().numpy() def", "to shuffle or not during training. default: True') #parser.add_argument('--pos_weight', type=float, default=2, # help='weight", "or num_of_files number of workspaces, whichewer is smaller: num_to_read = num_of_files if num_of_files", "parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number of objects in the workspace. default: 5') parser.add_argument('--obj_size_avg',", "creating the workspace saving folder if it does not exist yet: if not", "args): \"\"\"Initializing a CAE trainer object. Args: - CAE: a Convolutional Autoencoder. An", "saved them into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" # list of", "or generates workspaces to train on. Then it trains the CAE. \"\"\" if", ": (train_size + val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) # setting up", "tensorflow.keras.losses instance \"\"\" self._CAE = CAE self._optimizer = optimizer self._loss_func = loss_func self._train_losses,", "size of the objects in the workspace. default: 8') # CAE related: parser.add_argument('--pooling',", "to save the best trained model. default: ../models/cae') # workspace related parser.add_argument('--gen_workspace', type=bool,", "np.arrays: workspace_list = [] for i in range(num_to_read): path = files[i] # loading", "of the CAE. default: max') parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension of the CAE.", "generates workspaces to train on. Then it trains the CAE. 
\"\"\" if self._gen_workspace:", "i in range(num_to_read): path = files[i] # loading and adding an extra dimension", "best_val_loss: best_val_loss = self._val_losses[-1] self._save_model() if self._val_accs[-1] >= best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model()", "ENDED' + '-' * 5) print('best validation loss: {}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc))", "mini-batch.\"\"\" out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self,", "self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] for X in self._train_data:", "\"\"\"A trainer class for training a Convolutional Autoencoder.\"\"\" def __init__(self, CAE, optimizer, loss_func,", "random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name = 'ws_' + str(i) + '.csv' path = os.path.join(self._workspace_dir,", "= 0 # Training Loop # print('-' * 5 + 'TRAINING HAS STARTED'", "whether to shuffle or not during training. self._model_dir = args.model_dir # directory to", "to shuffle or not during training. self._model_dir = args.model_dir # directory to save", "HAS ENDED' + '-' * 5) print('best validation loss: {}'.format(best_val_loss)) print('best validation accuracy:", "a CAE trainer object. Args: - CAE: a Convolutional Autoencoder. An instance of", "to ' + self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" # creating the workspace", "# number of training epochs self._batch_size = args.batch_size # batch size self._train_shuffle =", "# whether to newly generate workspaces (True) or use saved ones (False) self._workspace_dir", "instance - lass_func: A tensorflow.keras.losses instance \"\"\" self._CAE = CAE self._optimizer = optimizer", "weighting in cross entropy loss. 
default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to save", "type=int, default=200, help='number of epochs to train. default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number", "self._batch_size = args.batch_size # batch size self._train_shuffle = args.train_shuffle # whether to shuffle", "into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" # list of file names", "{} / {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the model, if it is the best", "has to be saved exists or not and sace the model.\"\"\" if not", "batch size will be added by the tf.data.Dataset object. workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32')", "workspace. default: 8') # CAE related: parser.add_argument('--pooling', type=str, default='max', help='pooling type of the", "loss_func, args): \"\"\"Initializing a CAE trainer object. 
Args: - CAE: a Convolutional Autoencoder.", "grid points in the workspace self._num_obj_max = args.num_obj_max # maximum number of objects", "self._val_data: self._validate_on_batch(X) # losses and accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs))", "import tensorflow as tf import tensorflow.keras.optimizers as opt import numpy as np import", "[os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)] num_of_files = len(files) # read in either", "/ Val acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the model, if", "for X in self._val_data: self._validate_on_batch(X) # losses and accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses))", "= [], [] for X in self._train_data: self._train_on_batch(X) for X in self._val_data: self._validate_on_batch(X)", "self._val_accs[-1])) # saving the model, if it is the best so far: if", "parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16], help='number of filters in the conv layers.", "< self._num_workspaces else self._num_workspaces # reading in the workspaces into a list of", "added by the tf.data.Dataset object. workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating the", "16]') return parser def _set_up_from_args(self, args): \"\"\"setting up some variables from the parsed", "the best model during training. 
# workspace related self._gen_workspace = args.gen_workspace # whether", "- val_size - test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size", "out_rounded = tf.cast(out >= 0.5, tf.float32) metric = tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded)", "parsed arguments.\"\"\" # training related: self._epochs = args.epochs # number of training epochs", "Convolutional Autoencoder. An instance of hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers instance - lass_func:", "up shuffleing for training if it is needed: if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size)", "+ str(i) + '.csv' path = os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated {} workspaces", "self._num_workspaces # reading in the workspaces into a list of np.arrays: workspace_list =", "the Datasets from the list: val_size = int(self._num_workspaces * 0.2) test_size = int(self._num_workspaces", "a CAEtrainer\"\"\" parser = argparse.ArgumentParser() # training related parser.add_argument('--epochs', type=int, default=200, help='number of", "+ 'TRAINING HAS STARTED' + '-' * 5) for epoch in range(self._epochs): self._epoch_train_losses,", "= tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred / (1 - y_pred))", "= tf.cast(out >= 0.5, tf.float32) metric = tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded) return", "metric.update_state(X, out_rounded) return metric.result().numpy() def _save_model(self): \"\"\"checking whether the path where the model", "objects in the workspace. default: 8') # CAE related: parser.add_argument('--pooling', type=str, default='max', help='pooling", "# training related parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train. 
default: 200')", "parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to save the best trained model. default: ../models/cae') #", "exist yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in range(self._num_workspaces): workspace = random_workspace(self._grid_size,", "CAEtrainer\"\"\" parser = argparse.ArgumentParser() # training related parser.add_argument('--epochs', type=int, default=200, help='number of epochs", "workspace) print('generated {} workspaces and saved them into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing", "(1 - tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred / (1 - y_pred)) loss = tf.nn.weighted_cross_entropy_with_logits(y_true,", "if it is the best so far: if self._val_losses[-1] < best_val_loss: best_val_loss =", "weighted cross entropy loss function weighted by beta. \"\"\" def loss(y_true, y_pred): #", "int(self._num_workspaces * 0.2) train_size = self._num_workspaces - val_size - test_size self._train_data = Dataset.from_tensor_slices(workspace_list[", "not during training. default: True') #parser.add_argument('--pos_weight', type=float, default=2, # help='weight for positive weighting", "if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static method for parsing the", "2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to save the best trained model. default: ../models/cae')", "in the workspace. 
default: 8') # CAE related: parser.add_argument('--pooling', type=str, default='max', help='pooling type", "{}'.format(best_val_acc)) # loading the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self, X): \"\"\"carries", "path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was saved to ' + self._model_dir) def", "of the objects in the workspace. default: 8') # CAE related: parser.add_argument('--pooling', type=str,", "related parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False, saved workspaces are used. default: False') parser.add_argument('--workspace_dir',", "[], [] self._set_up_from_args(args) def __call__(self): \"\"\"Training loop for CAE. It first either loads", "in the workspace. default: 32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number of objects in", "generate workspaces (True) or use saved ones (False) self._workspace_dir = args.workspace_dir # folder", "type=str, default='../workspaces', help='folder where the generated workspaces are stored. default: ../workspaces') parser.add_argument('--num_workspaces', type=int,", "whether the path where the model has to be saved exists or not", "axis=2).astype('float32') workspace_list.append(workspace) # creating the Datasets from the list: val_size = int(self._num_workspaces *", "max') parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension of the CAE. default: 16') parser.add_argument('--conv_filters', type=int,", "epochs to train. default: 1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch size. default: 32') parser.add_argument('--train_shuffle',", "best so far: if self._val_losses[-1] < best_val_loss: best_val_loss = self._val_losses[-1] self._save_model() if self._val_accs[-1]", "files[i] # loading and adding an extra dimension to the numpy array. 
#", "self._load_workspaces() best_val_loss = 1e6 best_val_acc = 0 # Training Loop # print('-' *", "the best so far: if self._val_losses[-1] < best_val_loss: best_val_loss = self._val_losses[-1] self._save_model() if", "print('generated {} workspaces and saved them into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved", "generated workspaces are stored. default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number of workspaces to", "]).batch(self._batch_size) # setting up shuffleing for training if it is needed: if self._train_shuffle:", "y_pred): # getting logits from sigmoid output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 -", "\"\"\"Loadeing pre-saved workspaces.\"\"\" # list of file names in the workspace directory: files", "for parsing the arguments before instantiating a CAEtrainer\"\"\" parser = argparse.ArgumentParser() # training", "validation loss: {}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc)) # loading the best model: self._CAE.load_weights(os.path.join(self._model_dir,", "saving folder if it does not exist yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for", "{} workspaces and saved them into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\"", "import tensorflow.keras.optimizers as opt import numpy as np import argparse import os import", "= self._val_losses[-1] self._save_model() if self._val_accs[-1] >= best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model() print('-' *", "(batch_size, height, width, channel_size) # batch size will be added by the tf.data.Dataset", "tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred / (1 - y_pred)) loss", "a mini-batch.\"\"\" with tf.GradientTape() as tape: out = self._CAE(X) 
loss = self._loss_func(X, out)", "#@tf.function def _validate_on_batch(self, X): \"\"\"carries out a validation step on a mini-batch.\"\"\" out", "the best trained model. default: ../models/cae') # workspace related parser.add_argument('--gen_workspace', type=bool, default=False, help='If", "'model.h5')) #@tf.function def _train_on_batch(self, X): \"\"\"carries out a gradient step on a mini-batch.\"\"\"", "# neede because the Conv2D layer waits for shape (batch_size, height, width, channel_size)", "[] self._train_accs, self._val_accs = [], [] self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs", "step on a mini-batch.\"\"\" out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X,", "range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name = 'ws_' + str(i) + '.csv'", "validation accuracy: {}'.format(best_val_acc)) # loading the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self,", "self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def", "the parsed arguments.\"\"\" # training related: self._epochs = args.epochs # number of training", "\"\"\"Initializing a CAE trainer object. Args: - CAE: a Convolutional Autoencoder. 
An instance", "0 # Training Loop # print('-' * 5 + 'TRAINING HAS STARTED' +", "- lass_func: A tensorflow.keras.losses instance \"\"\" self._CAE = CAE self._optimizer = optimizer self._loss_func", "self._validate_on_batch(X) # losses and accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH", "object. workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating the Datasets from the list:", "train. default: 1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch size. default: 32') parser.add_argument('--train_shuffle', type=bool, default=True,", "parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to shuffle or not during training. default: True') #parser.add_argument('--pos_weight',", "for positive weighting in cross entropy loss. default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory", "[] self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] self._set_up_from_args(args) def", "'-' * 5) print('best validation loss: {}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc)) # loading", "# number of grid points in the workspace self._num_obj_max = args.num_obj_max # maximum", "self._num_obj_max = args.num_obj_max # maximum number of objects in the workspace self._obj_size_avg =", "print('-' * 5 + 'TRAINING HAS ENDED' + '-' * 5) print('best validation", "to train. default: 1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch size. 
default: 32') parser.add_argument('--train_shuffle', type=bool,", "tape: out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads =", "the list: val_size = int(self._num_workspaces * 0.2) test_size = int(self._num_workspaces * 0.2) train_size", "help='pooling type of the CAE. default: max') parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension of", "it does not exist yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in range(self._num_workspaces):", "= int(self._num_workspaces * 0.2) train_size = self._num_workspaces - val_size - test_size self._train_data =", "during training. # workspace related self._gen_workspace = args.gen_workspace # whether to newly generate", "self._loss_func = loss_func self._train_losses, self._val_losses = [], [] self._train_accs, self._val_accs = [], []", "workspaces and saved them into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" #", "+ '-' * 5) print('best validation loss: {}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc)) #", "grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X): \"\"\"carries out a", "workspaces are stored. default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number of workspaces to use", "conv layers. 
default: [4, 8, 16]') return parser def _set_up_from_args(self, args): \"\"\"setting up", "mini-batch.\"\"\" with tf.GradientTape() as tape: out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy())", "from tensorflow.keras.losses import BinaryCrossentropy from hwr.cae.cae import CAE from hwr.random_workspace import random_workspace class", "instance of hwr.cae.cae.CAE - optimizer: A tensorflow.keras.optimizers instance - lass_func: A tensorflow.keras.losses instance", "than 0.5, it is considered as 1: out_rounded = tf.cast(out >= 0.5, tf.float32)", "losses and accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train", "str(i) + '.csv' path = os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated {} workspaces and", "random_workspace class CAEtrainer(): \"\"\"A trainer class for training a Convolutional Autoencoder.\"\"\" def __init__(self,", "Then it trains the CAE. \"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6", "if self._val_accs[-1] >= best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model() print('-' * 5 + 'TRAINING", "adding an extra dimension to the numpy array. # neede because the Conv2D", "extra dimension to the numpy array. # neede because the Conv2D layer waits", "parser.add_argument('--pooling', type=str, default='max', help='pooling type of the CAE. 
default: max') parser.add_argument('--latent_dim', type=int, default=16,", "= optimizer self._loss_func = loss_func self._train_losses, self._val_losses = [], [] self._train_accs, self._val_accs =", "self._val_losses[-1] < best_val_loss: best_val_loss = self._val_losses[-1] self._save_model() if self._val_accs[-1] >= best_val_acc: best_val_acc =", "tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred / (1 - y_pred)) loss =", "Loop # print('-' * 5 + 'TRAINING HAS STARTED' + '-' * 5)", "beta. \"\"\" def loss(y_true, y_pred): # getting logits from sigmoid output: y_pred =", "../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number of workspaces to use for training. default: 1000')", "\"\"\"carries out a gradient step on a mini-batch.\"\"\" with tf.GradientTape() as tape: out", "# list of file names in the workspace directory: files = [os.path.join(self._workspace_dir, name)", "where the generated workspaces are stored. default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number of", "out a validation step on a mini-batch.\"\"\" out = self._CAE(X) loss = self._loss_func(X,", "16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16], help='number of filters in the conv", "workspaces can be loaded self._num_workspaces = args.num_workspaces # numbr of worksapces to train", "acc / Val acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the model,", "loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc / Val acc : {}", "args.grid_size # number of grid points in the workspace self._num_obj_max = args.num_obj_max #", "weighted by beta. 
\"\"\" def loss(y_true, y_pred): # getting logits from sigmoid output:", "= Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) # setting up shuffleing for training if it", "args.workspace_dir # folder from which saved workspaces can be loaded self._num_workspaces = args.num_workspaces", "training related: self._epochs = args.epochs # number of training epochs self._batch_size = args.batch_size", "default=16, help='latent dimension of the CAE. default: 16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8,", "out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self,", "out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X, out): \"\"\"calculates the accuracy for a", "file_name) self._CAE.save_weights(path) print('model was saved to ' + self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new", "it is needed: if self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static method", "= tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X): \"\"\"carries out a validation", "args.gen_workspace # whether to newly generate workspaces (True) or use saved ones (False)", "self._val_losses[-1] self._save_model() if self._val_accs[-1] >= best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model() print('-' * 5", "self._val_accs = [], [] self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [],", "default=8, help='average size of the objects in the 
workspace. default: 8') # CAE", "file names in the workspace directory: files = [os.path.join(self._workspace_dir, name) for name in", "X): \"\"\"carries out a validation step on a mini-batch.\"\"\" out = self._CAE(X) loss", "newly generate workspaces (True) or use saved ones (False) self._workspace_dir = args.workspace_dir #", "by the tf.data.Dataset object. workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating the Datasets", "reading in the workspaces into a list of np.arrays: workspace_list = [] for", "+ 'TRAINING HAS ENDED' + '-' * 5) print('best validation loss: {}'.format(best_val_loss)) print('best", "\"\"\"setting up some variables from the parsed arguments.\"\"\" # training related: self._epochs =", "instantiating a CAEtrainer\"\"\" parser = argparse.ArgumentParser() # training related parser.add_argument('--epochs', type=int, default=200, help='number", "args.epochs # number of training epochs self._batch_size = args.batch_size # batch size self._train_shuffle", "are stored. 
default: ../workspaces') parser.add_argument('--num_workspaces', type=int, default=1000, help='number of workspaces to use for", "val_size): ]).batch(self._batch_size) # setting up shuffleing for training if it is needed: if", ">= best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model() print('-' * 5 + 'TRAINING HAS ENDED'", "val_size = int(self._num_workspaces * 0.2) test_size = int(self._num_workspaces * 0.2) train_size = self._num_workspaces", "#@tf.function def _train_on_batch(self, X): \"\"\"carries out a gradient step on a mini-batch.\"\"\" with", "saved workspaces can be loaded self._num_workspaces = args.num_workspaces # numbr of worksapces to", "print('Train acc / Val acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the", "1: out_rounded = tf.cast(out >= 0.5, tf.float32) metric = tf.keras.metrics.Accuracy() _ = metric.update_state(X,", "from sigmoid output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits = tf.math.log(y_pred", "workspaces, whichewer is smaller: num_to_read = num_of_files if num_of_files < self._num_workspaces else self._num_workspaces", "0.5, it is considered as 1: out_rounded = tf.cast(out >= 0.5, tf.float32) metric", "arguments before instantiating a CAEtrainer\"\"\" parser = argparse.ArgumentParser() # training related parser.add_argument('--epochs', type=int,", "name) for name in os.listdir(self._workspace_dir)] num_of_files = len(files) # read in either self._num_workspaces", "default=32, help='number of grid points in the workspace. default: 32') parser.add_argument('--num_obj_max', type=int, default=5,", "a weighted cross entropy loss function weighted by beta. \"\"\" def loss(y_true, y_pred):", "- optimizer: A tensorflow.keras.optimizers instance - lass_func: A tensorflow.keras.losses instance \"\"\" self._CAE =", "5 + 'TRAINING HAS STARTED' + '-' * 5) for epoch in range(self._epochs):", "of objects in the workspace. 
default: 5') parser.add_argument('--obj_size_avg', type=int, default=8, help='average size of", "default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to save the best trained model. default:", "parsing the arguments before instantiating a CAEtrainer\"\"\" parser = argparse.ArgumentParser() # training related", "loss function weighted by beta. \"\"\" def loss(y_true, y_pred): # getting logits from", "Training Loop # print('-' * 5 + 'TRAINING HAS STARTED' + '-' *", "is smaller: num_to_read = num_of_files if num_of_files < self._num_workspaces else self._num_workspaces # reading", "tf.data.Dataset object. workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating the Datasets from the", "num_of_files = len(files) # read in either self._num_workspaces or num_of_files number of workspaces,", "= args.num_workspaces # numbr of worksapces to train on self._grid_size = args.grid_size #", "positive weighting in cross entropy loss. default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to", "# numbr of worksapces to train on self._grid_size = args.grid_size # number of", "# creating the workspace saving folder if it does not exist yet: if", "CAE from hwr.random_workspace import random_workspace class CAEtrainer(): \"\"\"A trainer class for training a", "help='directory to save the best trained model. 
default: ../models/cae') # workspace related parser.add_argument('--gen_workspace',", "where the model has to be saved exists or not and sace the", "as tape: out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads", "+ val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) # setting up shuffleing for", ": {} / {}'.format(self._train_accs[-1], self._val_accs[-1])) # saving the model, if it is the", "os.path.exists(self._model_dir): os.makedirs(self._model_dir) file_name = 'model.h5' path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was saved", "self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size) self._test_data =", "type=int, default=32, help='batch size. default: 32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to shuffle or", "weighted_cross_entropy(beta): \"\"\"returns a weighted cross entropy loss function weighted by beta. \"\"\" def", "default=False, help='If gen_workspace==False, saved workspaces are used. default: False') parser.add_argument('--workspace_dir', type=str, default='../workspaces', help='folder", "type=int, default=5, help='maximum number of objects in the workspace. 
default: 5') parser.add_argument('--obj_size_avg', type=int,", "self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X): \"\"\"carries", "loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights))", "it is the best so far: if self._val_losses[-1] < best_val_loss: best_val_loss = self._val_losses[-1]", "self._train_shuffle: self._train_data = self._train_data.shuffle(buffer_size=train_size) @staticmethod def get_arguments(): \"\"\"static method for parsing the arguments", "= tf.math.log(y_pred / (1 - y_pred)) loss = tf.nn.weighted_cross_entropy_with_logits(y_true, y_logits, pos_weight=beta) return tf.reduce_mean(loss)", "8') # CAE related: parser.add_argument('--pooling', type=str, default='max', help='pooling type of the CAE. 
default:", "hwr.cae.cae import CAE from hwr.random_workspace import random_workspace class CAEtrainer(): \"\"\"A trainer class for", "self._CAE.trainable_weights)) #@tf.function def _validate_on_batch(self, X): \"\"\"carries out a validation step on a mini-batch.\"\"\"", "logits from sigmoid output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon())) y_logits =", "self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X, out): \"\"\"calculates the accuracy for a mini-batch.\"\"\"", "# losses and accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch))", "self._num_obj_max, self._obj_size_avg) file_name = 'ws_' + str(i) + '.csv' path = os.path.join(self._workspace_dir, file_name)", "type=int, default=8, help='average size of the objects in the workspace. default: 8') #", "= args.batch_size # batch size self._train_shuffle = args.train_shuffle # whether to shuffle or", "the CAE. \"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6 best_val_acc = 0", "= args.train_shuffle # whether to shuffle or not during training. self._model_dir = args.model_dir", "type=float, default=1e-3, help='number of epochs to train. default: 1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch", "of epochs to train. default: 1e-3') parser.add_argument('--batch_size', type=int, default=32, help='batch size. 
default: 32')", "import random_workspace class CAEtrainer(): \"\"\"A trainer class for training a Convolutional Autoencoder.\"\"\" def", "out): \"\"\"calculates the accuracy for a mini-batch.\"\"\" # if an entry is bigger", "default=True, help='Whether to shuffle or not during training. default: True') #parser.add_argument('--pos_weight', type=float, default=2,", "* 5 + 'TRAINING HAS STARTED' + '-' * 5) for epoch in", "help='batch size. default: 32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to shuffle or not during", "workspaces to train on. Then it trains the CAE. \"\"\" if self._gen_workspace: self._generate_new_workspaces()", "of training epochs self._batch_size = args.batch_size # batch size self._train_shuffle = args.train_shuffle #", "pre-saved workspaces.\"\"\" # list of file names in the workspace directory: files =", "default: 16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16], help='number of filters in the", "model. default: ../models/cae') # workspace related parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False, saved workspaces", "[], [] self._train_accs, self._val_accs = [], [] self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs,", "5) print('best validation loss: {}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc)) # loading the best", "epochs to train. default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of epochs to train.", "help='latent dimension of the CAE. default: 16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16],", "object. Args: - CAE: a Convolutional Autoencoder. 
An instance of hwr.cae.cae.CAE - optimizer:", "'ws_' + str(i) + '.csv' path = os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated {}", "self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] self._set_up_from_args(args) def __call__(self): \"\"\"Training", "for X in self._train_data: self._train_on_batch(X) for X in self._val_data: self._validate_on_batch(X) # losses and", "def __call__(self): \"\"\"Training loop for CAE. It first either loads pre-generated or generates", "was saved to ' + self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" # creating", "self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss / Val loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1]))", "loads pre-generated or generates workspaces to train on. Then it trains the CAE.", "height, width, channel_size) # batch size will be added by the tf.data.Dataset object.", "* 5) for epoch in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs", "be saved exists or not and sace the model.\"\"\" if not os.path.exists(self._model_dir): os.makedirs(self._model_dir)", "X in self._val_data: self._validate_on_batch(X) # losses and accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs))", "size of the objects in the workspace def weighted_cross_entropy(beta): \"\"\"returns a weighted cross", "num_of_files < self._num_workspaces else self._num_workspaces # reading in the workspaces into a list", "to save the best model during training. 
# workspace related self._gen_workspace = args.gen_workspace", "workspace self._obj_size_avg = args.obj_size_avg # average size of the objects in the workspace", "5) for epoch in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs =", "waits for shape (batch_size, height, width, channel_size) # batch size will be added", "self._workspace_dir = args.workspace_dir # folder from which saved workspaces can be loaded self._num_workspaces", "= CAE self._optimizer = optimizer self._loss_func = loss_func self._train_losses, self._val_losses = [], []", "size. default: 32') parser.add_argument('--train_shuffle', type=bool, default=True, help='Whether to shuffle or not during training.", "training epochs self._batch_size = args.batch_size # batch size self._train_shuffle = args.train_shuffle # whether", "numbr of worksapces to train on self._grid_size = args.grid_size # number of grid", "= loss_func self._train_losses, self._val_losses = [], [] self._train_accs, self._val_accs = [], [] self._epoch_train_losses,", "self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size) # setting up shuffleing for training if", "1e6 best_val_acc = 0 # Training Loop # print('-' * 5 + 'TRAINING", "_generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" # creating the workspace saving folder if it does", "during training. default: True') #parser.add_argument('--pos_weight', type=float, default=2, # help='weight for positive weighting in", "CAE. default: max') parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension of the CAE. 
default: 16')", "return parser def _set_up_from_args(self, args): \"\"\"setting up some variables from the parsed arguments.\"\"\"", "or use saved ones (False) self._workspace_dir = args.workspace_dir # folder from which saved", "worksapces to train on self._grid_size = args.grid_size # number of grid points in", "np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating the Datasets from the list: val_size = int(self._num_workspaces", "which saved workspaces can be loaded self._num_workspaces = args.num_workspaces # numbr of worksapces", "#parser.add_argument('--pos_weight', type=float, default=2, # help='weight for positive weighting in cross entropy loss. default:", "A tensorflow.keras.optimizers instance - lass_func: A tensorflow.keras.losses instance \"\"\" self._CAE = CAE self._optimizer", "# Training Loop # print('-' * 5 + 'TRAINING HAS STARTED' + '-'", "does not exist yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in range(self._num_workspaces): workspace", "of workspaces, whichewer is smaller: num_to_read = num_of_files if num_of_files < self._num_workspaces else", "= os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace) print('generated {} workspaces and saved them into {}'.format(self._num_workspaces,", "= [], [] self._set_up_from_args(args) def __call__(self): \"\"\"Training loop for CAE. 
It first either", "before instantiating a CAEtrainer\"\"\" parser = argparse.ArgumentParser() # training related parser.add_argument('--epochs', type=int, default=200,", "class for training a Convolutional Autoencoder.\"\"\" def __init__(self, CAE, optimizer, loss_func, args): \"\"\"Initializing", "def _set_up_from_args(self, args): \"\"\"setting up some variables from the parsed arguments.\"\"\" # training", "name in os.listdir(self._workspace_dir)] num_of_files = len(files) # read in either self._num_workspaces or num_of_files", "self._CAE(X) loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out)) grads = tape.gradient(loss, self._CAE.trainable_weights) self._optimizer.apply_gradients(zip(grads,", "* 5 + 'TRAINING HAS ENDED' + '-' * 5) print('best validation loss:", "in range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name = 'ws_' + str(i) +", "and adding an extra dimension to the numpy array. # neede because the", "list: val_size = int(self._num_workspaces * 0.2) test_size = int(self._num_workspaces * 0.2) train_size =", "test_size = int(self._num_workspaces * 0.2) train_size = self._num_workspaces - val_size - test_size self._train_data", "help='Whether to shuffle or not during training. default: True') #parser.add_argument('--pos_weight', type=float, default=2, #", "is bigger than 0.5, it is considered as 1: out_rounded = tf.cast(out >=", "batch size self._train_shuffle = args.train_shuffle # whether to shuffle or not during training.", "type=float, default=2, # help='weight for positive weighting in cross entropy loss. 
default: 2')", "if it does not exist yet: if not os.path.exists(self._workspace_dir): os.mkdir(self._workspace_dir) for i in", "self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self, X): \"\"\"carries out a gradient step on a", "self._epoch_val_accs = [], [] self._set_up_from_args(args) def __call__(self): \"\"\"Training loop for CAE. It first", "self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] self._set_up_from_args(args) def __call__(self):", "number of workspaces, whichewer is smaller: num_to_read = num_of_files if num_of_files < self._num_workspaces", "np.savetxt(path, workspace) print('generated {} workspaces and saved them into {}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self):", "a list of np.arrays: workspace_list = [] for i in range(num_to_read): path =", "default: 8') # CAE related: parser.add_argument('--pooling', type=str, default='max', help='pooling type of the CAE.", "far: if self._val_losses[-1] < best_val_loss: best_val_loss = self._val_losses[-1] self._save_model() if self._val_accs[-1] >= best_val_acc:", "{}'.format(self._num_workspaces, self._workspace_dir)) def _load_workspaces(self): \"\"\"Loadeing pre-saved workspaces.\"\"\" # list of file names in", "= args.model_dir # directory to save the best model during training. # workspace", "the conv layers. 
default: [4, 8, 16]') return parser def _set_up_from_args(self, args): \"\"\"setting", "objects in the workspace self._obj_size_avg = args.obj_size_avg # average size of the objects", "train on self._grid_size = args.grid_size # number of grid points in the workspace", "tf.GradientTape() as tape: out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_train_losses.append(loss.numpy()) self._epoch_train_accs.append(self._calc_accuracy(X, out))", "os.mkdir(self._workspace_dir) for i in range(self._num_workspaces): workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name = 'ws_'", "the numpy array. # neede because the Conv2D layer waits for shape (batch_size,", "file_name = 'ws_' + str(i) + '.csv' path = os.path.join(self._workspace_dir, file_name) np.savetxt(path, workspace)", "if num_of_files < self._num_workspaces else self._num_workspaces # reading in the workspaces into a", "folder from which saved workspaces can be loaded self._num_workspaces = args.num_workspaces # numbr", "the workspace self._num_obj_max = args.num_obj_max # maximum number of objects in the workspace", "self._set_up_from_args(args) def __call__(self): \"\"\"Training loop for CAE. It first either loads pre-generated or", "the workspace directory: files = [os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)] num_of_files =", "be added by the tf.data.Dataset object. 
workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating", "\"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6 best_val_acc = 0 # Training", "#self._save_model() print('-' * 5 + 'TRAINING HAS ENDED' + '-' * 5) print('best", "saved to ' + self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" # creating the", "num_to_read = num_of_files if num_of_files < self._num_workspaces else self._num_workspaces # reading in the", "num_of_files if num_of_files < self._num_workspaces else self._num_workspaces # reading in the workspaces into", "self._CAE(X) loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X, out): \"\"\"calculates", "training. self._model_dir = args.model_dir # directory to save the best model during training.", "# training related: self._epochs = args.epochs # number of training epochs self._batch_size =", "the CAE. default: 16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16], help='number of filters", "ones (False) self._workspace_dir = args.workspace_dir # folder from which saved workspaces can be", "default=200, help='number of epochs to train. 
default: 200') parser.add_argument('--learning_rate', type=float, default=1e-3, help='number of", "= args.workspace_dir # folder from which saved workspaces can be loaded self._num_workspaces =", "out = self._CAE(X) loss = self._loss_func(X, out) self._epoch_val_losses.append(loss.numpy()) self._epoch_val_accs.append(self._calc_accuracy(X, out)) def _calc_accuracy(self, X,", "val_size - test_size self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size) self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size +", "HAS STARTED' + '-' * 5) for epoch in range(self._epochs): self._epoch_train_losses, self._epoch_val_losses =", "/ Val loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1])) print('Train acc / Val acc", "workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32') workspace_list.append(workspace) # creating the Datasets from the list: val_size", "self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size) self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size)", "training a Convolutional Autoencoder.\"\"\" def __init__(self, CAE, optimizer, loss_func, args): \"\"\"Initializing a CAE", "# average size of the objects in the workspace def weighted_cross_entropy(beta): \"\"\"returns a", "workspace self._num_obj_max = args.num_obj_max # maximum number of objects in the workspace self._obj_size_avg", "dimension of the CAE. default: 16') parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16], help='number", "default: 1000') parser.add_argument('--grid_size', type=int, default=32, help='number of grid points in the workspace. 
default:", "self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] for X in", "of grid points in the workspace self._num_obj_max = args.num_obj_max # maximum number of", "related parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train. default: 200') parser.add_argument('--learning_rate', type=float,", "range(self._epochs): self._epoch_train_losses, self._epoch_val_losses = [], [] self._epoch_train_accs, self._epoch_val_accs = [], [] for X", "model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self, X): \"\"\"carries out a gradient step on", "best model during training. # workspace related self._gen_workspace = args.gen_workspace # whether to", "model during training. # workspace related self._gen_workspace = args.gen_workspace # whether to newly", "workspaces (True) or use saved ones (False) self._workspace_dir = args.workspace_dir # folder from", "../models/cae') # workspace related parser.add_argument('--gen_workspace', type=bool, default=False, help='If gen_workspace==False, saved workspaces are used.", "args.batch_size # batch size self._train_shuffle = args.train_shuffle # whether to shuffle or not", "'model.h5' path = os.path.join(self._model_dir, file_name) self._CAE.save_weights(path) print('model was saved to ' + self._model_dir)", "tf import tensorflow.keras.optimizers as opt import numpy as np import argparse import os", "of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses)) self._val_accs.append(np.mean(self._epoch_val_accs)) print('EPOCH {}'.format(epoch)) print('Train loss / Val", "accuracy for a mini-batch.\"\"\" # if an entry is bigger than 0.5, it", "' + self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\" # creating the workspace 
saving", "loss(y_true, y_pred): # getting logits from sigmoid output: y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1", "tensorflow.keras.optimizers instance - lass_func: A tensorflow.keras.losses instance \"\"\" self._CAE = CAE self._optimizer =", "help='weight for positive weighting in cross entropy loss. default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae',", "a validation step on a mini-batch.\"\"\" out = self._CAE(X) loss = self._loss_func(X, out)", "if self._val_losses[-1] < best_val_loss: best_val_loss = self._val_losses[-1] self._save_model() if self._val_accs[-1] >= best_val_acc: best_val_acc", "self._val_accs[-1] >= best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model() print('-' * 5 + 'TRAINING HAS", "loss: {}'.format(best_val_loss)) print('best validation accuracy: {}'.format(best_val_acc)) # loading the best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5'))", "self._CAE.save_weights(path) print('model was saved to ' + self._model_dir) def _generate_new_workspaces(self): \"\"\"Generating new workspaces.\"\"\"", "best_val_acc = 0 # Training Loop # print('-' * 5 + 'TRAINING HAS", "default=2, # help='weight for positive weighting in cross entropy loss. default: 2') parser.add_argument('--model_dir',", "points in the workspace. default: 32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number of objects", "whether to newly generate workspaces (True) or use saved ones (False) self._workspace_dir =", "os import matplotlib.pyplot as plt from tensorflow.data import Dataset from tensorflow.keras.losses import BinaryCrossentropy", "files = [os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)] num_of_files = len(files) # read", "array. 
# neede because the Conv2D layer waits for shape (batch_size, height, width,", "in self._train_data: self._train_on_batch(X) for X in self._val_data: self._validate_on_batch(X) # losses and accuracy of", "in self._val_data: self._validate_on_batch(X) # losses and accuracy of the epoch: self._train_losses.append(np.mean(self._epoch_train_losses)) self._train_accs.append(np.mean(self._epoch_train_accs)) self._val_losses.append(np.mean(self._epoch_val_losses))", "out_rounded) return metric.result().numpy() def _save_model(self): \"\"\"checking whether the path where the model has", "neede because the Conv2D layer waits for shape (batch_size, height, width, channel_size) #", "entropy loss. default: 2') parser.add_argument('--model_dir', type=str, default='../models/cae', help='directory to save the best trained", "saved ones (False) self._workspace_dir = args.workspace_dir # folder from which saved workspaces can", "\"\"\"Training loop for CAE. It first either loads pre-generated or generates workspaces to", "CAE self._optimizer = optimizer self._loss_func = loss_func self._train_losses, self._val_losses = [], [] self._train_accs,", "self._save_model() if self._val_accs[-1] >= best_val_acc: best_val_acc = self._val_accs[-1] #self._save_model() print('-' * 5 +", "self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6 best_val_acc = 0 # Training Loop #", "in the workspace self._num_obj_max = args.num_obj_max # maximum number of objects in the", "numpy as np import argparse import os import matplotlib.pyplot as plt from tensorflow.data", "parser def _set_up_from_args(self, args): \"\"\"setting up some variables from the parsed arguments.\"\"\" #", "workspace. default: 32') parser.add_argument('--num_obj_max', type=int, default=5, help='maximum number of objects in the workspace.", "CAE. It first either loads pre-generated or generates workspaces to train on. 
Then", "workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg) file_name = 'ws_' + str(i) + '.csv' path", "def weighted_cross_entropy(beta): \"\"\"returns a weighted cross entropy loss function weighted by beta. \"\"\"", "best model: self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5')) #@tf.function def _train_on_batch(self, X): \"\"\"carries out a gradient step", "metric = tf.keras.metrics.Accuracy() _ = metric.update_state(X, out_rounded) return metric.result().numpy() def _save_model(self): \"\"\"checking whether", "CAE. \"\"\" if self._gen_workspace: self._generate_new_workspaces() self._load_workspaces() best_val_loss = 1e6 best_val_acc = 0 #" ]
[ "data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \" + str(round(float(pitch),", "os.remove(output_data) else: replica_energies, replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, )", "epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} # Bonded interaction properties bond_length = 7.5 *", "mean(np.array([float(p) for p in p_list])) radius = mean(np.array([float(r) for r in r_list])) monomers_per_turn", "+ \"/output.nc\") number_replicas = 20 min_temp = 100.0 * unit.kelvin max_temp = 250.0", "replica_energies, replica_positions, file_name=output_file ) # if not os.path.exists(output_data): p_list = [] r_list =", "bond_length, \"sc_sc_bond_length\": bond_length, } sigma = 2.0 * bond_length sigmas = {\"bb_bb_sigma\": sigma,", "import unit from foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight import ( get_mbar_expectation, get_free_energy_differences, get_temperature_list,", "for r in r_list])) monomers_per_turn = mean(np.array([float(mpt) for mpt in mpt_list])) data =", "output total_simulation_time = 1.0 * unit.nanosecond # Units = picoseconds simulation_time_step = 5.0", "include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, ) output_data = str( str(top_directory) + \"/torsions_\"", "sigma_list]) y = np.unique([epsilon._value for epsilon in epsilon_list]) X, Y = np.meshgrid(x, y)", "58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range", "bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": 
bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant = 2000", "2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" ) minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology,", "for i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for temp in folding_temperature]) x", "str(str(top_directory) + \"/output.nc\") number_replicas = 20 min_temp = 100.0 * unit.kelvin max_temp =", "equil_bond_angle = 92 equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\":", "0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle *", "not os.path.exists(output_data): p_list = [] r_list = [] mpt_list = [] for structure", "torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0,", "settings print_frequency = 20 # Number of steps to skip when printing output", "else: exchange_attempts = 10 ### # # Coarse grained model settings # ###", "/ unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant,", "\" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees\" ) print(", "if total_steps > 10000: exchange_attempts = round(total_steps / 1000) else: exchange_attempts = 10", "temp in folding_temperature]) x = np.unique([sigma._value for sigma in sigma_list]) y = np.unique([epsilon._value", "= 100.0 * unit.amu masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length = 7.5", 
"replica_energies, replica_positions, replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data,", "\"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths,", "str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" ) minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file )", "p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for p in p_list])) radius = mean(np.array([float(r)", "str(round(float(pitch), 3)) + \" \" + str(round(float(radius), 3)) + \" \" + str(round(float(monomers_per_turn),", "/ 3.1415, 1)) + \" degrees.\" ) equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\":", "monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for p in p_list]))", "8 backbone_lengths = [1] sidechain_lengths = [1] sidechain_positions = [0] include_bond_forces = False", "in new_temp_list]) except: temperatures = np.array([temperature for temperature in new_temp_list]) folding_temperature = []", "of \" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees.\" )", "cgmodel.positions = structure pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch =", "total_simulation_time = 1.0 * unit.nanosecond # Units = picoseconds simulation_time_step = 5.0 *", "\"output\" if not 
os.path.exists(top_directory): os.mkdir(top_directory) polymer_length = 8 backbone_lengths = [1] sidechain_lengths =", "r in r_list])) monomers_per_turn = mean(np.array([float(mpt) for mpt in mpt_list])) data = open(\"helical_data.dat\",", "bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } sigma = 2.0", "* 3.1415 / 180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25,", "sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds,", "\"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\":", "replica_energies, replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions)", "* unit.kilojoule_per_mole / unit.nanometer / unit.nanometer bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant,", "<gh_stars>1-10 import os import numpy as np import matplotlib.pyplot as pyplot from statistics", "1250 * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer 
bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\":", "\"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list = [] radius_list = [] data_file =", "CGModel from foldamers.parameters.reweight import ( get_mbar_expectation, get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc import calculate_heat_capacity", "radius = mean(np.array([float(r) for r in r_list])) monomers_per_turn = mean(np.array([float(mpt) for mpt in", "masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length = 7.5 * unit.angstrom bond_lengths =", "replica_positions) output_file = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" +", "equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list = [] radius_list =", "simulation settings output_data = str(str(top_directory) + \"/output.nc\") number_replicas = 20 min_temp = 100.0", "\"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list = [] radius_list = [] data_file = \"helical_data.dat\" if", "torsion_force_constant = 2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\":", "[] for C_v in C_v_list: C_v = np.array([C_v[i][0] for i in range(len(C_v))]) folding_temperature.append(max(C_v))", "= 2.0 * bond_length sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\": sigma} epsilon", "bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant = 1250 * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer", "= { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant = 2000 *", "3)) + \" \" + str(round(float(monomers_per_turn), 3)) + \"\\n\" ) data.close() 
file_name =", "+ \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" ) minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies,", "for temperature in temperature_list]) try: temperatures = np.array([temperature._value for temperature in new_temp_list]) except:", "\"sc_sc_bond_length\": bond_length, } sigma = 2.0 * bond_length sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\":", "10 ### # # Coarse grained model settings # ### mass = 100.0", "include_nonbonded_forces = True include_torsion_forces = True constrain_bonds = True # OpenMM simulation settings", "\"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\":", "calculate_heat_capacity from foldamers.parameters.secondary_structure import get_helical_parameters from cg_openmm.build.cg_build import build_topology from cg_openmm.simulation.rep_exch import *", "( get_mbar_expectation, get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure import get_helical_parameters", "= [] mpt_list = [] for structure in minimum_energy_structures: cgmodel.positions = structure pitch,", "+ str(round(float(pitch), 3)) + \" \" + str(round(float(radius), 3)) + \" \" +", "print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2))", "Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant =", "= np.array([temp for temp in folding_temperature]) x = np.unique([sigma._value for sigma in sigma_list])", "settings # ### mass = 100.0 * unit.amu masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\":", 
"epsilon_list]) X, Y = np.meshgrid(x, y) Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm", "foldamers.parameters.secondary_structure import get_helical_parameters from cg_openmm.build.cg_build import build_topology from cg_openmm.simulation.rep_exch import * grid_size =", "] equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing", "run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success = True", "3)) + \" \" + str(round(float(radius), 3)) + \" \" + str(round(float(monomers_per_turn), 3))", "not success: try: replica_energies, replica_positions, replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step,", "Radius (Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant = 2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant,", "np.meshgrid(x, y) Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol", "get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for p in p_list])) radius =", "for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for", "+ str(round(float(monomers_per_turn), 3)) + \"\\n\" ) data.close() file_name = str(str(top_directory) + \"/heat_capacity.png\") figure", "cgmodel.system, cgmodel.positions, temperature_list=temperature_list, 
simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success = True except: os.remove(output_data)", "data = open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) +", "grained model\") print( \"with bb_bb_bb_bb torsion angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0", "os.path.exists(output_data): p_list = [] r_list = [] mpt_list = [] for structure in", "+ str(round(float(radius), 3)) + \" \" + str(round(float(monomers_per_turn), 3)) + \"\\n\" ) data.close()", "model settings # ### mass = 100.0 * unit.amu masses = {\"backbone_bead_masses\": mass,", "in mpt_list])) data = open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle,", "\"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list = np.array([temperature._value for temperature in temperature_list]) try: temperatures", "y = np.unique([epsilon._value for epsilon in epsilon_list]) X, Y = np.meshgrid(x, y) Z", "of steps to skip when printing output total_simulation_time = 1.0 * unit.nanosecond #", "* unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant", "equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations", "C_v = np.array([C_v[i][0] for i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for temp", "\"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles =", "= run_replica_exchange( 
cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success =", "str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\" ) if not os.path.exists(output_data):", "properties bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length,", "+ \" degrees.\" ) equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle,", "os import numpy as np import matplotlib.pyplot as pyplot from statistics import mean", "equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list = [] radius_list = [] data_file = \"helical_data.dat\"", "folding_temperature]) x = np.unique([sigma._value for sigma in sigma_list]) y = np.unique([epsilon._value for epsilon", "unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange) simulation settings output_data = str(str(top_directory)", "os.mkdir(top_directory) polymer_length = 8 backbone_lengths = [1] sidechain_lengths = [1] sidechain_positions = [0]", "{ \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0,", "equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle,", "= { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": 
bond_length, } sigma = 2.0 *", "* unit.kelvin max_temp = 250.0 * unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp, number_replicas) if", "= CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants,", "os.path.exists(data_file): data = open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms)", "mean(np.array([float(mpt) for mpt in mpt_list])) data = open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) +", "= np.meshgrid(x, y) Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\") pyplot.ylabel(\"$\\epsilon$ (", "success: try: replica_energies, replica_positions, replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time,", "print( \"with bb_bb_bb_bb torsion angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415,", "[] radius_list = [] data_file = \"helical_data.dat\" if not os.path.exists(data_file): data = open(data_file,", "sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles:", "not os.path.exists(output_data): success = False while not success: try: replica_energies, replica_positions, replica_states =", "* unit.kilojoule_per_mole / unit.radian / unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant,", 
"max_temp, number_replicas) if total_steps > 10000: exchange_attempts = round(total_steps / 1000) else: exchange_attempts", "cgmodel.topology, replica_energies, replica_positions, file_name=output_file ) # if not os.path.exists(output_data): p_list = [] r_list", "equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, ) output_data = str( str(top_directory) +", "= { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant = 1250 *", "unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant =", "model\") print( \"with bb_bb_bb_bb torsion angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 /", "/ 3.1415, 1)) + \" degrees\" ) print( \"and sc_bb_bb_sc torsion angles of", "if not os.path.exists(top_directory): os.mkdir(top_directory) polymer_length = 8 backbone_lengths = [1] sidechain_lengths = [1]", "unit.radian / unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\":", "\" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees.\" ) equil_torsion_angles", "system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str( str(top_directory) +", "statistics import mean from simtk import unit from foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight", "epsilon} # Bonded interaction properties 
bond_length = 7.5 * unit.angstrom bond_lengths = {", "\" degrees\" ) print( \"and sc_bb_bb_sc torsion angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle *", "\"and sc_bb_bb_sc torsion angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1))", "simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success = True except: os.remove(output_data) else: replica_energies, replica_positions,", "temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str( str(top_directory) + \"/torsions_\" +", ") if not os.path.exists(output_data): success = False while not success: try: replica_energies, replica_positions,", "= [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle", "( kcal/mol )\") pyplot.title(\"Folding Temperature ( Kelvin )\") pyplot.pcolormesh(X, Y, Z) pyplot.colorbar() pyplot.savefig(file_name)", "2.0 * unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} # Bonded", "\"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant = 2000 * unit.kilojoule_per_mole / unit.radian / unit.radian bond_angle_force_constants", ") print( \"and sc_bb_bb_sc torsion angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 /", "+ str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees.\" ) equil_torsion_angles =", "output_data = str(str(top_directory) + \"/output.nc\") number_replicas = 20 min_temp = 100.0 * unit.kelvin", "\"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, 
\"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\":", "equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list = []", "C_v in C_v_list: C_v = np.array([C_v[i][0] for i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature =", "np import matplotlib.pyplot as pyplot from statistics import mean from simtk import unit", "+ \" \" + str(round(float(monomers_per_turn), 3)) + \"\\n\" ) data.close() file_name = str(str(top_directory)", "when printing output total_simulation_time = 1.0 * unit.nanosecond # Units = picoseconds simulation_time_step", "number_replicas) if total_steps > 10000: exchange_attempts = round(total_steps / 1000) else: exchange_attempts =", "= mean(np.array([float(p) for p in p_list])) radius = mean(np.array([float(r) for r in r_list]))", "(Degrees) Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant = 2000 torsion_force_constants =", "] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 /", "= str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2))", "round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange) simulation settings output_data = str(str(top_directory) + \"/output.nc\") number_replicas", "unit.kilojoule_per_mole / unit.nanometer / unit.nanometer bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\":", "mass, \"sidechain_bead_masses\": mass} bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length,", ") minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file ) 
# if not os.path.exists(output_data):", "\"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, }", "get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file ) # if not os.path.exists(output_data): p_list = []", "25, 5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in", "from simtk import unit from foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight import ( get_mbar_expectation,", "str(round(float(monomers_per_turn), 3)) + \"\\n\" ) data.close() file_name = str(str(top_directory) + \"/heat_capacity.png\") figure =", "\" \" + str(round(float(pitch), 3)) + \" \" + str(round(float(radius), 3)) + \"", "in p_list])) radius = mean(np.array([float(r) for r in r_list])) monomers_per_turn = mean(np.array([float(mpt) for", "bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\":", "\".pdb\" ) minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file ) # if not", "min_temp = 100.0 * unit.kelvin max_temp = 250.0 * unit.kelvin temperature_list = get_temperature_list(min_temp,", "(Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant = 2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\":", "* unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} # Bonded interaction", "degrees.\" ) equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle,", "+ 
\"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\" )", "= str(str(top_directory) + \"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list = np.array([temperature._value for temperature in", "= np.array([temperature for temperature in new_temp_list]) folding_temperature = [] for C_v in C_v_list:", "polymer_length = 8 backbone_lengths = [1] sidechain_lengths = [1] sidechain_positions = [0] include_bond_forces", "* 180.0 / 3.1415, 1)) + \" degrees.\" ) equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\":", "equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle,", "\" \" + str(round(float(monomers_per_turn), 3)) + \"\\n\" ) data.close() file_name = str(str(top_directory) +", "unit.kelvin max_temp = 250.0 * unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp, number_replicas) if total_steps", "foldamers.parameters.reweight import ( get_mbar_expectation, get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure", "True constrain_bonds = True # OpenMM simulation settings print_frequency = 20 # Number", "bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle = 92 equil_bond_angles =", "bond_force_constant = 1250 * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer bond_force_constants = { \"bb_bb_bond_k\":", "sigma = 2.0 * bond_length sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\": sigma}", "bb_bb_bb_bb_equil_torsion_angle in 
bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a coarse grained", "\" + str(round(float(monomers_per_turn), 3)) + \"\\n\" ) data.close() file_name = str(str(top_directory) + \"/heat_capacity.png\")", "equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length,", "data_file = \"helical_data.dat\" if not os.path.exists(data_file): data = open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion", "} bond_angle_force_constant = 2000 * unit.kilojoule_per_mole / unit.radian / unit.radian bond_angle_force_constants = {", "Monomers-per-turn\\n\" ) data.close() torsion_force_constant = 2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0,", "import ( get_mbar_expectation, get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure import", "for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a coarse", "torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles", "in sigma_list]) y = np.unique([epsilon._value for epsilon in epsilon_list]) X, Y = np.meshgrid(x,", "= str(str(top_directory) + \"/output.nc\") number_replicas = 20 min_temp = 100.0 * unit.kelvin max_temp", "/ 180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles", "pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding 
Temperature ( Kelvin )\") pyplot.pcolormesh(X, Y, Z) pyplot.colorbar()", "kcal/mol )\") pyplot.title(\"Folding Temperature ( Kelvin )\") pyplot.pcolormesh(X, Y, Z) pyplot.colorbar() pyplot.savefig(file_name) pyplot.show()", "temperatures = np.array([temperature._value for temperature in new_temp_list]) except: temperatures = np.array([temperature for temperature", "unit from foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight import ( get_mbar_expectation, get_free_energy_differences, get_temperature_list, )", "folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding Temperature (", "1000) else: exchange_attempts = 10 ### # # Coarse grained model settings #", "torsion angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1)) + \"", "unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} # Bonded interaction properties", "str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \" + str(round(float(pitch), 3)) + \" \" + str(round(float(radius),", "{ \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant = 2000 * unit.kilojoule_per_mole", "(Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant = 2000", "= 5.0 * unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange) simulation settings", "for sigma in sigma_list]) y = np.unique([epsilon._value for epsilon in epsilon_list]) X, Y", "= False include_bond_angle_forces = True include_nonbonded_forces = True include_torsion_forces = True constrain_bonds =", "mean(np.array([float(r) for r in r_list])) monomers_per_turn = mean(np.array([float(mpt) for mpt in mpt_list])) data", "\" + str(round(float(pitch), 3)) + \" \" + 
str(round(float(radius), 3)) + \" \"", "unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } sigma =", "= round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange) simulation settings output_data = str(str(top_directory) + \"/output.nc\")", "to skip when printing output total_simulation_time = 1.0 * unit.nanosecond # Units =", "in C_v_list: C_v = np.array([C_v[i][0] for i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp", "= False while not success: try: replica_energies, replica_positions, replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system,", "unit.nanometer bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant =", "\"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel(", "Units = picoseconds simulation_time_step = 5.0 * unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank", "y) Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\")", "i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for temp in folding_temperature]) x =", "str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees\" ) print( \"and sc_bb_bb_sc", "bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } sigma = 2.0 * bond_length sigmas =", "for mpt in mpt_list])) data = open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\"", "\"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle 
= 92 equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle,", "in epsilon_list]) X, Y = np.meshgrid(x, y) Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ (", "mpt in mpt_list])) data = open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" +", "except: os.remove(output_data) else: replica_energies, replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency,", "while not success: try: replica_energies, replica_positions, replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list,", "output_data=output_data, ) success = True except: os.remove(output_data) else: replica_energies, replica_positions, replica_states = read_replica_exchange_data(", "printing output total_simulation_time = 1.0 * unit.nanosecond # Units = picoseconds simulation_time_step =", "bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle,", "for temp in folding_temperature]) x = np.unique([sigma._value for sigma in sigma_list]) y =", "# Job settings top_directory = \"output\" if not os.path.exists(top_directory): os.mkdir(top_directory) polymer_length = 8", "data.close() file_name = str(str(top_directory) + \"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list = np.array([temperature._value for", "import calculate_heat_capacity from foldamers.parameters.secondary_structure import get_helical_parameters from cg_openmm.build.cg_build import build_topology from 
cg_openmm.simulation.rep_exch import", "100.0 * unit.amu masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length = 7.5 *", "torsion angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1)) + \"", "torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, ) output_data = str(", "[ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range =", "\"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant = 2000 * unit.kilojoule_per_mole / unit.radian /", "structure pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for", "= 2000 * unit.kilojoule_per_mole / unit.radian / unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant,", "np.array([temperature for temperature in new_temp_list]) folding_temperature = [] for C_v in C_v_list: C_v", "sidechain_lengths = [1] sidechain_positions = [0] include_bond_forces = False include_bond_angle_forces = True include_nonbonded_forces", "{ \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } sigma = 2.0 * bond_length", "0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 /", "bond_length, } bond_force_constant = 1250 * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer bond_force_constants =", "epsilon, \"sc_sc_eps\": epsilon} # Bonded 
interaction properties bond_length = 7.5 * unit.angstrom bond_lengths", "success = True except: os.remove(output_data) else: replica_energies, replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology,", "else: replica_energies, replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology,", "PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants,", "replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success", "= get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file ) # if not os.path.exists(output_data): p_list =", "\"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" ) minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions,", "= True constrain_bonds = True # OpenMM simulation settings print_frequency = 20 #", "= structure pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p)", "\"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant", "in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a coarse 
grained model\") print( \"with bb_bb_bb_bb torsion", "interaction properties bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\":", "OpenMM simulation settings print_frequency = 20 # Number of steps to skip when", "\"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant = 1250 * unit.kilojoule_per_mole /", "0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [", "\"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant = 1250 * unit.kilojoule_per_mole / unit.nanometer /", "{ \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant = 1250 * unit.kilojoule_per_mole", "= { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\":", ")\") pyplot.title(\"Folding Temperature ( Kelvin )\") pyplot.pcolormesh(X, Y, Z) pyplot.colorbar() pyplot.savefig(file_name) pyplot.show() pyplot.close()", "bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle", "True except: os.remove(output_data) else: replica_energies, replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data,", "Job settings top_directory = \"output\" if not os.path.exists(top_directory): 
os.mkdir(top_directory) polymer_length = 8 backbone_lengths", "### # # Coarse grained model settings # ### mass = 100.0 *", "0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47,", "import mean from simtk import unit from foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight import", "= 1.0 * unit.nanosecond # Units = picoseconds simulation_time_step = 5.0 * unit.femtosecond", "True include_torsion_forces = True constrain_bonds = True # OpenMM simulation settings print_frequency =", "= {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length = 7.5 * unit.angstrom bond_lengths = {", "import os import numpy as np import matplotlib.pyplot as pyplot from statistics import", "\"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list =", "r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for p in p_list])) radius = mean(np.array([float(r) for", "[] data_file = \"helical_data.dat\" if not os.path.exists(data_file): data = open(data_file, \"w\") data.write( \"BB-BB-BB-BB", "\"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58)", "angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees.\"", "sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\": sigma} epsilon = 2.0 * unit.kilocalorie_per_mole", "for temperature in new_temp_list]) folding_temperature = [] for C_v in C_v_list: C_v =", "temperature_list = get_temperature_list(min_temp, max_temp, number_replicas) if total_steps > 10000: exchange_attempts = 
round(total_steps /", "= 2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0,", "= True # OpenMM simulation settings print_frequency = 20 # Number of steps", "np.array([temperature._value for temperature in temperature_list]) try: temperatures = np.array([temperature._value for temperature in new_temp_list])", "sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ]", "in minimum_energy_structures: cgmodel.positions = structure pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn)", "= { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\":", "new_temp_list]) folding_temperature = [] for C_v in C_v_list: C_v = np.array([C_v[i][0] for i", "torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant,", "[] mpt_list = [] for structure in minimum_energy_structures: cgmodel.positions = structure pitch, radius,", "bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, )", 
"\"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle", "= \"output\" if not os.path.exists(top_directory): os.mkdir(top_directory) polymer_length = 8 backbone_lengths = [1] sidechain_lengths", "[1] sidechain_positions = [0] include_bond_forces = False include_bond_angle_forces = True include_nonbonded_forces = True", "\"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle =", "[] for structure in minimum_energy_structures: cgmodel.positions = structure pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel)", "= {\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} # Bonded interaction properties bond_length =", "2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \" + str(round(float(pitch), 3)) +", "Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding", "\"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list = [] radius_list = []", "180.0 / 3.1415, 1)) + \" degrees.\" ) equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle,", "3)) + \"\\n\" ) data.close() file_name = str(str(top_directory) + \"/heat_capacity.png\") figure = pyplot.figure(1)", "bond_angle_force_constant, } equil_bond_angle = 92 equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\":", "equil_bond_angle, } pitch_list = [] radius_list = [] data_file = \"helical_data.dat\" if not", "+ \".nc\" ) if not os.path.exists(output_data): success = False while not 
success: try:", "get_mbar_expectation, get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure import get_helical_parameters from", "output_file = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle,", "( nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding Temperature ( Kelvin )\") pyplot.pcolormesh(X,", "in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle *", "bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant,", "\"sc_sc_sigma\": sigma} epsilon = 2.0 * unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon,", "str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\" ) if not os.path.exists(output_data): success = False while not", "temperatures = np.array([temperature for temperature in new_temp_list]) folding_temperature = [] for C_v in", "for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a coarse grained model\") print( \"with", "minimum_energy_structures: cgmodel.positions = structure pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch", "(Angstroms) Radius (Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant = 2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\":", "\"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\" ) if", 
"backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces,", "\".nc\" ) if not os.path.exists(output_data): success = False while not success: try: replica_energies,", "= { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\":", "1)) + \" degrees\" ) print( \"and sc_bb_bb_sc torsion angles of \" +", "if not os.path.exists(data_file): data = open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees)", "p_list])) radius = mean(np.array([float(r) for r in r_list])) monomers_per_turn = mean(np.array([float(mpt) for mpt", "= [0] include_bond_forces = False include_bond_angle_forces = True include_nonbonded_forces = True include_torsion_forces =", "simulations for a coarse grained model\") print( \"with bb_bb_bb_bb torsion angles of \"", "simulation settings print_frequency = 20 # Number of steps to skip when printing", "temperature_list]) try: temperatures = np.array([temperature._value for temperature in new_temp_list]) except: temperatures = np.array([temperature", "\"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } sigma = 2.0 * bond_length sigmas", "masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, 
equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces,", "# Number of steps to skip when printing output total_simulation_time = 1.0 *", "+ \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\" ) if not os.path.exists(output_data): success =", "False while not success: try: replica_energies, replica_positions, replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions,", "r_list])) monomers_per_turn = mean(np.array([float(mpt) for mpt in mpt_list])) data = open(\"helical_data.dat\", \"a\") data.write(", "= 92 equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle,", "from statistics import mean from simtk import unit from foldamers.cg_model.cgmodel import CGModel from", "of \" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees\" )", "# Yank (replica exchange) simulation settings output_data = str(str(top_directory) + \"/output.nc\") number_replicas =", "= np.unique([sigma._value for sigma in sigma_list]) y = np.unique([epsilon._value for epsilon in epsilon_list])", "\"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \" +", "\"with bb_bb_bb_bb torsion angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1))", "= pyplot.figure(1) original_temperature_list = np.array([temperature._value for temperature in temperature_list]) try: temperatures = np.array([temperature._value", "= True include_nonbonded_forces = True include_torsion_forces = True constrain_bonds = True # OpenMM", ") from foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure import 
get_helical_parameters from cg_openmm.build.cg_build import build_topology", "+ \".pdb\" ) minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file ) # if", "if not os.path.exists(output_data): p_list = [] r_list = [] mpt_list = [] for", "from foldamers.parameters.secondary_structure import get_helical_parameters from cg_openmm.build.cg_build import build_topology from cg_openmm.simulation.rep_exch import * grid_size", "import matplotlib.pyplot as pyplot from statistics import mean from simtk import unit from", "radius_list = [] data_file = \"helical_data.dat\" if not os.path.exists(data_file): data = open(data_file, \"w\")", "from foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight import ( get_mbar_expectation, get_free_energy_differences, get_temperature_list, ) from", "make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\"", "print( \"and sc_bb_bb_sc torsion angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415,", "= read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str(", "= mean(np.array([float(r) for r in r_list])) monomers_per_turn = mean(np.array([float(mpt) for mpt in mpt_list]))", "bond_length, } sigma = 2.0 * bond_length sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma,", "180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles =", "= get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for p in p_list])) radius", "+ 
str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\" ) if not os.path.exists(output_data): success = False while", "str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) +", ")\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding Temperature ( Kelvin )\") pyplot.pcolormesh(X, Y, Z)", "epsilons = {\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} # Bonded interaction properties bond_length", "for epsilon in epsilon_list]) X, Y = np.meshgrid(x, y) Z = folding_temperature.reshape(len(y), len(x))", "original_temperature_list = np.array([temperature._value for temperature in temperature_list]) try: temperatures = np.array([temperature._value for temperature", "/ 180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle in", "Number of steps to skip when printing output total_simulation_time = 1.0 * unit.nanosecond", "= True include_torsion_forces = True constrain_bonds = True # OpenMM simulation settings print_frequency", "\"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415", "* 3.1415 / 180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0 for", "unit.kilojoule_per_mole / unit.radian / unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\":", "settings top_directory = \"output\" if not os.path.exists(top_directory): os.mkdir(top_directory) polymer_length = 8 backbone_lengths =", "3.1415, 1)) + \" degrees.\" ) equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle,", "exchange_attempts = 10 ### # # Coarse grained model settings # 
### mass", "bond_length sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\": sigma} epsilon = 2.0 *", "p in p_list])) radius = mean(np.array([float(r) for r in r_list])) monomers_per_turn = mean(np.array([float(mpt)", "SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant = 2000 torsion_force_constants", "minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file ) # if not os.path.exists(output_data): p_list", "[1] sidechain_lengths = [1] sidechain_positions = [0] include_bond_forces = False include_bond_angle_forces = True", "in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a coarse grained model\")", "\"/output.nc\") number_replicas = 20 min_temp = 100.0 * unit.kelvin max_temp = 250.0 *", "positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons,", "[] r_list = [] mpt_list = [] for structure in minimum_energy_structures: cgmodel.positions =", "\"helical_data.dat\" if not os.path.exists(data_file): data = open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC", "str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\"", "# OpenMM simulation settings print_frequency = 20 # Number of steps to skip", "= np.array([temperature._value for temperature in new_temp_list]) except: temperatures = np.array([temperature for temperature in", "{ \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, 
\"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant,", "+ \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \" + str(round(float(pitch), 3)) + \"", "total_steps > 10000: exchange_attempts = round(total_steps / 1000) else: exchange_attempts = 10 ###", "open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \"", "temperature in temperature_list]) try: temperatures = np.array([temperature._value for temperature in new_temp_list]) except: temperatures", "number_replicas = 20 min_temp = 100.0 * unit.kelvin max_temp = 250.0 * unit.kelvin", "in r_list])) monomers_per_turn = mean(np.array([float(mpt) for mpt in mpt_list])) data = open(\"helical_data.dat\", \"a\")", "\" + str(round(float(radius), 3)) + \" \" + str(round(float(monomers_per_turn), 3)) + \"\\n\" )", "max_temp = 250.0 * unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp, number_replicas) if total_steps >", "from cg_openmm.simulation.rep_exch import * grid_size = 4 # Job settings top_directory = \"output\"", "bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415", "in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for temp in folding_temperature]) x = np.unique([sigma._value", "replica_positions, file_name=output_file ) # if not os.path.exists(output_data): p_list = [] r_list = []", "sigma} epsilon = 2.0 * unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\":", "= np.unique([epsilon._value for epsilon in epsilon_list]) X, Y = np.meshgrid(x, y) Z =", "constrain_bonds=constrain_bonds, positions=positions, ) output_data = str( str(top_directory) + \"/torsions_\" + 
str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) +", "bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a coarse grained model\") print(", "= { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\":", ") # if not os.path.exists(output_data): p_list = [] r_list = [] mpt_list =", "/ unit.radian / unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant,", "equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses,", "= range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle", "bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant = 2000 * unit.kilojoule_per_mole / unit.radian", "[ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle =", "* 180.0 / 3.1415, 1)) + \" degrees\" ) print( \"and sc_bb_bb_sc torsion", "exchange_attempts = round(total_steps / 1000) else: exchange_attempts = 10 ### # # Coarse", "np.unique([sigma._value for sigma in sigma_list]) y = np.unique([epsilon._value for epsilon in epsilon_list]) X,", "polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, 
bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles,", "build_topology from cg_openmm.simulation.rep_exch import * grid_size = 4 # Job settings top_directory =", "* unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } sigma", "{\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} # Bonded interaction properties bond_length = 7.5", "cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants,", "sigma, \"sc_sc_sigma\": sigma} epsilon = 2.0 * unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon, \"bb_sc_eps\":", "= 4 # Job settings top_directory = \"output\" if not os.path.exists(top_directory): os.mkdir(top_directory) polymer_length", "coarse grained model\") print( \"with bb_bb_bb_bb torsion angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle *", "pyplot.figure(1) original_temperature_list = np.array([temperature._value for temperature in temperature_list]) try: temperatures = np.array([temperature._value for", "= [1] sidechain_lengths = [1] sidechain_positions = [0] include_bond_forces = False include_bond_angle_forces =", "x = np.unique([sigma._value for sigma in sigma_list]) y = np.unique([epsilon._value for epsilon in", "str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\"", "str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \" + str(round(float(pitch), 3))", "sigma, \"bb_sc_sigma\": 
sigma, \"sc_sc_sigma\": sigma} epsilon = 2.0 * unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\":", "pyplot.xlabel(\"$\\sigma$ ( nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding Temperature ( Kelvin )\")", "round(total_steps / 1000) else: exchange_attempts = 10 ### # # Coarse grained model", "np.array([temperature._value for temperature in new_temp_list]) except: temperatures = np.array([temperature for temperature in new_temp_list])", "Yank (replica exchange) simulation settings output_data = str(str(top_directory) + \"/output.nc\") number_replicas = 20", "10000: exchange_attempts = round(total_steps / 1000) else: exchange_attempts = 10 ### # #", "mass} bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length,", "sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel =", "\"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} # Bonded interaction properties bond_length = 7.5 * unit.angstrom", "C_v_list: C_v = np.array([C_v[i][0] for i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for", "/ unit.nanometer / unit.nanometer bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant,", "cg_openmm.simulation.rep_exch import * grid_size = 4 # Job settings top_directory = \"output\" if", "read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str( str(top_directory)", "output_data = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + 
str(round(sc_bb_bb_sc_equil_torsion_angle,", "X, Y = np.meshgrid(x, y) Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\")", "sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a coarse grained model\") print( \"with bb_bb_bb_bb", "20 # Number of steps to skip when printing output total_simulation_time = 1.0", "\"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, }", "get_temperature_list, ) from foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure import get_helical_parameters from cg_openmm.build.cg_build import", "= np.array([temperature._value for temperature in temperature_list]) try: temperatures = np.array([temperature._value for temperature in", "= [] radius_list = [] data_file = \"helical_data.dat\" if not os.path.exists(data_file): data =", "unit.nanosecond # Units = picoseconds simulation_time_step = 5.0 * unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step))", "bond_angle_force_constant = 2000 * unit.kilojoule_per_mole / unit.radian / unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\":", ") success = True except: os.remove(output_data) else: replica_energies, replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system,", "str(round(float(radius), 3)) + \" \" + str(round(float(monomers_per_turn), 3)) + \"\\n\" ) data.close() file_name", "import get_helical_parameters from cg_openmm.build.cg_build import build_topology from cg_openmm.simulation.rep_exch import * grid_size = 4", "for C_v in C_v_list: C_v = np.array([C_v[i][0] for i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature", 
"/ unit.nanometer bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant", "= [] data_file = \"helical_data.dat\" if not os.path.exists(data_file): data = open(data_file, \"w\") data.write(", "= picoseconds simulation_time_step = 5.0 * unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica", "* unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange) simulation settings output_data =", "unit.nanometer / unit.nanometer bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, }", "} bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0)", "20 min_temp = 100.0 * unit.kelvin max_temp = 250.0 * unit.kelvin temperature_list =", "\"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" ) minimum_energy_structures", "bond_force_constant, } bond_angle_force_constant = 2000 * unit.kilojoule_per_mole / unit.radian / unit.radian bond_angle_force_constants =", ") data.close() file_name = str(str(top_directory) + \"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list = np.array([temperature._value", "settings output_data = str(str(top_directory) + \"/output.nc\") number_replicas = 20 min_temp = 100.0 *", "= open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \"", "bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle = 92 equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\":", "2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 
2)) + \".nc\" ) if not os.path.exists(output_data): success", "[0] include_bond_forces = False include_bond_angle_forces = True include_nonbonded_forces = True include_torsion_forces = True", "sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces,", "* grid_size = 4 # Job settings top_directory = \"output\" if not os.path.exists(top_directory):", "include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, ) output_data = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2))", "matplotlib.pyplot as pyplot from statistics import mean from simtk import unit from foldamers.cg_model.cgmodel", "equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle", "\"sc_sc_bond_length\": bond_length, } bond_force_constant = 1250 * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer bond_force_constants", "= \"helical_data.dat\" if not os.path.exists(data_file): data = open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees)", "cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success = True except:", "as np import matplotlib.pyplot as pyplot from statistics import mean from simtk import", "constrain_bonds = True # OpenMM simulation settings print_frequency = 
20 # Number of", "= 100.0 * unit.kelvin max_temp = 250.0 * unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp,", "bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant = 1250", "equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions()", "= 1250 * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer bond_force_constants = { \"bb_bb_bond_k\": bond_force_constant,", "+ \"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list = np.array([temperature._value for temperature in temperature_list]) try:", "bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle = 92", "replica_positions, replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, )", "steps to skip when printing output total_simulation_time = 1.0 * unit.nanosecond # Units", "bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle = 92 equil_bond_angles = { \"bb_bb_bb_angle_0\":", "str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" ) minimum_energy_structures = get_minimum_energy_pose(", "/ 1000) else: exchange_attempts = 10 ### # # Coarse grained model settings", "include_torsion_forces = True constrain_bonds = 
True # OpenMM simulation settings print_frequency = 20", "1.0 * unit.nanosecond # Units = picoseconds simulation_time_step = 5.0 * unit.femtosecond total_steps", "# ### mass = 100.0 * unit.amu masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass}", "include_bond_angle_forces = True include_nonbonded_forces = True include_torsion_forces = True constrain_bonds = True #", "str(str(top_directory) + \"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list = np.array([temperature._value for temperature in temperature_list])", "np.array([temp for temp in folding_temperature]) x = np.unique([sigma._value for sigma in sigma_list]) y", "topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str( str(top_directory) + \"/torsions_\"", "data.close() torsion_force_constant = 2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0,", "pyplot from statistics import mean from simtk import unit from foldamers.cg_model.cgmodel import CGModel", "from foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure import get_helical_parameters from cg_openmm.build.cg_build import build_topology from", "\"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, }", "\"bb_bb_bond_k\": bond_force_constant, \"bb_sc_bond_k\": bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant = 2000 * unit.kilojoule_per_mole /", "try: replica_energies, replica_positions, replica_states = run_replica_exchange( cgmodel.topology, cgmodel.system, cgmodel.positions, temperature_list=temperature_list, 
simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency,", "1)) + \" degrees.\" ) equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\":", "= mean(np.array([float(mpt) for mpt in mpt_list])) data = open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2))", "True # OpenMM simulation settings print_frequency = 20 # Number of steps to", "{\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\": sigma} epsilon = 2.0 * unit.kilocalorie_per_mole epsilons =", "sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0)", "= True except: os.remove(output_data) else: replica_energies, replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list,", "include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, ) output_data = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle,", "3.1415 / 180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle", "} pitch_list = [] radius_list = [] data_file = \"helical_data.dat\" if not os.path.exists(data_file):", "get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure import get_helical_parameters from cg_openmm.build.cg_build", "epsilon in epsilon_list]) X, Y = np.meshgrid(x, y) Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$", "sc_bb_bb_sc torsion angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1)) +", 
"replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file =", "np.unique([epsilon._value for epsilon in epsilon_list]) X, Y = np.meshgrid(x, y) Z = folding_temperature.reshape(len(y),", "top_directory = \"output\" if not os.path.exists(top_directory): os.mkdir(top_directory) polymer_length = 8 backbone_lengths = [1]", "0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range", ") equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\":", "equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions", "equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle,", "sigma in sigma_list]) y = np.unique([epsilon._value for epsilon in epsilon_list]) X, Y =", "structure in minimum_energy_structures: cgmodel.positions = structure pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius)", "= PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, 
masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths,", "\"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle = 92 equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle,", "np.array([C_v[i][0] for i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for temp in folding_temperature])", "len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding Temperature ( Kelvin", "in folding_temperature]) x = np.unique([sigma._value for sigma in sigma_list]) y = np.unique([epsilon._value for", "2)) + \".pdb\" ) minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file ) #", "Coarse grained model settings # ### mass = 100.0 * unit.amu masses =", "open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\\n\"", "bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for", "180.0 / 3.1415, 1)) + \" degrees\" ) print( \"and sc_bb_bb_sc torsion angles", "+ str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \" + str(round(float(pitch), 3)) + \" \" +", "\"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } sigma = 2.0 * bond_length sigmas = {\"bb_bb_sigma\":", "+ str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees\" ) print( \"and", "import numpy as np import matplotlib.pyplot as pyplot from statistics import mean from", "sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, 
equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces,", "for p in p_list])) radius = mean(np.array([float(r) for r in r_list])) monomers_per_turn =", ") make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) +", "bond_force_constant, \"sc_sc_bond_k\": bond_force_constant, } bond_angle_force_constant = 2000 * unit.kilojoule_per_mole / unit.radian / unit.radian", "92 equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\":", "4 # Job settings top_directory = \"output\" if not os.path.exists(top_directory): os.mkdir(top_directory) polymer_length =", "+ \"\\n\" ) data.close() file_name = str(str(top_directory) + \"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list", "not os.path.exists(top_directory): os.mkdir(top_directory) polymer_length = 8 backbone_lengths = [1] sidechain_lengths = [1] sidechain_positions", "for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles = [", "total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success = True except: os.remove(output_data) else: replica_energies, replica_positions, replica_states", "= 20 min_temp = 100.0 * unit.kelvin max_temp = 250.0 * unit.kelvin temperature_list", "= 10 ### # # Coarse grained model settings # ### mass =", "# Units = picoseconds simulation_time_step = 5.0 * unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) #", "Y = np.meshgrid(x, y) Z = folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\") 
pyplot.ylabel(\"$\\epsilon$", "from cg_openmm.build.cg_build import build_topology from cg_openmm.simulation.rep_exch import * grid_size = 4 # Job", "\"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\" ) if not os.path.exists(output_data): success = False", "# # Coarse grained model settings # ### mass = 100.0 * unit.amu", "* unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp, number_replicas) if total_steps > 10000: exchange_attempts =", "degrees\" ) print( \"and sc_bb_bb_sc torsion angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0", "bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ]", "bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, ) output_data =", "+ str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" ) minimum_energy_structures =", "import CGModel from foldamers.parameters.reweight import ( get_mbar_expectation, get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc import", "import * grid_size = 4 # Job settings top_directory = \"output\" if not", "pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for p", "print(\"Performing simulations for a coarse grained model\") print( \"with bb_bb_bb_bb torsion angles of", "3.1415, 1)) + \" degrees\" ) print( \"and sc_bb_bb_sc torsion angles of \"", "epsilons=epsilons, 
bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions,", "mean from simtk import unit from foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight import (", "foldamers.thermo.calc import calculate_heat_capacity from foldamers.parameters.secondary_structure import get_helical_parameters from cg_openmm.build.cg_build import build_topology from cg_openmm.simulation.rep_exch", "> 10000: exchange_attempts = round(total_steps / 1000) else: exchange_attempts = 10 ### #", "False include_bond_angle_forces = True include_nonbonded_forces = True include_torsion_forces = True constrain_bonds = True", "= [1] sidechain_positions = [0] include_bond_forces = False include_bond_angle_forces = True include_nonbonded_forces =", "backbone_lengths = [1] sidechain_lengths = [1] sidechain_positions = [0] include_bond_forces = False include_bond_angle_forces", "include_bond_forces = False include_bond_angle_forces = True include_nonbonded_forces = True include_torsion_forces = True constrain_bonds", "as pyplot from statistics import mean from simtk import unit from foldamers.cg_model.cgmodel import", "bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, 
constrain_bonds=constrain_bonds, positions=positions, ) output_data", "= np.array([C_v[i][0] for i in range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for temp in", "file_name=output_file ) # if not os.path.exists(output_data): p_list = [] r_list = [] mpt_list", "in new_temp_list]) folding_temperature = [] for C_v in C_v_list: C_v = np.array([C_v[i][0] for", "bb_bb_bb_bb torsion angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1)) +", "0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0,", "float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20,", "Bonded interaction properties bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length,", "print_frequency=print_frequency, output_data=output_data, ) success = True except: os.remove(output_data) else: replica_energies, replica_positions, replica_states =", "pyplot.title(\"Folding Temperature ( Kelvin )\") pyplot.pcolormesh(X, Y, Z) pyplot.colorbar() pyplot.savefig(file_name) pyplot.show() pyplot.close() exit()", "\"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_bb_torsion_k\": 0, } bb_bb_bb_bb_equil_torsion_angle_range =", "3.1415 / 180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5)", "nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding Temperature ( Kelvin )\") pyplot.pcolormesh(X, Y,", "+ \" \" + str(round(float(radius), 3)) + \" \" + str(round(float(monomers_per_turn), 3)) +", "for structure in minimum_energy_structures: cgmodel.positions = structure pitch, radius, 
monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch)", "# Bonded interaction properties bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\":", "* unit.nanosecond # Units = picoseconds simulation_time_step = 5.0 * unit.femtosecond total_steps =", "folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for temp in folding_temperature]) x = np.unique([sigma._value for sigma", "temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success = True except: os.remove(output_data) else: replica_energies,", "range(47, 58) bb_bb_bb_bb_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in", "grained model settings # ### mass = 100.0 * unit.amu masses = {\"backbone_bead_masses\":", "epsilon = 2.0 * unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon, \"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon}", "= range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for", "float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0", "output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle,", "unit.amu masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length = 7.5 * unit.angstrom bond_lengths", "\"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list = [] radius_list", "2)) + \" \" + str(round(float(pitch), 3)) + \" \" + str(round(float(radius), 3))", "= [] for C_v in C_v_list: C_v = np.array([C_v[i][0] for i in 
range(len(C_v))])", "{ \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle,", "except: temperatures = np.array([temperature for temperature in new_temp_list]) folding_temperature = [] for C_v", "{ \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle,", "2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\":", "5.0 * unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange) simulation settings output_data", "unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\":", "simtk import unit from foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight import ( get_mbar_expectation, get_free_energy_differences,", "temperature in new_temp_list]) folding_temperature = [] for C_v in C_v_list: C_v = np.array([C_v[i][0]", "5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range", ") data.close() torsion_force_constant = 2000 torsion_force_constants = { \"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\":", "= {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\": 
sigma} epsilon = 2.0 * unit.kilocalorie_per_mole epsilons", "os.path.exists(top_directory): os.mkdir(top_directory) polymer_length = 8 backbone_lengths = [1] sidechain_lengths = [1] sidechain_positions =", "7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, }", "not os.path.exists(data_file): data = open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch", "(replica exchange) simulation settings output_data = str(str(top_directory) + \"/output.nc\") number_replicas = 20 min_temp", "= 0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for", "positions=positions, ) output_data = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\"", "\"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\": bond_angle_force_constant, \"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, }", "figure = pyplot.figure(1) original_temperature_list = np.array([temperature._value for temperature in temperature_list]) try: temperatures =", "simulation_time_step = 5.0 * unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange) simulation", "0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a", "total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange) simulation settings output_data = str(str(top_directory) +", "* unit.amu masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length 
= 7.5 * unit.angstrom", "\" \" + str(round(float(radius), 3)) + \" \" + str(round(float(monomers_per_turn), 3)) + \"\\n\"", "### mass = 100.0 * unit.amu masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length", "success = False while not success: try: replica_energies, replica_positions, replica_states = run_replica_exchange( cgmodel.topology,", "for temperature in new_temp_list]) except: temperatures = np.array([temperature for temperature in new_temp_list]) folding_temperature", "equil_bond_angles=equil_bond_angles, equil_torsion_angles=equil_torsion_angles, include_nonbonded_forces=include_nonbonded_forces, include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, ) output_data = str( str(top_directory)", "= [] r_list = [] mpt_list = [] for structure in minimum_energy_structures: cgmodel.positions", "= 250.0 * unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp, number_replicas) if total_steps > 10000:", "cg_openmm.build.cg_build import build_topology from cg_openmm.simulation.rep_exch import * grid_size = 4 # Job settings", "skip when printing output total_simulation_time = 1.0 * unit.nanosecond # Units = picoseconds", "mpt_list])) data = open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2))", "} bond_force_constant = 1250 * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer bond_force_constants = {", "grid_size = 4 # Job settings top_directory = \"output\" if not os.path.exists(top_directory): os.mkdir(top_directory)", "in temperature_list]) try: temperatures = np.array([temperature._value for temperature in new_temp_list]) except: temperatures =", "\"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms) 
Monomers-per-turn\\n\" )", "+ \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" )", "str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees.\" ) equil_torsion_angles = {", "= 8 backbone_lengths = [1] sidechain_lengths = [1] sidechain_positions = [0] include_bond_forces =", "= open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms)", "Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\\n\" ) data.close() torsion_force_constant = 2000 torsion_force_constants = {", "include_bond_forces=include_bond_forces, include_bond_angle_forces=include_bond_angle_forces, include_torsion_forces=include_torsion_forces, constrain_bonds=constrain_bonds, positions=positions, ) output_data = str( str(top_directory) + \"/torsions_\" +", "data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\\n\" ) data.close()", "= [] for structure in minimum_energy_structures: cgmodel.positions = structure pitch, radius, monomers_per_turn =", "equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list = [] radius_list = [] data_file", "+ \" degrees\" ) print( \"and sc_bb_bb_sc torsion angles of \" + str(round(sc_bb_bb_sc_equil_torsion_angle", "= 20 # Number of steps to skip when printing output total_simulation_time =", "replica_positions, replica_states = read_replica_exchange_data( system=cgmodel.system, topology=cgmodel.topology, temperature_list=temperature_list, output_data=output_data, print_frequency=print_frequency, ) make_replica_pdb_files(cgmodel.topology, replica_positions) output_file", "180.0) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles:", "for a coarse grained model\") print( 
\"with bb_bb_bb_bb torsion angles of \" +", "exchange) simulation settings output_data = str(str(top_directory) + \"/output.nc\") number_replicas = 20 min_temp =", "file_name = str(str(top_directory) + \"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list = np.array([temperature._value for temperature", "equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths,", "2000 * unit.kilojoule_per_mole / unit.radian / unit.radian bond_angle_force_constants = { \"bb_bb_bb_angle_k\": bond_angle_force_constant, \"bb_bb_sc_angle_k\":", ") output_data = str( str(top_directory) + \"/torsions_\" + str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" +", "pitch = mean(np.array([float(p) for p in p_list])) radius = mean(np.array([float(r) for r in", "# if not os.path.exists(output_data): p_list = [] r_list = [] mpt_list = []", "range(-20, 25, 5) sc_bb_bb_sc_equil_torsion_angles = [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle", "= get_temperature_list(min_temp, max_temp, number_replicas) if total_steps > 10000: exchange_attempts = round(total_steps / 1000)", "+ str(round(bb_bb_bb_bb_equil_torsion_angle, 2)) + \"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".nc\" ) if not", "= folding_temperature.reshape(len(y), len(x)) pyplot.xlabel(\"$\\sigma$ ( nm )\") pyplot.ylabel(\"$\\epsilon$ ( kcal/mol )\") pyplot.title(\"Folding Temperature", "os.path.exists(output_data): success = False while not success: try: replica_energies, replica_positions, replica_states = run_replica_exchange(", "equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle, \"sc_sc_sc_angle_0\": equil_bond_angle, \"sc_bb_sc_angle_0\": equil_bond_angle, \"sc_sc_bb_angle_0\": equil_bond_angle, } pitch_list", "\"sc_bb_bb_sc_torsion_0\": 
sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel", "\"\\n\" ) data.close() file_name = str(str(top_directory) + \"/heat_capacity.png\") figure = pyplot.figure(1) original_temperature_list =", "mass = 100.0 * unit.amu masses = {\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length =", "import build_topology from cg_openmm.simulation.rep_exch import * grid_size = 4 # Job settings top_directory", "= round(total_steps / 1000) else: exchange_attempts = 10 ### # # Coarse grained", "a coarse grained model\") print( \"with bb_bb_bb_bb torsion angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle", "CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons, bond_lengths=bond_lengths, bond_force_constants=bond_force_constants, bond_angle_force_constants=bond_angle_force_constants, torsion_force_constants=torsion_force_constants, equil_bond_angles=equil_bond_angles,", "if not os.path.exists(output_data): success = False while not success: try: replica_energies, replica_positions, replica_states", "sidechain_positions = [0] include_bond_forces = False include_bond_angle_forces = True include_nonbonded_forces = True include_torsion_forces", "250.0 * unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp, number_replicas) if total_steps > 10000: exchange_attempts", "new_temp_list]) except: temperatures = np.array([temperature for temperature in new_temp_list]) folding_temperature = [] for", "= 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length,", "from foldamers.parameters.reweight import ( get_mbar_expectation, 
get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc import calculate_heat_capacity from", "\"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle = 92 equil_bond_angles = {", "r_list = [] mpt_list = [] for structure in minimum_energy_structures: cgmodel.positions = structure", "= [ float(equil_torsion_angle * 3.1415 / 180.0) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range ] sc_bb_bb_sc_equil_torsion_angle_range", "unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp, number_replicas) if total_steps > 10000: exchange_attempts = round(total_steps", "equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle", "foldamers.cg_model.cgmodel import CGModel from foldamers.parameters.reweight import ( get_mbar_expectation, get_free_energy_differences, get_temperature_list, ) from foldamers.thermo.calc", "+ str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \".pdb\" ) minimum_energy_structures = get_minimum_energy_pose( cgmodel.topology, replica_energies, replica_positions, file_name=output_file", "folding_temperature = np.array([temp for temp in folding_temperature]) x = np.unique([sigma._value for sigma in", "p_list = [] r_list = [] mpt_list = [] for structure in minimum_energy_structures:", "range(len(C_v))]) folding_temperature.append(max(C_v)) folding_temperature = np.array([temp for temp in folding_temperature]) x = np.unique([sigma._value for", "\"_\" + str(round(sc_bb_bb_sc_equil_torsion_angle, 2)) + \" \" + str(round(float(pitch), 3)) + \" \"", "} sigma = 2.0 * bond_length sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\":", "sc_bb_bb_sc_equil_torsion_angles: print(\"Performing simulations for a coarse grained model\") print( \"with 
bb_bb_bb_bb torsion angles", "folding_temperature = [] for C_v in C_v_list: C_v = np.array([C_v[i][0] for i in", "\"bb_sc_sc_angle_k\": bond_angle_force_constant, \"sc_sc_sc_angle_k\": bond_angle_force_constant, \"sc_bb_sc_angle_k\": bond_angle_force_constant, \"sc_sc_bb_angle_k\": bond_angle_force_constant, } equil_bond_angle = 92 equil_bond_angles", "\"bb_sc_sigma\": sigma, \"sc_sc_sigma\": sigma} epsilon = 2.0 * unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon,", "\"bb_bb_bb_bb_torsion_k\": torsion_force_constant, \"bb_bb_bb_sc_torsion_k\": 0, \"bb_bb_sc_sc_torsion_k\": 0, \"bb_sc_sc_sc_torsion_k\": 0, \"sc_bb_bb_sc_torsion_k\": torsion_force_constant, \"bb_sc_sc_bb_torsion_k\": 0, \"sc_sc_sc_sc_torsion_k\":", "radius, monomers_per_turn = get_helical_parameters(cgmodel) p_list.append(pitch) r_list.append(radius) mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for p in", "\"bb_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_sc_torsion_0\": sc_bb_bb_sc_equil_torsion_angle, \"bb_sc_sc_bb_torsion_0\": equil_torsion_angle, \"sc_sc_sc_sc_torsion_0\": equil_torsion_angle, \"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions =", "cgmodel.positions, temperature_list=temperature_list, simulation_time_step=simulation_time_step, total_simulation_time=total_simulation_time, print_frequency=print_frequency, output_data=output_data, ) success = True except: os.remove(output_data) else:", "2.0 * bond_length sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\": sigma} epsilon =", "angles of \" + str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1)) + \" degrees\"", "\"sc_bb_bb_bb_torsion_0\": equil_torsion_angle, } positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions,", "= 2.0 * unit.kilocalorie_per_mole epsilons = {\"bb_bb_eps\": epsilon, 
\"bb_sc_eps\": epsilon, \"sc_sc_eps\": epsilon} #", "get_helical_parameters from cg_openmm.build.cg_build import build_topology from cg_openmm.simulation.rep_exch import * grid_size = 4 #", "picoseconds simulation_time_step = 5.0 * unit.femtosecond total_steps = round(total_simulation_time.__div__(simulation_time_step)) # Yank (replica exchange)", "monomers_per_turn = mean(np.array([float(mpt) for mpt in mpt_list])) data = open(\"helical_data.dat\", \"a\") data.write( str(round(bb_bb_bb_bb_equil_torsion_angle,", "100.0 * unit.kelvin max_temp = 250.0 * unit.kelvin temperature_list = get_temperature_list(min_temp, max_temp, number_replicas)", "in sc_bb_bb_sc_equil_torsion_angle_range ] equil_torsion_angle = 0.0 for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles: for sc_bb_bb_sc_equil_torsion_angle in", "temperature in new_temp_list]) except: temperatures = np.array([temperature for temperature in new_temp_list]) folding_temperature =", "} equil_bond_angle = 92 equil_bond_angles = { \"bb_bb_bb_angle_0\": equil_bond_angle, \"bb_bb_sc_angle_0\": equil_bond_angle, \"bb_sc_sc_angle_0\": equil_bond_angle,", "numpy as np import matplotlib.pyplot as pyplot from statistics import mean from simtk", "{\"backbone_bead_masses\": mass, \"sidechain_bead_masses\": mass} bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\":", "2)) + \".nc\" ) if not os.path.exists(output_data): success = False while not success:", "\"sidechain_bead_masses\": mass} bond_length = 7.5 * unit.angstrom bond_lengths = { \"bb_bb_bond_length\": bond_length, \"bb_sc_bond_length\":", "# Coarse grained model settings # ### mass = 100.0 * unit.amu masses", "* bond_length sigmas = {\"bb_bb_sigma\": sigma, \"bb_sc_sigma\": sigma, \"sc_sc_sigma\": sigma} epsilon = 2.0", "\"sc_sc_eps\": epsilon} # Bonded interaction properties bond_length = 7.5 * unit.angstrom bond_lengths =", "True include_nonbonded_forces = True include_torsion_forces = True constrain_bonds = True # OpenMM 
simulation", "\" degrees.\" ) equil_torsion_angles = { \"bb_bb_bb_bb_torsion_0\": bb_bb_bb_bb_equil_torsion_angle, \"bb_bb_bb_sc_torsion_0\": equil_torsion_angle, \"bb_bb_sc_sc_torsion_0\": equil_torsion_angle, \"bb_sc_sc_sc_torsion_0\":", "print_frequency = 20 # Number of steps to skip when printing output total_simulation_time", "+ \" \" + str(round(float(pitch), 3)) + \" \" + str(round(float(radius), 3)) +", "bond_length, \"bb_sc_bond_length\": bond_length, \"sc_sc_bond_length\": bond_length, } bond_force_constant = 1250 * unit.kilojoule_per_mole / unit.nanometer", "pitch_list = [] radius_list = [] data_file = \"helical_data.dat\" if not os.path.exists(data_file): data", "data = open(data_file, \"w\") data.write( \"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius", "try: temperatures = np.array([temperature._value for temperature in new_temp_list]) except: temperatures = np.array([temperature for", "get_temperature_list(min_temp, max_temp, number_replicas) if total_steps > 10000: exchange_attempts = round(total_steps / 1000) else:", "mpt_list.append(monomers_per_turn) pitch = mean(np.array([float(p) for p in p_list])) radius = mean(np.array([float(r) for r", "} positions = PDBFile(\"pose_27.pdb\").getPositions() cgmodel = CGModel( polymer_length=polymer_length, backbone_lengths=backbone_lengths, sidechain_lengths=sidechain_lengths, sidechain_positions=sidechain_positions, masses=masses, sigmas=sigmas,", "mpt_list = [] for structure in minimum_energy_structures: cgmodel.positions = structure pitch, radius, monomers_per_turn" ]
[]
[ "returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode])", "area_name: search by province, city, area_name. state name could be 2-letter abbreviation, or", "except ValueError: pass # area_code if area_code: filters.append(t.c.area_code == area_code) # latitude if", "<= population_less) # dwellings if dwellings_greater is not None: filters.append(t.c.dwellings >= dwellings_greater) if", "None: filters.append(t.c.population <= population_less) # dwellings if dwellings_greater is not None: filters.append(t.c.dwellings >=", "province self.city = city self.area_code = area_code self.area_name = area_name self.latitude = latitude", "# sort_by given, then sort by keyword if sort_by: result = list() for", "elevation_less is not None: filters.append(t.c.elevation <= elevation_less) # population if population_greater is not", "filters.append(t.c.longitude <= lng_less) # elevation if elevation_greater is not None: filters.append(t.c.elevation >= elevation_greater)", "%.6f, %.6f, %.6f\" % (lat_lower, lat_upper, lng_lower, lng_upper)) # print(\"%.6f\" % great_circle((lat, lng),", "is not None: filters.append(t.c.latitude <= lat_less) # longitude if lng_greater is not None:", "<= dwellings_less) # timezone if timezone_greater is not None: filters.append(t.c.timezone >= timezone_greater) if", "distance, don't use limit clause else: heap = list() for row in self.connect.execute(sql):", "by keyword if sort_by: result = list() for row in self.connect.execute(sql): dist =", "row in self.connect.execute(sql)] result = list() for postalcode in random.sample(all_postalcode, returns): result.append(self.by_postalcode(postalcode)) return", "light savings. 
\"\"\" filters = list() # near lat, lng if lat is", "# prefix if prefix is not None: if not isinstance(prefix, string_types): raise TypeError(\"prefix", "# -*- coding: utf-8 -*- import random import heapq from math import radians,", "lat_lower = lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng", "heap = heapq.nsmallest(returns, heap, key=lambda x: x[0]) else: heap = heapq.nlargest(returns, heap, key=lambda", "not.\") # prefix if prefix is not None: if not isinstance(prefix, string_types): raise", "postalcode: 7 letter, example: \"A0A 0A3\" - city: city name, example: \"Ottawa\" -", "exactly matches. :param day_light_savings: bool or int, whether using day light savings. \"\"\"", "import Base from .pkg.geo_search import great_circle from .pkg.six import string_types except: from cazipcode.data", "# elevation if elevation_greater is not None: filters.append(t.c.elevation >= elevation_greater) if elevation_less is", "elevation_less: search postalcode within a 3-d space box. :param province, city, area_name: search", "select([t]).where(and_(*filters)) if sort_by: if ascending: clause = t.c[sort_by].asc() else: clause = t.c[sort_by].desc() sql", "* 69.172 lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius *", "except: raise ValueError(\"Can not find '%s'!\" % postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True,", "whether using day light savings. 
\"\"\" filters = list() # near lat, lng", "returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, )", "lng_lower) filters.append(t.c.longitude <= lng_upper) elif lat is None and lng is None and", "great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius: result.append(PostalCode._make(row)) if len(result) ==", "if dwellings_less is not None: filters.append(t.c.dwellings <= dwellings_less) # timezone if timezone_greater is", "timezone) # day_light_savings if day_light_savings is not None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings ==", "(row.latitude, row.longitude)) if dist <= radius: result.append(PostalCode._make(row)) if len(result) == returns: break #", "PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise ValueError(\"Can not find '%s'!\" % postalcode) def by_prefix(self,", "\"\"\" self.connect.close() def find(self, lat=None, lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None,", "area_name self.latitude = latitude self.longitude = longitude self.elevation = elevation self.population = population", ") def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by,", "list() for row in self.connect.execute(sql): dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if", "in self.connect.execute(sql): # 43.959918, 46.995828, -77.885944, -73.556256 dist = great_circle( (lat, lng), (row.latitude,", "close(self): \"\"\"Closs engine. 
**中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def find(self, lat=None, lng=None, radius=None, lat_greater=None,", "def by_postalcode(self, postalcode): \"\"\"Find exact postal code. \"\"\" sql = select([t]).where(t.c.postalcode == postalcode.strip().upper())", "None and radius is not None: dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(radians(lat)) *", "day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search method. :param lat, lng, radius: search", "self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_population(self,", "== day_light_savings) # execute query sql = select([t]).where(and_(*filters)) if sort_by: if ascending: clause", "for row in self.connect.execute(sql): # 43.959918, 46.995828, -77.885944, -73.556256 dist = great_circle( (lat,", "else: clause = t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql = sql.limit(returns) result = [PostalCode._make(row)", "elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by,", "= list() for row in self.connect.execute(sql): dist = great_circle( (lat, lng), (row.latitude, row.longitude))", "string_types except: from cazipcode.data import ( engine, t, find_province, find_city, find_area_name, fields, )", "search by province, city, area_name. 
state name could be 2-letter abbreviation, or full", "= t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql = sql.limit(returns) result = [PostalCode._make(row) for row", "# timezone if timezone_greater is not None: filters.append(t.c.timezone >= timezone_greater) if timezone_less is", "sql = sql.order_by(clause) sql = sql.limit(returns) result = [PostalCode._make(row) for row in self.connect.execute(sql)]", "sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search method. :param lat, lng, radius: search near", "= lng + lon_degr_rad # print(\"%.6f, %.6f, %.6f, %.6f\" % (lat_lower, lat_upper, lng_lower,", "return self.to_json(indent=4) def __eq__(self, other): return self.postalcode == other.postalcode def __lt__(self, other): return", "= lat + lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng +", "is not None: filters.append(t.c.elevation <= elevation_less) # population if population_greater is not None:", "cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six import string_types @total_ordering class PostalCode(Base): \"\"\"Represent a postal", "substring=None, province=None, city=None, area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None,", "not None def __bool__(self): \"\"\"For Python3 bool() method. 
\"\"\" return self.postalcode is not", "heapq from math import radians, cos from functools import total_ordering from sqlalchemy import", "try: from .data import ( engine, t, find_province, find_city, find_area_name, fields, ) from", "\"\"\" def __init__(self): self.connect = engine.connect() def __enter__(self): return self def __exit__(self, *exc_info):", "lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less,", "t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql = sql.limit(returns) result = [PostalCode._make(row) for row in", "self.postalcode == other.postalcode def __lt__(self, other): return self.postalcode < other.postalcode def __nonzero__(self): \"\"\"For", "lng=lng, radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_postalcode(self, postalcode): \"\"\"Find exact postal code.", ":param day_light_savings: bool or int, whether using day light savings. \"\"\" filters =", "timezone_greater, timezone_less: timezone falls in a range. 
:param timezone: int, all postal code", "timezone: filters.append(t.c.timezone == timezone) # day_light_savings if day_light_savings is not None: day_light_savings =", "use \"near\" search if radius: # sort_by given, then sort by keyword if", "keyword if sort_by: result = list() for row in self.connect.execute(sql): dist = great_circle(", "substring is not None: if not isinstance(substring, string_types): raise TypeError(\"substring has to be", "None: filters.append(t.c.elevation <= elevation_less) # population if population_greater is not None: filters.append(t.c.population >=", "name, example: \"Ottawa\" - latitude: latitude - longitude: longitude - elevation: elevation -", "postalcode=None, province=None, city=None, area_code=None, area_name=None, latitude=None, longitude=None, elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode", "string_types): raise TypeError(\"substring has to be a string\") if 1 <= len(substring) <=", "lng), (row.latitude, row.longitude)) if dist <= radius: result.append(PostalCode._make(row)) if len(result) == returns: break", "indicate that whether this zipcode use day light savings. Compare two postal code", "elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None, city=None, area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None,", "province=None, city=None, area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None,", "elevation if elevation_greater is not None: filters.append(t.c.elevation >= elevation_greater) if elevation_less is not", "cazipcode.pkg.six import string_types @total_ordering class PostalCode(Base): \"\"\"Represent a postal code. 
Attributes: - postalcode:", "lng, radius: search near lat, lng with in xxx miles. :param lat_greater, lat_less,", "ascending=ascending, returns=returns, ) def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_day_light_savings(self,", "is not None and radius is not None: dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg =", "- latitude: latitude - longitude: longitude - elevation: elevation - population: integer, population", "self.postalcode is not None def __bool__(self): \"\"\"For Python3 bool() method. \"\"\" return self.postalcode", "has to be a 1-7 letter length!\") # province if province: try: province", "<= radius: result.append(PostalCode._make(row)) if len(result) == returns: break # sort_by not given, then", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_code(self, area_code,", "print(\"%.6f, %.6f, %.6f, %.6f\" % (lat_lower, lat_upper, lng_lower, lng_upper)) # print(\"%.6f\" % great_circle((lat,", "method. :param lat, lng, radius: search near lat, lng with in xxx miles.", "latitude - longitude: longitude - elevation: elevation - population: integer, population - dwellings:", "radius: heap.append((dist, row)) # Use heap sort to find top-K if ascending: heap", ":param dwellings_greater, dwellings_less: dwellings falls in a range. 
:param timezone_greater, timezone_less: timezone falls", "% (lat_lower, lat_upper, lng_lower, lng_upper)) # print(\"%.6f\" % great_circle((lat, lng), (lat_upper, lng_upper))) #", "lng_less) # elevation if elevation_greater is not None: filters.append(t.c.elevation >= elevation_greater) if elevation_less", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "population_less) # dwellings if dwellings_greater is not None: filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less", "/ dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower", "DEFAULT_LIMIT = 5 class SearchEngine(object): \"\"\" \"\"\" def __init__(self): self.connect = engine.connect() def", "def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending,", "import random import heapq from math import radians, cos from functools import total_ordering", "that whether this zipcode use day light savings. 
Compare two postal code is", "elevation_less) # population if population_greater is not None: filters.append(t.c.population >= population_greater) if population_less", "dwellings - timezone: integer, timezone - day_light_savings: integer, indicate that whether this zipcode", "if sort_by: result = list() for row in self.connect.execute(sql): dist = great_circle( (lat,", "== postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise ValueError(\"Can not find", "try: city = find_city(city, best_match=True)[0] filters.append(t.c.city == city) except ValueError: pass # area_name", "filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has to be a 1-7 letter length!\") # province", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng, radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "not sort_by: if ascending: clause = t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc() sql =", ".pkg.six import string_types except: from cazipcode.data import ( engine, t, find_province, find_city, find_area_name,", "lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <= lng_upper) elif lat is None and lng", "None: filters.append(t.c.dwellings <= dwellings_less) # timezone if timezone_greater is not None: filters.append(t.c.timezone >=", "miles. 
:param lat_greater, lat_less, lng_greater, lng_less, elevation_greater, elevation_less: search postalcode within a 3-d", "row.longitude)) if dist <= radius: heap.append((dist, row)) # Use heap sort to find", "def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, )", "returns=returns, ) def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone,", "# print(\"%.6f\" % great_circle((lat, lng), (lat_upper, lng_upper))) # print(\"%.6f\" % great_circle((lat, lng), (lat_lower,", "returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "self.connect.execute(sql): # 43.959918, 46.995828, -77.885944, -73.556256 dist = great_circle( (lat, lng), (row.latitude, row.longitude))", "# day_light_savings if day_light_savings is not None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings)", "fuzzy and typo tolerant. :param area_code: int, all postal code area_code exactly matches.", "timezone exactly matches. 
:param day_light_savings: bool or int, whether using day light savings.", "timezone if timezone_greater is not None: filters.append(t.c.timezone >= timezone_greater) if timezone_less is not", "[PostalCode._make(row) for row in self.connect.execute(sql)] return result def near(self, lat, lng, radius, sort_by=fields.postalcode,", "69.172 lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05", ") def by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring, sort_by=sort_by, ascending=ascending, returns=returns,", "comparing it's postal code string. \"\"\" __attrs__ = [ \"postalcode\", \"city\", \"province\", \"area_code\",", "return self.postalcode is not None DEFAULT_LIMIT = 5 class SearchEngine(object): \"\"\" \"\"\" def", "if city: try: city = find_city(city, best_match=True)[0] filters.append(t.c.city == city) except ValueError: pass", "Base from cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six import string_types @total_ordering class PostalCode(Base): \"\"\"Represent", "raise ValueError(\"prefix has to be a 1-7 letter length!\") # substring if substring", "lng), (lat_lower, lng_lower))) filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude", "returns=returns, ) def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less,", "province=None, city=None, area_code=None, area_name=None, latitude=None, longitude=None, elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode =", "import great_circle from cazipcode.pkg.six import string_types @total_ordering class PostalCode(Base): \"\"\"Represent a postal code.", "not None: 
dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(radians(lat)) * 69.172 lat_degr_rad = abs(radius", "longitude=None, elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode = postalcode self.province = province self.city", "ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng, radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_postalcode(self,", "matches. :param day_light_savings: bool or int, whether using day light savings. \"\"\" filters", "/ dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower = lat -", "find top-K if ascending: heap = heapq.nsmallest(returns, heap, key=lambda x: x[0]) else: heap", "not None: filters.append(t.c.elevation <= elevation_less) # population if population_greater is not None: filters.append(t.c.population", "\"A0A 0A3\" - city: city name, example: \"Ottawa\" - province: 2 letters province", "Python2 bool() method. \"\"\" return self.postalcode is not None def __bool__(self): \"\"\"For Python3", "= 69.172 dist_btwn_lon_deg = cos(radians(lat)) * 69.172 lat_degr_rad = abs(radius * 1.05 /", "and radius is None: pass else: raise ValueError(\"lat, lng, radius has to be", "# sort_by not given, then sort by distance, don't use limit clause else:", "code with this prefix, for example: \"01A\" :param substring: all postal code contains", "city = find_city(city, best_match=True)[0] filters.append(t.c.city == city) except ValueError: pass # area_name if", "\"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\", \"timezone\", \"day_light_savings\", ] def __init__(self,", "is fuzzy and typo tolerant. 
:param area_code: int, all postal code area_code exactly", "returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True,", "city: try: city = find_city(city, best_match=True)[0] filters.append(t.c.city == city) except ValueError: pass #", "with this prefix, for example: \"01A\" :param substring: all postal code contains this", "timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns,", "from cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six import string_types @total_ordering class PostalCode(Base): \"\"\"Represent a", "population self.dwellings = dwellings self.timezone = timezone self.day_light_savings = day_light_savings def __str__(self): return", "self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "sql = select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_name(self, area_name,", ":param lat_greater, lat_less, lng_greater, lng_less, elevation_greater, elevation_less: search postalcode within a 3-d space", "province, city, area_name. 
state name could be 2-letter abbreviation, or full name, and", "population_less is not None: filters.append(t.c.population <= population_less) # dwellings if dwellings_greater is not", "elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "day_light_savings: integer, indicate that whether this zipcode use day light savings. Compare two", "province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_city(self,", ") def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by,", "be all given or not.\") # prefix if prefix is not None: if", "query sql = select([t]).where(and_(*filters)) if sort_by: if ascending: clause = t.c[sort_by].asc() else: clause", "lat, lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng, radius=radius, sort_by=sort_by, ascending=ascending,", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_postalcode(self, postalcode): \"\"\"Find exact postal code. 
\"\"\" sql", "= dwellings self.timezone = timezone self.day_light_savings = day_light_savings def __str__(self): return self.to_json(indent=4) def", "a 1-7 letter length!\") # province if province: try: province = find_province(province, best_match=True)[0]", "= PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise ValueError(\"Can not find '%s'!\" % postalcode) def", "= \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has to be a 1-7", "timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search method. :param lat, lng,", "lng is None and radius is None: pass else: raise ValueError(\"lat, lng, radius", "self.postalcode is not None DEFAULT_LIMIT = 5 class SearchEngine(object): \"\"\" \"\"\" def __init__(self):", "not None and lng is not None and radius is not None: dist_btwn_lat_deg", "filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less is not None: filters.append(t.c.dwellings <= dwellings_less) # timezone", "# if use \"near\" search if radius: # sort_by given, then sort by", "near lat, lng with in xxx miles. 
:param lat_greater, lat_less, lng_greater, lng_less, elevation_greater,", "be a 1-7 letter length!\") # substring if substring is not None: if", "best_match=True)[0] filters.append(t.c.area_name == area_name) except ValueError: pass # area_code if area_code: filters.append(t.c.area_code ==", "lng_lower, lng_upper)) # print(\"%.6f\" % great_circle((lat, lng), (lat_upper, lng_upper))) # print(\"%.6f\" % great_circle((lat,", "other.postalcode def __lt__(self, other): return self.postalcode < other.postalcode def __nonzero__(self): \"\"\"For Python2 bool()", "1 <= len(prefix) <= 7: pattern = \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else: raise", "is None: pass else: raise ValueError(\"lat, lng, radius has to be all given", "key=lambda x: x[0]) else: heap = heapq.nlargest(returns, heap, key=lambda x: x[0]) result =", "ascending=ascending, returns=returns, ) def by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by,", "self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode =", "returns=returns, ) def by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending,", "random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode = [row[0] for row in self.connect.execute(sql)] result", "% substring filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has to be a 1-7 letter length!\")", "given, then sort by distance, don't use limit clause else: heap = list()", "select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise ValueError(\"Can not", "\"longitude\", \"elevation\", \"population\", \"dwellings\", 
\"timezone\", \"day_light_savings\", ] def __init__(self, postalcode=None, province=None, city=None, area_code=None,", "# 43.959918, 46.995828, -77.885944, -73.556256 dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if", "prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has to be a 1-7 letter length!\") #", "def by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns, )", "import select, func, and_ try: from .data import ( engine, t, find_province, find_city,", "if radius: # sort_by given, then sort by keyword if sort_by: result =", "__str__(self): return self.to_json(indent=4) def __eq__(self, other): return self.postalcode == other.postalcode def __lt__(self, other):", "<= 7: pattern = \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has to", "integer, population - dwellings: integer, dwellings - timezone: integer, timezone - day_light_savings: integer,", "int, whether using day light savings. \"\"\" filters = list() # near lat,", "return self.postalcode is not None def __bool__(self): \"\"\"For Python3 bool() method. 
\"\"\" return", "postalcode except: raise ValueError(\"Can not find '%s'!\" % postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode,", "return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "self.dwellings = dwellings self.timezone = timezone self.day_light_savings = day_light_savings def __str__(self): return self.to_json(indent=4)", "'%s'!\" % postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by,", "returns=returns, ) def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less,", ":param lat, lng, radius: search near lat, lng with in xxx miles. :param", "not None: filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less is not None: filters.append(t.c.dwellings <= dwellings_less)", "select([t.c.postalcode]) all_postalcode = [row[0] for row in self.connect.execute(sql)] result = list() for postalcode", "whether this zipcode use day light savings. 
Compare two postal code is actually", "dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius: result.append(PostalCode._make(row)) if", "def __str__(self): return self.to_json(indent=4) def __eq__(self, other): return self.postalcode == other.postalcode def __lt__(self,", "else: raise ValueError(\"prefix has to be a 1-7 letter length!\") # substring if", "# Use heap sort to find top-K if ascending: heap = heapq.nsmallest(returns, heap,", "not isinstance(prefix, string_types): raise TypeError(\"prefix has to be a string\") if 1 <=", "0A3\" - city: city name, example: \"Ottawa\" - province: 2 letters province name", "def find(self, lat=None, lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None,", "example: \"ON\" - area_code: integer, 3 letter digits, example: 123 - area_name: area", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_substring(self, substring,", "not None: filters.append(t.c.dwellings <= dwellings_less) # timezone if timezone_greater is not None: filters.append(t.c.timezone", "= longitude self.elevation = elevation self.population = population self.dwellings = dwellings self.timezone =", "radius: # sort_by given, then sort by keyword if sort_by: result = list()", "radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng, radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, )", "self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "import great_circle from .pkg.six import string_types except: from cazipcode.data import ( engine, t,", ") from cazipcode.pkg.nameddict import Base from 
cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six import string_types", ":param prefix: all postal code with this prefix, for example: \"01A\" :param substring:", "ValueError(\"prefix has to be a 1-7 letter length!\") # substring if substring is", "= great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius: result.append(PostalCode._make(row)) if len(result)", "(lat_upper, lng_upper))) # print(\"%.6f\" % great_circle((lat, lng), (lat_lower, lng_lower))) filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude", "sql = sql.order_by(clause) # if use \"near\" search if radius: # sort_by given,", "key=lambda x: x[0]) result = [PostalCode._make(row) for _, row in heap] # else:", "except: from cazipcode.data import ( engine, t, find_province, find_city, find_area_name, fields, ) from", "= t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql = sql.limit(returns) result", ".data import ( engine, t, find_province, find_city, find_area_name, fields, ) from .pkg.nameddict import", "lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode,", "<= 7: pattern = \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has to", "city=city, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "# population if population_greater is not None: filters.append(t.c.population >= population_greater) if population_less is", "and radius is not None: dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(radians(lat)) * 69.172", "return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def 
random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode", "day_light_savings if day_light_savings is not None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) #", "province=province, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_code(self, area_code, sort_by=fields.postalcode,", "code is actually comparing it's postal code string. \"\"\" __attrs__ = [ \"postalcode\",", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending,", "a range. :param timezone: int, all postal code timezone exactly matches. 
:param day_light_savings:", "clause = t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql = sql.limit(returns) result = [PostalCode._make(row) for", "(lat_lower, lat_upper, lng_lower, lng_upper)) # print(\"%.6f\" % great_circle((lat, lng), (lat_upper, lng_upper))) # print(\"%.6f\"", "or not.\") # prefix if prefix is not None: if not isinstance(prefix, string_types):", "prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "province: try: province = find_province(province, best_match=True)[0] filters.append(t.c.province == province) except ValueError: pass #", "filters.append(t.c.longitude >= lng_greater) if lng_less is not None: filters.append(t.c.longitude <= lng_less) # elevation", "raise TypeError(\"substring has to be a string\") if 1 <= len(substring) <= 7:", "lat_less is not None: filters.append(t.c.latitude <= lat_less) # longitude if lng_greater is not", "if timezone: filters.append(t.c.timezone == timezone) # day_light_savings if day_light_savings is not None: day_light_savings", "returns: break # sort_by not given, then sort by distance, don't use limit", "by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings,", "( engine, t, find_province, find_city, find_area_name, fields, ) from cazipcode.pkg.nameddict import Base from", "space box. :param province, city, area_name: search by province, city, area_name. 
state name", "and lng is None and radius is None: pass else: raise ValueError(\"lat, lng,", "def __nonzero__(self): \"\"\"For Python2 bool() method. \"\"\" return self.postalcode is not None def", "a range. :param dwellings_greater, dwellings_less: dwellings falls in a range. :param timezone_greater, timezone_less:", "area_name. state name could be 2-letter abbreviation, or full name, and this search", "timezone falls in a range. :param timezone: int, all postal code timezone exactly", "lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None, city=None, area_name=None, area_code=None, population_greater=None,", "if prefix is not None: if not isinstance(prefix, string_types): raise TypeError(\"prefix has to", "lat is None and lng is None and radius is None: pass else:", "filters.append(t.c.city == city) except ValueError: pass # area_name if area_name: try: area_name =", "list() for row in self.connect.execute(sql): # 43.959918, 46.995828, -77.885944, -73.556256 dist = great_circle(", "to be a 1-7 letter length!\") # province if province: try: province =", "dwellings_less is not None: filters.append(t.c.dwellings <= dwellings_less) # timezone if timezone_greater is not", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city,", "day light savings. \"\"\" filters = list() # near lat, lng if lat", "in xxx miles. 
:param lat_greater, lat_less, lng_greater, lng_less, elevation_greater, elevation_less: search postalcode within", "radius: result.append(PostalCode._make(row)) if len(result) == returns: break # sort_by not given, then sort", "by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by,", "self.area_name = area_name self.latitude = latitude self.longitude = longitude self.elevation = elevation self.population", "this prefix, for example: \"01A\" :param substring: all postal code contains this substring.", "lat_upper, lng_lower, lng_upper)) # print(\"%.6f\" % great_circle((lat, lng), (lat_upper, lng_upper))) # print(\"%.6f\" %", "area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None,", "ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_dwellings(self, dwellings_greater=None,", "heap, key=lambda x: x[0]) else: heap = heapq.nlargest(returns, heap, key=lambda x: x[0]) result", "lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None, city=None, area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None,", "lng_lower = lng - lon_degr_rad lng_upper = lng + lon_degr_rad # print(\"%.6f, %.6f,", "integer, 3 letter digits, example: 123 - area_name: area name, example: \"Ottawa\" -", "lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng, radius=radius, sort_by=sort_by, ascending=ascending, returns=returns,", 
"to be a string\") if 1 <= len(prefix) <= 7: pattern = \"%s%%\"", "timezone_greater) if timezone_less is not None: filters.append(t.c.timezone <= timezone_less) if timezone: filters.append(t.c.timezone ==", "ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search method. :param lat, lng, radius: search near lat,", "all postal code timezone exactly matches. :param day_light_savings: bool or int, whether using", "find_city, find_area_name, fields, ) from .pkg.nameddict import Base from .pkg.geo_search import great_circle from", "- postalcode: 7 letter, example: \"A0A 0A3\" - city: city name, example: \"Ottawa\"", "raise ValueError(\"lat, lng, radius has to be all given or not.\") # prefix", "lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None, city=None, area_name=None, area_code=None, population_greater=None, population_less=None,", "has to be a string\") if 1 <= len(prefix) <= 7: pattern =", "<= lng_less) # elevation if elevation_greater is not None: filters.append(t.c.elevation >= elevation_greater) if", "returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_timezone(self, timezone=None, timezone_greater=None,", "dist_btwn_lon_deg = cos(radians(lat)) * 69.172 lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad", "given, then sort by keyword if sort_by: result = list() for row in", "city name, example: \"Ottawa\" - province: 2 letters province name abbreviation, example: \"ON\"", "1-7 letter length!\") # province if province: try: province = find_province(province, best_match=True)[0] filters.append(t.c.province", "near(self, lat, lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng, radius=radius, sort_by=sort_by,", "elevation self.population = population self.dwellings = 
dwellings self.timezone = timezone self.day_light_savings = day_light_savings", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code,", "if population_greater is not None: filters.append(t.c.population >= population_greater) if population_less is not None:", ") def by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns,", "def by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, )", "pass else: raise ValueError(\"lat, lng, radius has to be all given or not.\")", "lat=lat, lng=lng, radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_postalcode(self, postalcode): \"\"\"Find exact postal", "radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None, city=None, area_name=None, area_code=None,", "population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search", "* 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower =", "lat=None, lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None, city=None,", "top-K if ascending: heap = heapq.nsmallest(returns, heap, key=lambda x: x[0]) else: heap =", "not find '%s'!\" % postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return 
self.find(", "SearchEngine(object): \"\"\" \"\"\" def __init__(self): self.connect = engine.connect() def __enter__(self): return self def", "\"\"\"For Python2 bool() method. \"\"\" return self.postalcode is not None def __bool__(self): \"\"\"For", "pattern = \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has to be a", "None and lng is None and radius is None: pass else: raise ValueError(\"lat,", "row in self.connect.execute(sql)] return result def near(self, lat, lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", ") def by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns,", "state name could be 2-letter abbreviation, or full name, and this search is", "in self.connect.execute(sql)] result = list() for postalcode in random.sample(all_postalcode, returns): result.append(self.by_postalcode(postalcode)) return result", "heap.append((dist, row)) # Use heap sort to find top-K if ascending: heap =", "self.connect.execute(sql)] return result def near(self, lat, lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "dwellings_greater) if dwellings_less is not None: filters.append(t.c.dwellings <= dwellings_less) # timezone if timezone_greater", "== area_code) # latitude if lat_greater is not None: filters.append(t.c.latitude >= lat_greater) if", "lng_greater) if lng_less is not None: filters.append(t.c.longitude <= lng_less) # elevation if elevation_greater", "bool or int, whether using day light savings. 
\"\"\" filters = list() #", "returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_city(self, city, sort_by=fields.postalcode, ascending=True,", "import ( engine, t, find_province, find_city, find_area_name, fields, ) from cazipcode.pkg.nameddict import Base", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_city(self, city,", "example: \"Ottawa\" - latitude: latitude - longitude: longitude - elevation: elevation - population:", "tolerant. :param area_code: int, all postal code area_code exactly matches. :param prefix: all", "None: filters.append(t.c.elevation >= elevation_greater) if elevation_less is not None: filters.append(t.c.elevation <= elevation_less) #", "isinstance(substring, string_types): raise TypeError(\"substring has to be a string\") if 1 <= len(substring)", "else: if not sort_by: if ascending: clause = t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc()", "\"\"\" \"\"\" def __init__(self): self.connect = engine.connect() def __enter__(self): return self def __exit__(self,", "self.population = population self.dwellings = dwellings self.timezone = timezone self.day_light_savings = day_light_savings def", "lng), (row.latitude, row.longitude)) if dist <= radius: heap.append((dist, row)) # Use heap sort", "\"\"\" sql = select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except:", "else: clause = t.c[sort_by].desc() sql = sql.order_by(clause) # if use \"near\" search if", "return self.postalcode == other.postalcode def __lt__(self, other): return self.postalcode < other.postalcode def __nonzero__(self):", "is None and radius is None: pass else: raise ValueError(\"lat, lng, radius has", "returns=DEFAULT_LIMIT): return self.find( 
substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_province(self, province, sort_by=fields.postalcode, ascending=True,", "ValueError: pass # area_name if area_name: try: area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name ==", "from .pkg.nameddict import Base from .pkg.geo_search import great_circle from .pkg.six import string_types except:", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "heapq.nsmallest(returns, heap, key=lambda x: x[0]) else: heap = heapq.nlargest(returns, heap, key=lambda x: x[0])", "search near lat, lng with in xxx miles. :param lat_greater, lat_less, lng_greater, lng_less,", "string_types @total_ordering class PostalCode(Base): \"\"\"Represent a postal code. 
Attributes: - postalcode: 7 letter,", "isinstance(prefix, string_types): raise TypeError(\"prefix has to be a string\") if 1 <= len(prefix)", ") def random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode = [row[0] for row in", "import radians, cos from functools import total_ordering from sqlalchemy import select, func, and_", "integer, dwellings - timezone: integer, timezone - day_light_savings: integer, indicate that whether this", "result def near(self, lat, lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng,", "clause = t.c[sort_by].asc() else: clause = t.c[sort_by].desc() sql = sql.order_by(clause) # if use", "day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode,", "ValueError: pass # area_code if area_code: filters.append(t.c.area_code == area_code) # latitude if lat_greater", "exactly matches. :param prefix: all postal code with this prefix, for example: \"01A\"", "postal code. 
\"\"\" sql = select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return", "row)) # Use heap sort to find top-K if ascending: heap = heapq.nsmallest(returns,", "self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True,", ".pkg.geo_search import great_circle from .pkg.six import string_types except: from cazipcode.data import ( engine,", "not None: filters.append(t.c.population <= population_less) # dwellings if dwellings_greater is not None: filters.append(t.c.dwellings", "timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "population: integer, population - dwellings: integer, dwellings - timezone: integer, timezone - day_light_savings:", "row in heap] # else: if not sort_by: if ascending: clause = t.c[fields.postalcode].asc()", "self.timezone = timezone self.day_light_savings = day_light_savings def __str__(self): return self.to_json(indent=4) def __eq__(self, other):", "search postalcode within a 3-d space box. 
:param province, city, area_name: search by", "lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper", "area_code: integer, 3 letter digits, example: 123 - area_name: area name, example: \"Ottawa\"", "t, find_province, find_city, find_area_name, fields, ) from .pkg.nameddict import Base from .pkg.geo_search import", "def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, )", "functools import total_ordering from sqlalchemy import select, func, and_ try: from .data import", "sqlalchemy import select, func, and_ try: from .data import ( engine, t, find_province,", "is not None: if not isinstance(prefix, string_types): raise TypeError(\"prefix has to be a", "= \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has to be a 1-7", "except ValueError: pass # area_name if area_name: try: area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name", "filters.append(t.c.area_name == area_name) except ValueError: pass # area_code if area_code: filters.append(t.c.area_code == area_code)", "is not None: filters.append(t.c.population <= population_less) # dwellings if dwellings_greater is not None:", "returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_day_light_savings(self, day_light_savings,", "None: filters.append(t.c.latitude <= lat_less) # longitude if lng_greater is not None: filters.append(t.c.longitude >=", "area_code self.area_name = area_name self.latitude = latitude self.longitude = longitude self.elevation = elevation", "None and lng is not None and radius is not None: dist_btwn_lat_deg =", "# print(\"%.6f\" % great_circle((lat, lng), (lat_lower, lng_lower))) 
filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <= lat_upper)", "sql.order_by(clause) sql = sql.limit(returns) result = [PostalCode._make(row) for row in self.connect.execute(sql)] return result", "Base from .pkg.geo_search import great_circle from .pkg.six import string_types except: from cazipcode.data import", "not None DEFAULT_LIMIT = 5 class SearchEngine(object): \"\"\" \"\"\" def __init__(self): self.connect =", "if ascending: clause = t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql", "sort_by: if ascending: clause = t.c[sort_by].asc() else: clause = t.c[sort_by].desc() sql = sql.order_by(clause)", "a string\") if 1 <= len(prefix) <= 7: pattern = \"%s%%\" % prefix", "= t.c[sort_by].asc() else: clause = t.c[sort_by].desc() sql = sql.order_by(clause) # if use \"near\"", "= area_code self.area_name = area_name self.latitude = latitude self.longitude = longitude self.elevation =", "this substring. :param population_greater, population_less: population falls in a range. 
:param dwellings_greater, dwellings_less:", "if ascending: clause = t.c[sort_by].asc() else: clause = t.c[sort_by].desc() sql = sql.order_by(clause) #", "dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True,", "clause = t.c[sort_by].desc() sql = sql.order_by(clause) # if use \"near\" search if radius:", "<= lat_less) # longitude if lng_greater is not None: filters.append(t.c.longitude >= lng_greater) if", "heapq.nlargest(returns, heap, key=lambda x: x[0]) result = [PostalCode._make(row) for _, row in heap]", "x: x[0]) result = [PostalCode._make(row) for _, row in heap] # else: if", "# city if city: try: city = find_city(city, best_match=True)[0] filters.append(t.c.city == city) except", "find_province, find_city, find_area_name, fields, ) from cazipcode.pkg.nameddict import Base from cazipcode.pkg.geo_search import great_circle", "with in xxx miles. :param lat_greater, lat_less, lng_greater, lng_less, elevation_greater, elevation_less: search postalcode", "if elevation_greater is not None: filters.append(t.c.elevation >= elevation_greater) if elevation_less is not None:", "city if city: try: city = find_city(city, best_match=True)[0] filters.append(t.c.city == city) except ValueError:", "lng_greater, lng_less, elevation_greater, elevation_less: search postalcode within a 3-d space box. 
:param province,", "= t.c[sort_by].desc() sql = sql.order_by(clause) # if use \"near\" search if radius: #", "\"elevation\", \"population\", \"dwellings\", \"timezone\", \"day_light_savings\", ] def __init__(self, postalcode=None, province=None, city=None, area_code=None, area_name=None,", ") def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns,", "- population: integer, population - dwellings: integer, dwellings - timezone: integer, timezone -", "== city) except ValueError: pass # area_name if area_name: try: area_name = find_area_name(area_name,", "self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "= area_name self.latitude = latitude self.longitude = longitude self.elevation = elevation self.population =", "by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "name, example: \"Ottawa\" - province: 2 letters province name abbreviation, example: \"ON\" -", "falls in a range. :param dwellings_greater, dwellings_less: dwellings falls in a range. :param", "- lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper", "None: filters.append(t.c.longitude >= lng_greater) if lng_less is not None: filters.append(t.c.longitude <= lng_less) #", "find_city(city, best_match=True)[0] filters.append(t.c.city == city) except ValueError: pass # area_name if area_name: try:", "ascending=True, returns=DEFAULT_LIMIT, ) def random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode = [row[0] for", "lat is not None and lng is not None and radius is not", "PostalCode(Base): \"\"\"Represent a postal code. 
Attributes: - postalcode: 7 letter, example: \"A0A 0A3\"", "ascending=ascending, returns=returns, ) def by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring, sort_by=sort_by,", "% prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has to be a 1-7 letter length!\")", "postal code string. \"\"\" __attrs__ = [ \"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\", \"latitude\",", "lng_less is not None: filters.append(t.c.longitude <= lng_less) # elevation if elevation_greater is not", "radius has to be all given or not.\") # prefix if prefix is", "7: pattern = \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has to be", "= select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise ValueError(\"Can", "example: 123 - area_name: area name, example: \"Ottawa\" - latitude: latitude - longitude:", "elif lat is None and lng is None and radius is None: pass", "class PostalCode(Base): \"\"\"Represent a postal code. Attributes: - postalcode: 7 letter, example: \"A0A", "__lt__(self, other): return self.postalcode < other.postalcode def __nonzero__(self): \"\"\"For Python2 bool() method. 
\"\"\"", "== province) except ValueError: pass # city if city: try: city = find_city(city,", "dwellings_less) # timezone if timezone_greater is not None: filters.append(t.c.timezone >= timezone_greater) if timezone_less", "ascending=ascending, returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT,", "a 1-7 letter length!\") # substring if substring is not None: if not", "abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper = lat", "great_circle((lat, lng), (lat_upper, lng_upper))) # print(\"%.6f\" % great_circle((lat, lng), (lat_lower, lng_lower))) filters.append(t.c.latitude >=", "cazipcode.data import ( engine, t, find_province, find_city, find_area_name, fields, ) from cazipcode.pkg.nameddict import", "= timezone self.day_light_savings = day_light_savings def __str__(self): return self.to_json(indent=4) def __eq__(self, other): return", "2 letters province name abbreviation, example: \"ON\" - area_code: integer, 3 letter digits,", "is not None def __bool__(self): \"\"\"For Python3 bool() method. \"\"\" return self.postalcode is", "cazipcode.pkg.nameddict import Base from cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six import string_types @total_ordering class", "all postal code area_code exactly matches. :param prefix: all postal code with this", "letters province name abbreviation, example: \"ON\" - area_code: integer, 3 letter digits, example:", "use day light savings. Compare two postal code is actually comparing it's postal", "length!\") # province if province: try: province = find_province(province, best_match=True)[0] filters.append(t.c.province == province)", "method. 
\"\"\" return self.postalcode is not None DEFAULT_LIMIT = 5 class SearchEngine(object): \"\"\"", "radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_postalcode(self, postalcode): \"\"\"Find exact postal code. \"\"\"", "= list() # near lat, lng if lat is not None and lng", "for row in self.connect.execute(sql): dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist", "then sort by distance, don't use limit clause else: heap = list() for", "return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "heap, key=lambda x: x[0]) result = [PostalCode._make(row) for _, row in heap] #", "x[0]) result = [PostalCode._make(row) for _, row in heap] # else: if not", ") def by_postalcode(self, postalcode): \"\"\"Find exact postal code. \"\"\" sql = select([t]).where(t.c.postalcode ==", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "returns=DEFAULT_LIMIT, ) def random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode = [row[0] for row", "try: area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name) except ValueError: pass # area_code", "<= lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <= lng_upper) elif lat is None and", "is not None: filters.append(t.c.latitude >= lat_greater) if lat_less is not None: filters.append(t.c.latitude <=", "self.day_light_savings = day_light_savings def __str__(self): return self.to_json(indent=4) def __eq__(self, other): return self.postalcode ==", "in a range. :param timezone_greater, timezone_less: timezone falls in a range. 
:param timezone:", "then sort by keyword if sort_by: result = list() for row in self.connect.execute(sql):", "1 <= len(substring) <= 7: pattern = \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) else: raise", "returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_dwellings(self, dwellings_greater=None, dwellings_less=None,", "row.longitude)) if dist <= radius: result.append(PostalCode._make(row)) if len(result) == returns: break # sort_by", "dist <= radius: heap.append((dist, row)) # Use heap sort to find top-K if", "ascending: heap = heapq.nsmallest(returns, heap, key=lambda x: x[0]) else: heap = heapq.nlargest(returns, heap,", "is not None: filters.append(t.c.population >= population_greater) if population_less is not None: filters.append(t.c.population <=", ">= lat_greater) if lat_less is not None: filters.append(t.c.latitude <= lat_less) # longitude if", "lng, radius has to be all given or not.\") # prefix if prefix", "name could be 2-letter abbreviation, or full name, and this search is fuzzy", "by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "this search is fuzzy and typo tolerant. 
:param area_code: int, all postal code", "dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(radians(lat)) * 69.172 lat_degr_rad = abs(radius * 1.05", "radius is None: pass else: raise ValueError(\"lat, lng, radius has to be all", "ValueError: pass # city if city: try: city = find_city(city, best_match=True)[0] filters.append(t.c.city ==", "= abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper =", "- province: 2 letters province name abbreviation, example: \"ON\" - area_code: integer, 3", "def by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns, )", "\"\"\"Represent a postal code. Attributes: - postalcode: 7 letter, example: \"A0A 0A3\" -", "prefix, for example: \"01A\" :param substring: all postal code contains this substring. :param", "= elevation self.population = population self.dwellings = dwellings self.timezone = timezone self.day_light_savings =", "city: city name, example: \"Ottawa\" - province: 2 letters province name abbreviation, example:", "population_greater) if population_less is not None: filters.append(t.c.population <= population_less) # dwellings if dwellings_greater", "1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower = lat", "timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "method. \"\"\" return self.postalcode is not None def __bool__(self): \"\"\"For Python3 bool() method.", "area_code exactly matches. :param prefix: all postal code with this prefix, for example:", "_, row in heap] # else: if not sort_by: if ascending: clause =", "name, and this search is fuzzy and typo tolerant. 
:param area_code: int, all", "other): return self.postalcode < other.postalcode def __nonzero__(self): \"\"\"For Python2 bool() method. \"\"\" return", "\"\"\" return self.postalcode is not None DEFAULT_LIMIT = 5 class SearchEngine(object): \"\"\" \"\"\"", "result = [PostalCode._make(row) for _, row in heap] # else: if not sort_by:", "def __enter__(self): return self def __exit__(self, *exc_info): self.connect.close() def close(self): \"\"\"Closs engine. **中文文档**", "typo tolerant. :param area_code: int, all postal code area_code exactly matches. :param prefix:", "longitude - elevation: elevation - population: integer, population - dwellings: integer, dwellings -", "-73.556256 dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius: heap.append((dist,", ".pkg.nameddict import Base from .pkg.geo_search import great_circle from .pkg.six import string_types except: from", "this zipcode use day light savings. Compare two postal code is actually comparing", "lon_degr_rad # print(\"%.6f, %.6f, %.6f, %.6f\" % (lat_lower, lat_upper, lng_lower, lng_upper)) # print(\"%.6f\"", "area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name) except ValueError: pass # area_code if", "be a string\") if 1 <= len(substring) <= 7: pattern = \"%%%s%%\" %", "<= elevation_less) # population if population_greater is not None: filters.append(t.c.population >= population_greater) if", "def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "ValueError(\"Can not find '%s'!\" % postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "x[0]) else: heap = heapq.nlargest(returns, heap, key=lambda x: x[0]) result = [PostalCode._make(row) for", "population_greater=population_greater, 
population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "< other.postalcode def __nonzero__(self): \"\"\"For Python2 bool() method. \"\"\" return self.postalcode is not", "string\") if 1 <= len(substring) <= 7: pattern = \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern))", "int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) # execute query sql = select([t]).where(and_(*filters)) if sort_by: if", "t.c[sort_by].asc() else: clause = t.c[sort_by].desc() sql = sql.order_by(clause) # if use \"near\" search", "self.connect = engine.connect() def __enter__(self): return self def __exit__(self, *exc_info): self.connect.close() def close(self):", "if elevation_less is not None: filters.append(t.c.elevation <= elevation_less) # population if population_greater is", "and_ try: from .data import ( engine, t, find_province, find_city, find_area_name, fields, )", "= list() for row in self.connect.execute(sql): # 43.959918, 46.995828, -77.885944, -73.556256 dist =", "elevation_greater, elevation_less: search postalcode within a 3-d space box. :param province, city, area_name:", "and typo tolerant. :param area_code: int, all postal code area_code exactly matches. 
:param", "if lng_less is not None: filters.append(t.c.longitude <= lng_less) # elevation if elevation_greater is", ") def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "substring if substring is not None: if not isinstance(substring, string_types): raise TypeError(\"substring has", "is not None: filters.append(t.c.timezone >= timezone_greater) if timezone_less is not None: filters.append(t.c.timezone <=", "result = list() for row in self.connect.execute(sql): dist = great_circle( (lat, lng), (row.latitude,", "integer, timezone - day_light_savings: integer, indicate that whether this zipcode use day light", "by_postalcode(self, postalcode): \"\"\"Find exact postal code. \"\"\" sql = select([t]).where(t.c.postalcode == postalcode.strip().upper()) try:", "not None: filters.append(t.c.population >= population_greater) if population_less is not None: filters.append(t.c.population <= population_less)", "__init__(self, postalcode=None, province=None, city=None, area_code=None, area_name=None, latitude=None, longitude=None, elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None):", "len(substring) <= 7: pattern = \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has", "returns=returns, ) def by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by, ascending=ascending,", "code area_code exactly matches. 
:param prefix: all postal code with this prefix, for", "if area_code: filters.append(t.c.area_code == area_code) # latitude if lat_greater is not None: filters.append(t.c.latitude", "func, and_ try: from .data import ( engine, t, find_province, find_city, find_area_name, fields,", "is not None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) # execute query sql", "Use heap sort to find top-K if ascending: heap = heapq.nsmallest(returns, heap, key=lambda", "longitude: longitude - elevation: elevation - population: integer, population - dwellings: integer, dwellings", "* 1.05 / dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper = lat +", "if not sort_by: if ascending: clause = t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc() sql", "lng_upper) elif lat is None and lng is None and radius is None:", "= sql.order_by(clause) sql = sql.limit(returns) result = [PostalCode._make(row) for row in self.connect.execute(sql)] return", "clause = t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql = sql.limit(returns)", "not None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) # execute query sql =", "\"\"\" return self.postalcode is not None def __bool__(self): \"\"\"For Python3 bool() method. \"\"\"", "latitude=None, longitude=None, elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode = postalcode self.province = province", "abbreviation, example: \"ON\" - area_code: integer, 3 letter digits, example: 123 - area_name:", "a postal code. Attributes: - postalcode: 7 letter, example: \"A0A 0A3\" - city:", "area name, example: \"Ottawa\" - latitude: latitude - longitude: longitude - elevation: elevation", "area_code: int, all postal code area_code exactly matches. 
:param prefix: all postal code", "dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower =", "digits, example: 123 - area_name: area name, example: \"Ottawa\" - latitude: latitude -", "cos(radians(lat)) * 69.172 lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius", "- day_light_savings: integer, indicate that whether this zipcode use day light savings. Compare", "t.c[sort_by].desc() sql = sql.order_by(clause) # if use \"near\" search if radius: # sort_by", "returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_substring(self, substring, sort_by=fields.postalcode, ascending=True,", "all postal code contains this substring. :param population_greater, population_less: population falls in a", "import string_types @total_ordering class PostalCode(Base): \"\"\"Represent a postal code. Attributes: - postalcode: 7", "postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns,", "def __init__(self): self.connect = engine.connect() def __enter__(self): return self def __exit__(self, *exc_info): self.connect.close()", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None,", "\"population\", \"dwellings\", \"timezone\", \"day_light_savings\", ] def __init__(self, postalcode=None, province=None, city=None, area_code=None, area_name=None, latitude=None,", "lng_upper = lng + lon_degr_rad # print(\"%.6f, %.6f, %.6f, %.6f\" % (lat_lower, lat_upper,", "code contains this substring. :param population_greater, population_less: population falls in a range. 
:param", "x: x[0]) else: heap = heapq.nlargest(returns, heap, key=lambda x: x[0]) result = [PostalCode._make(row)", "None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) # execute query sql = select([t]).where(and_(*filters))", "self.to_json(indent=4) def __eq__(self, other): return self.postalcode == other.postalcode def __lt__(self, other): return self.postalcode", "population_greater, population_less: population falls in a range. :param dwellings_greater, dwellings_less: dwellings falls in", "if lng_greater is not None: filters.append(t.c.longitude >= lng_greater) if lng_less is not None:", "self.connect.execute(sql): dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius: result.append(PostalCode._make(row))", "day_light_savings is not None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) # execute query", "find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name) except ValueError: pass # area_code if area_code: filters.append(t.c.area_code", "self.province = province self.city = city self.area_code = area_code self.area_name = area_name self.latitude", "= postalcode self.province = province self.city = city self.area_code = area_code self.area_name =", "timezone_greater is not None: filters.append(t.c.timezone >= timezone_greater) if timezone_less is not None: filters.append(t.c.timezone", "find_city, find_area_name, fields, ) from cazipcode.pkg.nameddict import Base from cazipcode.pkg.geo_search import great_circle from", "timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns, )", "return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, 
ascending=ascending, returns=returns, ) def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode,", "%.6f\" % (lat_lower, lat_upper, lng_lower, lng_upper)) # print(\"%.6f\" % great_circle((lat, lng), (lat_upper, lng_upper)))", "return result def near(self, lat, lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat,", "timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search method. :param lat,", "not None: filters.append(t.c.timezone <= timezone_less) if timezone: filters.append(t.c.timezone == timezone) # day_light_savings if", "dwellings=None, timezone=None, day_light_savings=None): self.postalcode = postalcode self.province = province self.city = city self.area_code", "could be 2-letter abbreviation, or full name, and this search is fuzzy and", "returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "example: \"A0A 0A3\" - city: city name, example: \"Ottawa\" - province: 2 letters", "savings. Compare two postal code is actually comparing it's postal code string. 
\"\"\"", "don't use limit clause else: heap = list() for row in self.connect.execute(sql): #", ") def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def", "find_province(province, best_match=True)[0] filters.append(t.c.province == province) except ValueError: pass # city if city: try:", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name,", "dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "lng_lower))) filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <= lng_upper)", "engine. **中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def find(self, lat=None, lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None,", "- lon_degr_rad lng_upper = lng + lon_degr_rad # print(\"%.6f, %.6f, %.6f, %.6f\" %", "ascending: clause = t.c[sort_by].asc() else: clause = t.c[sort_by].desc() sql = sql.order_by(clause) # if", "(lat, lng), (row.latitude, row.longitude)) if dist <= radius: result.append(PostalCode._make(row)) if len(result) == returns:", "ascending=ascending, returns=returns, ) def by_postalcode(self, postalcode): \"\"\"Find exact postal code. 
\"\"\" sql =", "None: filters.append(t.c.timezone <= timezone_less) if timezone: filters.append(t.c.timezone == timezone) # day_light_savings if day_light_savings", "population_greater is not None: filters.append(t.c.population >= population_greater) if population_less is not None: filters.append(t.c.population", "% postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending,", "ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None, lat_less=None,", "dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "to be a 1-7 letter length!\") # substring if substring is not None:", "population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful", "None: pass else: raise ValueError(\"lat, lng, radius has to be all given or", ">= lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <= lng_upper) elif lat", "lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True,", "filters.append(t.c.population >= population_greater) if population_less is not None: filters.append(t.c.population <= population_less) # dwellings", "filters.append(t.c.timezone >= timezone_greater) if timezone_less is not None: 
filters.append(t.c.timezone <= timezone_less) if timezone:", "other.postalcode def __nonzero__(self): \"\"\"For Python2 bool() method. \"\"\" return self.postalcode is not None", "population_less: population falls in a range. :param dwellings_greater, dwellings_less: dwellings falls in a", "\"\"\"Find exact postal code. \"\"\" sql = select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode =", "lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_population(self, population_greater=None,", "lng - lon_degr_rad lng_upper = lng + lon_degr_rad # print(\"%.6f, %.6f, %.6f, %.6f\"", "filters = list() # near lat, lng if lat is not None and", ">= elevation_greater) if elevation_less is not None: filters.append(t.c.elevation <= elevation_less) # population if", "postal code. Attributes: - postalcode: 7 letter, example: \"A0A 0A3\" - city: city", "is not None: filters.append(t.c.dwellings <= dwellings_less) # timezone if timezone_greater is not None:", "contains this substring. :param population_greater, population_less: population falls in a range. :param dwellings_greater,", "dwellings self.timezone = timezone self.day_light_savings = day_light_savings def __str__(self): return self.to_json(indent=4) def __eq__(self,", "bool() method. \"\"\" return self.postalcode is not None def __bool__(self): \"\"\"For Python3 bool()", "substring. :param population_greater, population_less: population falls in a range. :param dwellings_greater, dwellings_less: dwellings", "not None: filters.append(t.c.latitude <= lat_less) # longitude if lng_greater is not None: filters.append(t.c.longitude", "python # -*- coding: utf-8 -*- import random import heapq from math import", "box. :param province, city, area_name: search by province, city, area_name. 
state name could", "has to be all given or not.\") # prefix if prefix is not", "province) except ValueError: pass # city if city: try: city = find_city(city, best_match=True)[0]", "fields, ) from .pkg.nameddict import Base from .pkg.geo_search import great_circle from .pkg.six import", "name abbreviation, example: \"ON\" - area_code: integer, 3 letter digits, example: 123 -", "if area_name: try: area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name) except ValueError: pass", "ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True,", ") def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns,", "lng if lat is not None and lng is not None and radius", "sort_by: result = list() for row in self.connect.execute(sql): dist = great_circle( (lat, lng),", "ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode, ascending=True,", "try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise ValueError(\"Can not find '%s'!\" %", "if population_less is not None: filters.append(t.c.population <= population_less) # dwellings if dwellings_greater is", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_province(self, province,", "<= radius: heap.append((dist, row)) # Use heap sort to find top-K if ascending:", "area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, ) def 
by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "\"ON\" - area_code: integer, 3 letter digits, example: 123 - area_name: area name,", "self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None,", "by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "filters.append(t.c.population <= population_less) # dwellings if dwellings_greater is not None: filters.append(t.c.dwellings >= dwellings_greater)", "elevation_less=None, prefix=None, substring=None, province=None, city=None, area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None,", "fields, ) from cazipcode.pkg.nameddict import Base from cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six import", "None: filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less is not None: filters.append(t.c.dwellings <= dwellings_less) #", "execute query sql = select([t]).where(and_(*filters)) if sort_by: if ascending: clause = t.c[sort_by].asc() else:", "has to be a string\") if 1 <= len(substring) <= 7: pattern =", "example: \"Ottawa\" - province: 2 letters province name abbreviation, example: \"ON\" - area_code:", "abbreviation, or full name, and this search is fuzzy and typo tolerant. 
:param", "prefix=None, substring=None, province=None, city=None, area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None,", "day_light_savings=None): self.postalcode = postalcode self.province = province self.city = city self.area_code = area_code", "returns=returns, ) def by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by, ascending=ascending,", "radians, cos from functools import total_ordering from sqlalchemy import select, func, and_ try:", "return self.postalcode < other.postalcode def __nonzero__(self): \"\"\"For Python2 bool() method. \"\"\" return self.postalcode", "3-d space box. :param province, city, area_name: search by province, city, area_name. state", "great_circle((lat, lng), (lat_lower, lng_lower))) filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >= lng_lower)", "two postal code is actually comparing it's postal code string. \"\"\" __attrs__ =", "pattern = \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has to be a", "self.area_code = area_code self.area_name = area_name self.latitude = latitude self.longitude = longitude self.elevation", "lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper =", "xxx miles. :param lat_greater, lat_less, lng_greater, lng_less, elevation_greater, elevation_less: search postalcode within a", "timezone: int, all postal code timezone exactly matches. 
:param day_light_savings: bool or int,", "# area_name if area_name: try: area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name) except", "# latitude if lat_greater is not None: filters.append(t.c.latitude >= lat_greater) if lat_less is", "province name abbreviation, example: \"ON\" - area_code: integer, 3 letter digits, example: 123", "try: province = find_province(province, best_match=True)[0] filters.append(t.c.province == province) except ValueError: pass # city", "area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_code(self,", "self def __exit__(self, *exc_info): self.connect.close() def close(self): \"\"\"Closs engine. **中文文档** 断开与数据库的连接。 \"\"\" self.connect.close()", "\"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has to be a 1-7 letter", "self.postalcode < other.postalcode def __nonzero__(self): \"\"\"For Python2 bool() method. \"\"\" return self.postalcode is", "from functools import total_ordering from sqlalchemy import select, func, and_ try: from .data", "timezone=None, day_light_savings=None): self.postalcode = postalcode self.province = province self.city = city self.area_code =", "\"\"\" filters = list() # near lat, lng if lat is not None", "example: \"01A\" :param substring: all postal code contains this substring. 
:param population_greater, population_less:", "area_code: filters.append(t.c.area_code == area_code) # latitude if lat_greater is not None: filters.append(t.c.latitude >=", "in self.connect.execute(sql): dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius:", "lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng - lon_degr_rad", "filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <= lng_upper) elif lat is None", "if 1 <= len(prefix) <= 7: pattern = \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else:", "lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less,", "or full name, and this search is fuzzy and typo tolerant. :param area_code:", "\"city\", \"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\", \"timezone\", \"day_light_savings\", ] def", "a 3-d space box. 
:param province, city, area_name: search by province, city, area_name.", "population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, )", "not None and radius is not None: dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(radians(lat))", "-*- import random import heapq from math import radians, cos from functools import", "is not None DEFAULT_LIMIT = 5 class SearchEngine(object): \"\"\" \"\"\" def __init__(self): self.connect", "lng + lon_degr_rad # print(\"%.6f, %.6f, %.6f, %.6f\" % (lat_lower, lat_upper, lng_lower, lng_upper))", "coding: utf-8 -*- import random import heapq from math import radians, cos from", "def __init__(self, postalcode=None, province=None, city=None, area_code=None, area_name=None, latitude=None, longitude=None, elevation=None, population=None, dwellings=None, timezone=None,", "dwellings if dwellings_greater is not None: filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less is not", "for row in self.connect.execute(sql)] return result def near(self, lat, lng, radius, sort_by=fields.postalcode, ascending=True,", "\"Ottawa\" - province: 2 letters province name abbreviation, example: \"ON\" - area_code: integer,", "from cazipcode.pkg.nameddict import Base from cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six import string_types @total_ordering", "def __exit__(self, *exc_info): self.connect.close() def close(self): \"\"\"Closs engine. 
**中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def", "else: raise ValueError(\"substring has to be a 1-7 letter length!\") # province if", "**中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def find(self, lat=None, lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None,", "<= timezone_less) if timezone: filters.append(t.c.timezone == timezone) # day_light_savings if day_light_savings is not", "timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending,", ":param timezone_greater, timezone_less: timezone falls in a range. :param timezone: int, all postal", "class SearchEngine(object): \"\"\" \"\"\" def __init__(self): self.connect = engine.connect() def __enter__(self): return self", "dwellings_greater is not None: filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less is not None: filters.append(t.c.dwellings", "sql.order_by(clause) # if use \"near\" search if radius: # sort_by given, then sort", "in self.connect.execute(sql)] return result def near(self, lat, lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "lat_greater is not None: filters.append(t.c.latitude >= lat_greater) if lat_less is not None: filters.append(t.c.latitude", "filters.append(t.c.day_light_savings == day_light_savings) # execute query sql = select([t]).where(and_(*filters)) if sort_by: if ascending:", "savings. 
\"\"\" filters = list() # near lat, lng if lat is not", "ascending=ascending, returns=returns, ) def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater,", "# dwellings if dwellings_greater is not None: filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less is", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def random(self, returns=DEFAULT_LIMIT): sql", "= cos(radians(lat)) * 69.172 lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad =", "\"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\", \"timezone\", \"day_light_savings\", ]", "great_circle from cazipcode.pkg.six import string_types @total_ordering class PostalCode(Base): \"\"\"Represent a postal code. Attributes:", "\"01A\" :param substring: all postal code contains this substring. :param population_greater, population_less: population", "not None: if not isinstance(substring, string_types): raise TypeError(\"substring has to be a string\")", "123 - area_name: area name, example: \"Ottawa\" - latitude: latitude - longitude: longitude", "postalcode within a 3-d space box. 
:param province, city, area_name: search by province,", "area_name if area_name: try: area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name) except ValueError:", "import string_types except: from cazipcode.data import ( engine, t, find_province, find_city, find_area_name, fields,", "if lat is not None and lng is not None and radius is", "area_code=None, area_name=None, latitude=None, longitude=None, elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode = postalcode self.province", "filters.append(t.c.area_code == area_code) # latitude if lat_greater is not None: filters.append(t.c.latitude >= lat_greater)", "falls in a range. :param timezone: int, all postal code timezone exactly matches.", ">= lng_greater) if lng_less is not None: filters.append(t.c.longitude <= lng_less) # elevation if", "= sql.order_by(clause) # if use \"near\" search if radius: # sort_by given, then", "substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find(", "lng_upper))) # print(\"%.6f\" % great_circle((lat, lng), (lat_lower, lng_lower))) filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <=", "latitude self.longitude = longitude self.elevation = elevation self.population = population self.dwellings = dwellings", "(lat_lower, lng_lower))) filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <=", "None: if not isinstance(prefix, string_types): raise TypeError(\"prefix has to be a string\") if", "limit clause else: heap = list() for row in self.connect.execute(sql): # 43.959918, 46.995828,", "ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_name(self, 
area_name, sort_by=fields.postalcode,", "sort to find top-K if ascending: heap = heapq.nsmallest(returns, heap, key=lambda x: x[0])", "by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater,", "69.172 dist_btwn_lon_deg = cos(radians(lat)) * 69.172 lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg)", "\"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\", \"timezone\", \"day_light_savings\", ] def __init__(self, postalcode=None, province=None,", "postal code area_code exactly matches. :param prefix: all postal code with this prefix,", "raise ValueError(\"Can not find '%s'!\" % postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "total_ordering from sqlalchemy import select, func, and_ try: from .data import ( engine,", "- longitude: longitude - elevation: elevation - population: integer, population - dwellings: integer,", "elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode = postalcode self.province = province self.city =", "from math import radians, cos from functools import total_ordering from sqlalchemy import select,", "zipcode use day light savings. Compare two postal code is actually comparing it's", "\"\"\"Closs engine. 
**中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def find(self, lat=None, lng=None, radius=None, lat_greater=None, lat_less=None,", "row in self.connect.execute(sql): # 43.959918, 46.995828, -77.885944, -73.556256 dist = great_circle( (lat, lng),", "filters.append(t.c.timezone == timezone) # day_light_savings if day_light_savings is not None: day_light_savings = int(day_light_savings)", "filters.append(t.c.timezone <= timezone_less) if timezone: filters.append(t.c.timezone == timezone) # day_light_savings if day_light_savings is", "(lat, lng), (row.latitude, row.longitude)) if dist <= radius: heap.append((dist, row)) # Use heap", "lng_greater is not None: filters.append(t.c.longitude >= lng_greater) if lng_less is not None: filters.append(t.c.longitude", "ascending: clause = t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql =", "dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius: heap.append((dist, row))", "city self.area_code = area_code self.area_name = area_name self.latitude = latitude self.longitude = longitude", "by distance, don't use limit clause else: heap = list() for row in", "is not None: filters.append(t.c.timezone <= timezone_less) if timezone: filters.append(t.c.timezone == timezone) # day_light_savings", "dwellings: integer, dwellings - timezone: integer, timezone - day_light_savings: integer, indicate that whether", "== timezone) # day_light_savings if day_light_savings is not None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings", "code string. 
\"\"\" __attrs__ = [ \"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\",", "- elevation: elevation - population: integer, population - dwellings: integer, dwellings - timezone:", "be 2-letter abbreviation, or full name, and this search is fuzzy and typo", "elevation_greater is not None: filters.append(t.c.elevation >= elevation_greater) if elevation_less is not None: filters.append(t.c.elevation", ") def by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns,", "def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, )", "sort_by=sort_by, ascending=ascending, returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True,", "def __eq__(self, other): return self.postalcode == other.postalcode def __lt__(self, other): return self.postalcode <", "find(self, lat=None, lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None,", "+ lon_degr_rad # print(\"%.6f, %.6f, %.6f, %.6f\" % (lat_lower, lat_upper, lng_lower, lng_upper)) #", "TypeError(\"prefix has to be a string\") if 1 <= len(prefix) <= 7: pattern", "return self def __exit__(self, *exc_info): self.connect.close() def close(self): \"\"\"Closs engine. 
**中文文档** 断开与数据库的连接。 \"\"\"", ">= timezone_greater) if timezone_less is not None: filters.append(t.c.timezone <= timezone_less) if timezone: filters.append(t.c.timezone", "- city: city name, example: \"Ottawa\" - province: 2 letters province name abbreviation,", "lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None, city=None, area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None,", "range. :param timezone: int, all postal code timezone exactly matches. :param day_light_savings: bool", ">= lng_lower) filters.append(t.c.longitude <= lng_upper) elif lat is None and lng is None", "ValueError(\"lat, lng, radius has to be all given or not.\") # prefix if", "not isinstance(substring, string_types): raise TypeError(\"substring has to be a string\") if 1 <=", "lat_less, lng_greater, lng_less, elevation_greater, elevation_less: search postalcode within a 3-d space box. :param", "letter digits, example: 123 - area_name: area name, example: \"Ottawa\" - latitude: latitude", "string. \"\"\" __attrs__ = [ \"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\",", "is not None: if not isinstance(substring, string_types): raise TypeError(\"substring has to be a", "not None: filters.append(t.c.latitude >= lat_greater) if lat_less is not None: filters.append(t.c.latitude <= lat_less)", "if dwellings_greater is not None: filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less is not None:", "string\") if 1 <= len(prefix) <= 7: pattern = \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern))", "population - dwellings: integer, dwellings - timezone: integer, timezone - day_light_savings: integer, indicate", "prefix if prefix is not None: if not isinstance(prefix, string_types): raise TypeError(\"prefix has", "powerful search method. 
:param lat, lng, radius: search near lat, lng with in", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode = [row[0]", "code. Attributes: - postalcode: 7 letter, example: \"A0A 0A3\" - city: city name,", "is not None: filters.append(t.c.longitude <= lng_less) # elevation if elevation_greater is not None:", "area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A", "from .data import ( engine, t, find_province, find_city, find_area_name, fields, ) from .pkg.nameddict", "if dist <= radius: result.append(PostalCode._make(row)) if len(result) == returns: break # sort_by not", "self.connect.close() def find(self, lat=None, lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None,", "or int, whether using day light savings. \"\"\" filters = list() # near", "= find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name) except ValueError: pass # area_code if area_code:", "return self.find( substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "@total_ordering class PostalCode(Base): \"\"\"Represent a postal code. 
Attributes: - postalcode: 7 letter, example:", "longitude if lng_greater is not None: filters.append(t.c.longitude >= lng_greater) if lng_less is not", "TypeError(\"substring has to be a string\") if 1 <= len(substring) <= 7: pattern", "elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less,", "returns=returns, ) def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending,", "dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search method. :param", "heap] # else: if not sort_by: if ascending: clause = t.c[fields.postalcode].asc() else: clause", "None def __bool__(self): \"\"\"For Python3 bool() method. \"\"\" return self.postalcode is not None", "from .pkg.six import string_types except: from cazipcode.data import ( engine, t, find_province, find_city,", "return self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "5 class SearchEngine(object): \"\"\" \"\"\" def __init__(self): self.connect = engine.connect() def __enter__(self): return", "from cazipcode.pkg.six import string_types @total_ordering class PostalCode(Base): \"\"\"Represent a postal code. Attributes: -", "area_name=None, latitude=None, longitude=None, elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode = postalcode self.province =", "postal code is actually comparing it's postal code string. 
\"\"\" __attrs__ = [", "self.city = city self.area_code = area_code self.area_name = area_name self.latitude = latitude self.longitude", "= lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng -", "if sort_by: if ascending: clause = t.c[sort_by].asc() else: clause = t.c[sort_by].desc() sql =", "int, all postal code timezone exactly matches. :param day_light_savings: bool or int, whether", "def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def random(self,", "heap = list() for row in self.connect.execute(sql): # 43.959918, 46.995828, -77.885944, -73.556256 dist", "from cazipcode.data import ( engine, t, find_province, find_city, find_area_name, fields, ) from cazipcode.pkg.nameddict", "+ lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng + lon_degr_rad #", "be a 1-7 letter length!\") # province if province: try: province = find_province(province,", "def by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, )", "def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less,", "if substring is not None: if not isinstance(substring, string_types): raise TypeError(\"substring has to", "raise ValueError(\"substring has to be a 1-7 letter length!\") # province if province:", "lat_less) # longitude if lng_greater is not None: filters.append(t.c.longitude >= lng_greater) if lng_less", "returns=returns, ) def by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring, sort_by=sort_by, ascending=ascending,", "lat_upper = lat + 
lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng", "- dwellings: integer, dwellings - timezone: integer, timezone - day_light_savings: integer, indicate that", ":param population_greater, population_less: population falls in a range. :param dwellings_greater, dwellings_less: dwellings falls", "search method. :param lat, lng, radius: search near lat, lng with in xxx", "not given, then sort by distance, don't use limit clause else: heap =", "= find_city(city, best_match=True)[0] filters.append(t.c.city == city) except ValueError: pass # area_name if area_name:", "self.find( lat=lat, lng=lng, radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_postalcode(self, postalcode): \"\"\"Find exact", "lng is not None and radius is not None: dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg", "1.05 / dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper = lat + lat_degr_rad", "<= len(prefix) <= 7: pattern = \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix", "best_match=True)[0] filters.append(t.c.province == province) except ValueError: pass # city if city: try: city", "return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode,", "\"timezone\", \"day_light_savings\", ] def __init__(self, postalcode=None, province=None, city=None, area_code=None, area_name=None, latitude=None, longitude=None, elevation=None,", "letter length!\") # substring if substring is not None: if not isinstance(substring, string_types):", "math import radians, cos from functools import total_ordering from sqlalchemy import select, func,", "is actually comparing it's postal code string. 
\"\"\" __attrs__ = [ \"postalcode\", \"city\",", "dist <= radius: result.append(PostalCode._make(row)) if len(result) == returns: break # sort_by not given,", "return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "city, area_name. state name could be 2-letter abbreviation, or full name, and this", "Compare two postal code is actually comparing it's postal code string. \"\"\" __attrs__", "__nonzero__(self): \"\"\"For Python2 bool() method. \"\"\" return self.postalcode is not None def __bool__(self):", "[ \"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\", \"timezone\", \"day_light_savings\",", "timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search method. :param lat, lng, radius:", "falls in a range. :param timezone_greater, timezone_less: timezone falls in a range. 
:param", "ascending=ascending, returns=returns, ) def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater,", "best_match=True)[0] filters.append(t.c.city == city) except ValueError: pass # area_name if area_name: try: area_name", "lat_greater, lat_less, lng_greater, lng_less, elevation_greater, elevation_less: search postalcode within a 3-d space box.", "filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <= lng_upper) elif", "self.latitude = latitude self.longitude = longitude self.elevation = elevation self.population = population self.dwellings", "self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True,", "= abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg)", "letter length!\") # province if province: try: province = find_province(province, best_match=True)[0] filters.append(t.c.province ==", "postalcode): \"\"\"Find exact postal code. 
\"\"\" sql = select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode", "= [PostalCode._make(row) for row in self.connect.execute(sql)] return result def near(self, lat, lng, radius,", "prefix: all postal code with this prefix, for example: \"01A\" :param substring: all", "day_light_savings) # execute query sql = select([t]).where(and_(*filters)) if sort_by: if ascending: clause =", "near lat, lng if lat is not None and lng is not None", "sql = select([t]).where(and_(*filters)) if sort_by: if ascending: clause = t.c[sort_by].asc() else: clause =", "city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_name(self,", "43.959918, 46.995828, -77.885944, -73.556256 dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist", "row in self.connect.execute(sql): dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <=", "= great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius: heap.append((dist, row)) #", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_dwellings(self,", "day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) # execute query sql = select([t]).where(and_(*filters)) if", "timezone: integer, timezone - day_light_savings: integer, indicate that whether this zipcode use day", "by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "self.find( city=city, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", 
"return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None,", "substring filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has to be a 1-7 letter length!\") #", "is not None: filters.append(t.c.elevation >= elevation_greater) if elevation_less is not None: filters.append(t.c.elevation <=", "self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode,", "__enter__(self): return self def __exit__(self, *exc_info): self.connect.close() def close(self): \"\"\"Closs engine. **中文文档** 断开与数据库的连接。", "matches. :param prefix: all postal code with this prefix, for example: \"01A\" :param", "\"\"\"A powerful search method. :param lat, lng, radius: search near lat, lng with", "city) except ValueError: pass # area_name if area_name: try: area_name = find_area_name(area_name, best_match=True)[0]", "\"near\" search if radius: # sort_by given, then sort by keyword if sort_by:", "postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise ValueError(\"Can not find '%s'!\"", "ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def random(self, returns=DEFAULT_LIMIT): sql =", "- area_name: area name, example: \"Ottawa\" - latitude: latitude - longitude: longitude -", "self.find( substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "int, all postal code area_code exactly matches. :param prefix: all postal code with", "range. 
:param dwellings_greater, dwellings_less: dwellings falls in a range. :param timezone_greater, timezone_less: timezone", "# area_code if area_code: filters.append(t.c.area_code == area_code) # latitude if lat_greater is not", "else: heap = list() for row in self.connect.execute(sql): # 43.959918, 46.995828, -77.885944, -73.556256", "code. \"\"\" sql = select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode", "ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_substring(self, substring, sort_by=fields.postalcode,", "ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_province(self, province, sort_by=fields.postalcode,", "returns=returns, ) def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending,", "break # sort_by not given, then sort by distance, don't use limit clause", "it's postal code string. \"\"\" __attrs__ = [ \"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\",", "postal code timezone exactly matches. 
:param day_light_savings: bool or int, whether using day", "area_name: try: area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name) except ValueError: pass #", "postal code with this prefix, for example: \"01A\" :param substring: all postal code", "from sqlalchemy import select, func, and_ try: from .data import ( engine, t,", "for _, row in heap] # else: if not sort_by: if ascending: clause", "== other.postalcode def __lt__(self, other): return self.postalcode < other.postalcode def __nonzero__(self): \"\"\"For Python2", "return self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns,", "province: 2 letters province name abbreviation, example: \"ON\" - area_code: integer, 3 letter", "sort by distance, don't use limit clause else: heap = list() for row", "province if province: try: province = find_province(province, best_match=True)[0] filters.append(t.c.province == province) except ValueError:", "%.6f, %.6f\" % (lat_lower, lat_upper, lng_lower, lng_upper)) # print(\"%.6f\" % great_circle((lat, lng), (lat_upper,", "elevation_greater) if elevation_less is not None: filters.append(t.c.elevation <= elevation_less) # population if population_greater", "day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, ) def all_postalcode(self,", "pass # city if city: try: city = find_city(city, best_match=True)[0] filters.append(t.c.city == city)", "returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng, 
radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_postalcode(self, postalcode):", "def close(self): \"\"\"Closs engine. **中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def find(self, lat=None, lng=None, radius=None,", "engine.connect() def __enter__(self): return self def __exit__(self, *exc_info): self.connect.close() def close(self): \"\"\"Closs engine.", "= [PostalCode._make(row) for _, row in heap] # else: if not sort_by: if", "dwellings_greater, dwellings_less: dwellings falls in a range. :param timezone_greater, timezone_less: timezone falls in", "= population self.dwellings = dwellings self.timezone = timezone self.day_light_savings = day_light_savings def __str__(self):", "substring: all postal code contains this substring. :param population_greater, population_less: population falls in", "<= lng_upper) elif lat is None and lng is None and radius is", "= select([t.c.postalcode]) all_postalcode = [row[0] for row in self.connect.execute(sql)] result = list() for", "None: dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(radians(lat)) * 69.172 lat_degr_rad = abs(radius *", "in a range. :param timezone: int, all postal code timezone exactly matches. 
:param", "def __lt__(self, other): return self.postalcode < other.postalcode def __nonzero__(self): \"\"\"For Python2 bool() method.", "lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng + lon_degr_rad # print(\"%.6f,", "cos from functools import total_ordering from sqlalchemy import select, func, and_ try: from", "return self.find( lat=lat, lng=lng, radius=radius, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_postalcode(self, postalcode): \"\"\"Find", "= int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) # execute query sql = select([t]).where(and_(*filters)) if sort_by:", "#!/usr/bin/env python # -*- coding: utf-8 -*- import random import heapq from math", "elevation: elevation - population: integer, population - dwellings: integer, dwellings - timezone: integer,", "lng_upper)) # print(\"%.6f\" % great_circle((lat, lng), (lat_upper, lng_upper))) # print(\"%.6f\" % great_circle((lat, lng),", "search if radius: # sort_by given, then sort by keyword if sort_by: result", ") def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater,", "city, area_name: search by province, city, area_name. 
state name could be 2-letter abbreviation,", "dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT): \"\"\"A powerful search method.", "None: filters.append(t.c.timezone >= timezone_greater) if timezone_less is not None: filters.append(t.c.timezone <= timezone_less) if", "lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater,", ">= population_greater) if population_less is not None: filters.append(t.c.population <= population_less) # dwellings if", "from .pkg.geo_search import great_circle from .pkg.six import string_types except: from cazipcode.data import (", "area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True, returns=DEFAULT_LIMIT):", "__init__(self): self.connect = engine.connect() def __enter__(self): return self def __exit__(self, *exc_info): self.connect.close() def", "[PostalCode._make(row) for _, row in heap] # else: if not sort_by: if ascending:", "[row[0] for row in self.connect.execute(sql)] result = list() for postalcode in random.sample(all_postalcode, returns):", "sql = sql.limit(returns) result = [PostalCode._make(row) for row in self.connect.execute(sql)] return result def", "all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, ) def random(self, returns=DEFAULT_LIMIT):", "timezone self.day_light_savings = day_light_savings def __str__(self): return self.to_json(indent=4) def __eq__(self, other): return self.postalcode", "in a range. 
:param dwellings_greater, dwellings_less: dwellings falls in a range. :param timezone_greater,", "- timezone: integer, timezone - day_light_savings: integer, indicate that whether this zipcode use", "self.postalcode = postalcode self.province = province self.city = city self.area_code = area_code self.area_name", "\"\"\"For Python3 bool() method. \"\"\" return self.postalcode is not None DEFAULT_LIMIT = 5", "# longitude if lng_greater is not None: filters.append(t.c.longitude >= lng_greater) if lng_less is", "*exc_info): self.connect.close() def close(self): \"\"\"Closs engine. **中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def find(self, lat=None,", "utf-8 -*- import random import heapq from math import radians, cos from functools", "use limit clause else: heap = list() for row in self.connect.execute(sql): # 43.959918,", "returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True,", "-*- coding: utf-8 -*- import random import heapq from math import radians, cos", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode,", "Python3 bool() method. 
\"\"\" return self.postalcode is not None DEFAULT_LIMIT = 5 class", "if timezone_greater is not None: filters.append(t.c.timezone >= timezone_greater) if timezone_less is not None:", "if dist <= radius: heap.append((dist, row)) # Use heap sort to find top-K", "7: pattern = \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"substring has to be", "(row.latitude, row.longitude)) if dist <= radius: heap.append((dist, row)) # Use heap sort to", "sql = select([t.c.postalcode]) all_postalcode = [row[0] for row in self.connect.execute(sql)] result = list()", "timezone_less: timezone falls in a range. :param timezone: int, all postal code timezone", "returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode = [row[0] for row in self.connect.execute(sql)] result =", "filters.append(t.c.province == province) except ValueError: pass # city if city: try: city =", "day_light_savings: bool or int, whether using day light savings. \"\"\" filters = list()", "if timezone_less is not None: filters.append(t.c.timezone <= timezone_less) if timezone: filters.append(t.c.timezone == timezone)", "in heap] # else: if not sort_by: if ascending: clause = t.c[fields.postalcode].asc() else:", "if not isinstance(prefix, string_types): raise TypeError(\"prefix has to be a string\") if 1", "= day_light_savings def __str__(self): return self.to_json(indent=4) def __eq__(self, other): return self.postalcode == other.postalcode", "__exit__(self, *exc_info): self.connect.close() def close(self): \"\"\"Closs engine. 
**中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def find(self,", "import ( engine, t, find_province, find_city, find_area_name, fields, ) from .pkg.nameddict import Base", "# near lat, lng if lat is not None and lng is not", "has to be a 1-7 letter length!\") # substring if substring is not", "sort_by not given, then sort by distance, don't use limit clause else: heap", "if len(result) == returns: break # sort_by not given, then sort by distance,", "t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc() sql = sql.order_by(clause) sql = sql.limit(returns) result =", "letter, example: \"A0A 0A3\" - city: city name, example: \"Ottawa\" - province: 2", ":param timezone: int, all postal code timezone exactly matches. :param day_light_savings: bool or", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "sort by keyword if sort_by: result = list() for row in self.connect.execute(sql): dist", "= engine.connect() def __enter__(self): return self def __exit__(self, *exc_info): self.connect.close() def close(self): \"\"\"Closs", "random import heapq from math import radians, cos from functools import total_ordering from", "filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has to be a 1-7 letter length!\") # substring", "] def __init__(self, postalcode=None, province=None, city=None, area_code=None, area_name=None, latitude=None, longitude=None, elevation=None, population=None, dwellings=None,", "None DEFAULT_LIMIT = 5 class SearchEngine(object): \"\"\" \"\"\" def __init__(self): self.connect = engine.connect()", "print(\"%.6f\" % great_circle((lat, lng), (lat_upper, lng_upper))) # print(\"%.6f\" % great_circle((lat, lng), (lat_lower, lng_lower)))", "<= len(substring) <= 7: pattern = \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) 
else: raise ValueError(\"substring", "city=None, area_name=None, area_code=None, population_greater=None, population_less=None, dwellings_greater=None, dwellings_less=None, timezone=None, timezone_greater=None, timezone_less=None, day_light_savings=None, sort_by=None, ascending=True,", "except ValueError: pass # city if city: try: city = find_city(city, best_match=True)[0] filters.append(t.c.city", "not None: filters.append(t.c.longitude >= lng_greater) if lng_less is not None: filters.append(t.c.longitude <= lng_less)", "Attributes: - postalcode: 7 letter, example: \"A0A 0A3\" - city: city name, example:", "# execute query sql = select([t]).where(and_(*filters)) if sort_by: if ascending: clause = t.c[sort_by].asc()", "postal code contains this substring. :param population_greater, population_less: population falls in a range.", "1-7 letter length!\") # substring if substring is not None: if not isinstance(substring,", "None: filters.append(t.c.longitude <= lng_less) # elevation if elevation_greater is not None: filters.append(t.c.elevation >=", "returns=returns, ) def by_postalcode(self, postalcode): \"\"\"Find exact postal code. \"\"\" sql = select([t]).where(t.c.postalcode", "ascending=ascending, returns=returns, ) def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by,", "pass # area_name if area_name: try: area_name = find_area_name(area_name, best_match=True)[0] filters.append(t.c.area_name == area_name)", "find '%s'!\" % postalcode) def by_prefix(self, prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix,", "range. :param timezone_greater, timezone_less: timezone falls in a range. 
:param timezone: int, all", "else: heap = heapq.nlargest(returns, heap, key=lambda x: x[0]) result = [PostalCode._make(row) for _,", "self.elevation = elevation self.population = population self.dwellings = dwellings self.timezone = timezone self.day_light_savings", "lat_greater) if lat_less is not None: filters.append(t.c.latitude <= lat_less) # longitude if lng_greater", "lat + lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng + lon_degr_rad", "__eq__(self, other): return self.postalcode == other.postalcode def __lt__(self, other): return self.postalcode < other.postalcode", "province, city, area_name: search by province, city, area_name. state name could be 2-letter", "== area_name) except ValueError: pass # area_code if area_code: filters.append(t.c.area_code == area_code) #", "# substring if substring is not None: if not isinstance(substring, string_types): raise TypeError(\"substring", "heap sort to find top-K if ascending: heap = heapq.nsmallest(returns, heap, key=lambda x:", "exact postal code. 
\"\"\" sql = select([t]).where(t.c.postalcode == postalcode.strip().upper()) try: postalcode = PostalCode._make(self.connect.execute(sql).fetchone())", "population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "not None: filters.append(t.c.longitude <= lng_less) # elevation if elevation_greater is not None: filters.append(t.c.elevation", "postalcode = PostalCode._make(self.connect.execute(sql).fetchone()) return postalcode except: raise ValueError(\"Can not find '%s'!\" % postalcode)", "great_circle from .pkg.six import string_types except: from cazipcode.data import ( engine, t, find_province,", "population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, prefix=None, substring=None, province=None, city=None, area_name=None,", "code timezone exactly matches. :param day_light_savings: bool or int, whether using day light", "ValueError(\"substring has to be a 1-7 letter length!\") # province if province: try:", "filters.append(t.c.elevation <= elevation_less) # population if population_greater is not None: filters.append(t.c.population >= population_greater)", "and lng is not None and radius is not None: dist_btwn_lat_deg = 69.172", "is not None: filters.append(t.c.dwellings >= dwellings_greater) if dwellings_less is not None: filters.append(t.c.dwellings <=", ":param province, city, area_name: search by province, city, area_name. 
state name could be", "= heapq.nsmallest(returns, heap, key=lambda x: x[0]) else: heap = heapq.nlargest(returns, heap, key=lambda x:", ") from .pkg.nameddict import Base from .pkg.geo_search import great_circle from .pkg.six import string_types", "lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 /", "= province self.city = city self.area_code = area_code self.area_name = area_name self.latitude =", "area_code if area_code: filters.append(t.c.area_code == area_code) # latitude if lat_greater is not None:", "if day_light_savings is not None: day_light_savings = int(day_light_savings) filters.append(t.c.day_light_savings == day_light_savings) # execute", "len(prefix) <= 7: pattern = \"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has", "== returns: break # sort_by not given, then sort by distance, don't use", "lat, lng if lat is not None and lng is not None and", "sort_by: if ascending: clause = t.c[fields.postalcode].asc() else: clause = t.c[fields.postalcode].desc() sql = sql.order_by(clause)", "area_name) except ValueError: pass # area_code if area_code: filters.append(t.c.area_code == area_code) # latitude", "by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( day_light_savings=day_light_savings, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "len(result) == returns: break # sort_by not given, then sort by distance, don't", "a string\") if 1 <= len(substring) <= 7: pattern = \"%%%s%%\" % substring", "self.connect.close() def close(self): \"\"\"Closs engine. **中文文档** 断开与数据库的连接。 \"\"\" self.connect.close() def find(self, lat=None, lng=None,", "day light savings. 
Compare two postal code is actually comparing it's postal code", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode,", "result = [PostalCode._make(row) for row in self.connect.execute(sql)] return result def near(self, lat, lng,", "2-letter abbreviation, or full name, and this search is fuzzy and typo tolerant.", "result.append(PostalCode._make(row)) if len(result) == returns: break # sort_by not given, then sort by", "returns=returns, ) def all_postalcode(self, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT, )", "timezone_less) if timezone: filters.append(t.c.timezone == timezone) # day_light_savings if day_light_savings is not None:", "\"day_light_savings\", ] def __init__(self, postalcode=None, province=None, city=None, area_code=None, area_name=None, latitude=None, longitude=None, elevation=None, population=None,", "city=None, area_code=None, area_name=None, latitude=None, longitude=None, elevation=None, population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode = postalcode", "prefix, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( prefix=prefix, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_substring(self,", "is not None: dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(radians(lat)) * 69.172 lat_degr_rad =", "ascending=ascending, returns=returns, ) def by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by,", "断开与数据库的连接。 \"\"\" self.connect.close() def find(self, lat=None, lng=None, radius=None, lat_greater=None, lat_less=None, lng_greater=None, lng_less=None, elevation_greater=None,", "heap = 
heapq.nlargest(returns, heap, key=lambda x: x[0]) result = [PostalCode._make(row) for _, row", "def by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending,", "7 letter, example: \"A0A 0A3\" - city: city name, example: \"Ottawa\" - province:", "population if population_greater is not None: filters.append(t.c.population >= population_greater) if population_less is not", "lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <= lng_upper) elif lat is", "= [row[0] for row in self.connect.execute(sql)] result = list() for postalcode in random.sample(all_postalcode,", "dwellings falls in a range. :param timezone_greater, timezone_less: timezone falls in a range.", "= [ \"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\", \"timezone\",", "within a 3-d space box. :param province, city, area_name: search by province, city,", "clause else: heap = list() for row in self.connect.execute(sql): # 43.959918, 46.995828, -77.885944,", "lng_less=None, elevation_greater=None, elevation_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat_greater=lat_greater, lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater,", "find_area_name, fields, ) from .pkg.nameddict import Base from .pkg.geo_search import great_circle from .pkg.six", "raise TypeError(\"prefix has to be a string\") if 1 <= len(prefix) <= 7:", "= city self.area_code = area_code self.area_name = area_name self.latitude = latitude self.longitude =", "lat, lng, radius: search near lat, lng with in xxx miles. :param lat_greater,", "a range. 
:param timezone_greater, timezone_less: timezone falls in a range. :param timezone: int,", "be a string\") if 1 <= len(prefix) <= 7: pattern = \"%s%%\" %", "substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_province(self,", "by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by, ascending=ascending, returns=returns, ) def", "lon_degr_rad lng_upper = lng + lon_degr_rad # print(\"%.6f, %.6f, %.6f, %.6f\" % (lat_lower,", "\"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\", \"timezone\", \"day_light_savings\", ] def __init__(self, postalcode=None, province=None, city=None,", "else: raise ValueError(\"lat, lng, radius has to be all given or not.\") #", "- area_code: integer, 3 letter digits, example: 123 - area_name: area name, example:", "if lat_greater is not None: filters.append(t.c.latitude >= lat_greater) if lat_less is not None:", "given or not.\") # prefix if prefix is not None: if not isinstance(prefix,", "if ascending: heap = heapq.nsmallest(returns, heap, key=lambda x: x[0]) else: heap = heapq.nlargest(returns,", "sql.limit(returns) result = [PostalCode._make(row) for row in self.connect.execute(sql)] return result def near(self, lat,", "self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_city(self, city, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return", "is not None and lng is not None and radius is not None:", "= latitude self.longitude = longitude self.elevation = elevation self.population = population self.dwellings =", "self.longitude = longitude self.elevation = elevation self.population = population self.dwellings = dwellings self.timezone", "prefix is not None: if not isinstance(prefix, string_types): raise TypeError(\"prefix has to be", 
"latitude: latitude - longitude: longitude - elevation: elevation - population: integer, population -", "filters.append(t.c.dwellings <= dwellings_less) # timezone if timezone_greater is not None: filters.append(t.c.timezone >= timezone_greater)", "__attrs__ = [ \"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\",", "return postalcode except: raise ValueError(\"Can not find '%s'!\" % postalcode) def by_prefix(self, prefix,", "returns=DEFAULT_LIMIT): \"\"\"A powerful search method. :param lat, lng, radius: search near lat, lng", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_substring(self, substring, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( substring=substring,", "engine, t, find_province, find_city, find_area_name, fields, ) from .pkg.nameddict import Base from .pkg.geo_search", "elevation - population: integer, population - dwellings: integer, dwellings - timezone: integer, timezone", "by province, city, area_name. state name could be 2-letter abbreviation, or full name,", "to be all given or not.\") # prefix if prefix is not None:", "lat_less=lat_less, lng_greater=lng_greater, lng_less=lng_less, elevation_greater=elevation_greater, elevation_less=elevation_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_population(self, population_greater=None, population_less=None,", "using day light savings. \"\"\" filters = list() # near lat, lng if", "all given or not.\") # prefix if prefix is not None: if not", "if province: try: province = find_province(province, best_match=True)[0] filters.append(t.c.province == province) except ValueError: pass", "if not isinstance(substring, string_types): raise TypeError(\"substring has to be a string\") if 1", "lat, lng with in xxx miles. 
:param lat_greater, lat_less, lng_greater, lng_less, elevation_greater, elevation_less:", "( engine, t, find_province, find_city, find_area_name, fields, ) from .pkg.nameddict import Base from", "string_types): raise TypeError(\"prefix has to be a string\") if 1 <= len(prefix) <=", "length!\") # substring if substring is not None: if not isinstance(substring, string_types): raise", "ascending=ascending, returns=returns, ) def by_area_code(self, area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by,", "None: filters.append(t.c.population >= population_greater) if population_less is not None: filters.append(t.c.population <= population_less) #", "dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, )", "= 5 class SearchEngine(object): \"\"\" \"\"\" def __init__(self): self.connect = engine.connect() def __enter__(self):", "\"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\", \"dwellings\", \"timezone\", \"day_light_savings\", ] def __init__(self, postalcode=None,", "# print(\"%.6f, %.6f, %.6f, %.6f\" % (lat_lower, lat_upper, lng_lower, lng_upper)) # print(\"%.6f\" %", "sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_timezone(self,", "= select([t]).where(and_(*filters)) if sort_by: if ascending: clause = t.c[sort_by].asc() else: clause = t.c[sort_by].desc()", "list() # near lat, lng if lat is not None and lng is", "None: if not isinstance(substring, string_types): raise TypeError(\"substring has to be a string\") if", "import heapq from math import radians, cos from functools import 
total_ordering from sqlalchemy", "light savings. Compare two postal code is actually comparing it's postal code string.", "= find_province(province, best_match=True)[0] filters.append(t.c.province == province) except ValueError: pass # city if city:", "is not None: filters.append(t.c.longitude >= lng_greater) if lng_less is not None: filters.append(t.c.longitude <=", "area_code, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self,", "select, func, and_ try: from .data import ( engine, t, find_province, find_city, find_area_name,", "timezone - day_light_savings: integer, indicate that whether this zipcode use day light savings.", "lng_less, elevation_greater, elevation_less: search postalcode within a 3-d space box. :param province, city,", "\"%s%%\" % prefix filters.append(t.c.postalcode.like(pattern)) else: raise ValueError(\"prefix has to be a 1-7 letter", "all_postalcode = [row[0] for row in self.connect.execute(sql)] result = list() for postalcode in", "print(\"%.6f\" % great_circle((lat, lng), (lat_lower, lng_lower))) filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude", "search is fuzzy and typo tolerant. 
:param area_code: int, all postal code area_code", "engine, t, find_province, find_city, find_area_name, fields, ) from cazipcode.pkg.nameddict import Base from cazipcode.pkg.geo_search", ">= dwellings_greater) if dwellings_less is not None: filters.append(t.c.dwellings <= dwellings_less) # timezone if", "population=None, dwellings=None, timezone=None, day_light_savings=None): self.postalcode = postalcode self.province = province self.city = city", "longitude self.elevation = elevation self.population = population self.dwellings = dwellings self.timezone = timezone", "# province if province: try: province = find_province(province, best_match=True)[0] filters.append(t.c.province == province) except", "import total_ordering from sqlalchemy import select, func, and_ try: from .data import (", "great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius: heap.append((dist, row)) # Use", "filters.append(t.c.latitude >= lat_greater) if lat_less is not None: filters.append(t.c.latitude <= lat_less) # longitude", "\"dwellings\", \"timezone\", \"day_light_savings\", ] def __init__(self, postalcode=None, province=None, city=None, area_code=None, area_name=None, latitude=None, longitude=None,", ":param area_code: int, all postal code area_code exactly matches. 
:param prefix: all postal", "for row in self.connect.execute(sql)] result = list() for postalcode in random.sample(all_postalcode, returns): result.append(self.by_postalcode(postalcode))", "area_name: area name, example: \"Ottawa\" - latitude: latitude - longitude: longitude - elevation:", "ascending=ascending, returns=returns, ) def by_area_name(self, area_name, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( area_name=area_name, sort_by=sort_by,", "if lat_less is not None: filters.append(t.c.latitude <= lat_less) # longitude if lng_greater is", "find_area_name, fields, ) from cazipcode.pkg.nameddict import Base from cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six", "t, find_province, find_city, find_area_name, fields, ) from cazipcode.pkg.nameddict import Base from cazipcode.pkg.geo_search import", "46.995828, -77.885944, -73.556256 dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <=", "% great_circle((lat, lng), (lat_upper, lng_upper))) # print(\"%.6f\" % great_circle((lat, lng), (lat_lower, lng_lower))) filters.append(t.c.latitude", "ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_city(self, city, sort_by=fields.postalcode,", "pass # area_code if area_code: filters.append(t.c.area_code == area_code) # latitude if lat_greater is", "other): return self.postalcode == other.postalcode def __lt__(self, other): return self.postalcode < other.postalcode def", "area_code) # latitude if lat_greater is not None: filters.append(t.c.latitude >= lat_greater) if lat_less", "\"\"\" __attrs__ = [ \"postalcode\", \"city\", \"province\", \"area_code\", \"area_name\", \"latitude\", \"longitude\", \"elevation\", \"population\",", "not None: filters.append(t.c.elevation >= elevation_greater) if elevation_less is not None: filters.append(t.c.elevation <= elevation_less)", "import Base from 
cazipcode.pkg.geo_search import great_circle from cazipcode.pkg.six import string_types @total_ordering class PostalCode(Base):", "radius: search near lat, lng with in xxx miles. :param lat_greater, lat_less, lng_greater,", "actually comparing it's postal code string. \"\"\" __attrs__ = [ \"postalcode\", \"city\", \"province\",", "filters.append(t.c.latitude <= lat_less) # longitude if lng_greater is not None: filters.append(t.c.longitude >= lng_greater)", "returns=DEFAULT_LIMIT): return self.find( area_code=area_code, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_lat_lng_elevation(self, lat_greater=None, lat_less=None, lng_greater=None,", "by_population(self, population_greater=None, population_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( population_greater=population_greater, population_less=population_less, sort_by=sort_by, ascending=ascending, returns=returns,", "lng with in xxx miles. :param lat_greater, lat_less, lng_greater, lng_less, elevation_greater, elevation_less: search", "to find top-K if ascending: heap = heapq.nsmallest(returns, heap, key=lambda x: x[0]) else:", "lng), (lat_upper, lng_upper))) # print(\"%.6f\" % great_circle((lat, lng), (lat_lower, lng_lower))) filters.append(t.c.latitude >= lat_lower)", "province = find_province(province, best_match=True)[0] filters.append(t.c.province == province) except ValueError: pass # city if", "# else: if not sort_by: if ascending: clause = t.c[fields.postalcode].asc() else: clause =", "sort_by given, then sort by keyword if sort_by: result = list() for row", "= sql.limit(returns) result = [PostalCode._make(row) for row in self.connect.execute(sql)] return result def near(self,", "= lng - lon_degr_rad lng_upper = lng + lon_degr_rad # print(\"%.6f, %.6f, %.6f,", "if use \"near\" search if radius: # sort_by given, then sort by keyword", "ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, 
dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_timezone(self, timezone=None,", "3 letter digits, example: 123 - area_name: area name, example: \"Ottawa\" - latitude:", "def __bool__(self): \"\"\"For Python3 bool() method. \"\"\" return self.postalcode is not None DEFAULT_LIMIT", "filters.append(t.c.longitude >= lng_lower) filters.append(t.c.longitude <= lng_upper) elif lat is None and lng is", "radius is not None: dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(radians(lat)) * 69.172 lat_degr_rad", "for example: \"01A\" :param substring: all postal code contains this substring. :param population_greater,", "dwellings_less: dwellings falls in a range. :param timezone_greater, timezone_less: timezone falls in a", "to be a string\") if 1 <= len(substring) <= 7: pattern = \"%%%s%%\"", "latitude if lat_greater is not None: filters.append(t.c.latitude >= lat_greater) if lat_less is not", "\"Ottawa\" - latitude: latitude - longitude: longitude - elevation: elevation - population: integer,", "day_light_savings def __str__(self): return self.to_json(indent=4) def __eq__(self, other): return self.postalcode == other.postalcode def", "abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower", "is None and lng is None and radius is None: pass else: raise", "postalcode self.province = province self.city = city self.area_code = area_code self.area_name = area_name", "and this search is fuzzy and typo tolerant. :param area_code: int, all postal", "not None: filters.append(t.c.timezone >= timezone_greater) if timezone_less is not None: filters.append(t.c.timezone <= timezone_less)", ":param substring: all postal code contains this substring. 
:param population_greater, population_less: population falls", "None: filters.append(t.c.latitude >= lat_greater) if lat_less is not None: filters.append(t.c.latitude <= lat_less) #", "by_dwellings(self, dwellings_greater=None, dwellings_less=None, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns,", "if 1 <= len(substring) <= 7: pattern = \"%%%s%%\" % substring filters.append(t.c.postalcode.like(pattern)) else:", "bool() method. \"\"\" return self.postalcode is not None DEFAULT_LIMIT = 5 class SearchEngine(object):", "def random(self, returns=DEFAULT_LIMIT): sql = select([t.c.postalcode]) all_postalcode = [row[0] for row in self.connect.execute(sql)]", "sort_by=sort_by, ascending=ascending, returns=returns, ) def by_province(self, province, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( province=province,", "full name, and this search is fuzzy and typo tolerant. 
:param area_code: int,", "filters.append(t.c.elevation >= elevation_greater) if elevation_less is not None: filters.append(t.c.elevation <= elevation_less) # population", "filters.append(t.c.longitude <= lng_upper) elif lat is None and lng is None and radius", "None and radius is None: pass else: raise ValueError(\"lat, lng, radius has to", "def near(self, lat, lng, radius, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT): return self.find( lat=lat, lng=lng, radius=radius,", "find_province, find_city, find_area_name, fields, ) from .pkg.nameddict import Base from .pkg.geo_search import great_circle", "dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad", "% great_circle((lat, lng), (lat_lower, lng_lower))) filters.append(t.c.latitude >= lat_lower) filters.append(t.c.latitude <= lat_upper) filters.append(t.c.longitude >=", "not None: if not isinstance(prefix, string_types): raise TypeError(\"prefix has to be a string\")", "-77.885944, -73.556256 dist = great_circle( (lat, lng), (row.latitude, row.longitude)) if dist <= radius:", "= heapq.nlargest(returns, heap, key=lambda x: x[0]) result = [PostalCode._make(row) for _, row in", "__bool__(self): \"\"\"For Python3 bool() method. \"\"\" return self.postalcode is not None DEFAULT_LIMIT =", "all postal code with this prefix, for example: \"01A\" :param substring: all postal", "return self.find( dwellings_greater=dwellings_greater, dwellings_less=dwellings_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_timezone(self, timezone=None, timezone_greater=None, timezone_less=None,", "timezone=timezone, timezone_greater=timezone_greater, timezone_less=timezone_less, sort_by=sort_by, ascending=ascending, returns=returns, ) def by_day_light_savings(self, day_light_savings, sort_by=fields.postalcode, ascending=True, returns=DEFAULT_LIMIT):", "population falls in a range. 
:param dwellings_greater, dwellings_less: dwellings falls in a range.", "timezone_less is not None: filters.append(t.c.timezone <= timezone_less) if timezone: filters.append(t.c.timezone == timezone) #", "integer, indicate that whether this zipcode use day light savings. Compare two postal" ]
[ "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "express or implied. See the License for the # specific language governing permissions", "with open(file_path) as text_file: # Highlight code code = text_file.read() code_lines = code.split(\"\\n\")", "context_lines_count: The number of lines that will be cut before and after. :return:", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "(the # \"License\"); you may not use this file except in compliance #", "] # # Cut out the snippet start_line_no = max(0, line_no - context_lines_count", "License for the # specific language governing permissions and limitations # under the", "code code = text_file.read() code_lines = code.split(\"\\n\") # Prepend line number code_lines =", "before and after. :return: str \"\"\" with open(file_path) as text_file: # Highlight code", "\"\"\" Prepare code snippet with line numbers and a specific line marked. :param", ":param line_no: Line number :param context_lines_count: The number of lines that will be", "you under the Apache License, Version 2.0 (the # \"License\"); you may not", "(ASF) under one # or more contributor license agreements. See the NOTICE file", "line marked. :param file_path: File nam :param line_no: Line number :param context_lines_count: The", "= [ \">{lno:3} | {line}\".format(lno=lno, line=line) if line_no == lno else \"{lno:4} |", "License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "specific language governing permissions and limitations # under the License. def prepare_code_snippet(file_path, line_no,", "# under the License. def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare code snippet with", "this file # to you under the Apache License, Version 2.0 (the #", "marked. 
:param file_path: File nam :param line_no: Line number :param context_lines_count: The number", "line=line) for lno, line in enumerate(code_lines, 1) ] # # Cut out the", ":return: str \"\"\" with open(file_path) as text_file: # Highlight code code = text_file.read()", "software distributed under the License is distributed on an # \"AS IS\" BASIS,", "snippet with line numbers and a specific line marked. :param file_path: File nam", "law or agreed to in writing, # software distributed under the License is", "line_no, context_lines_count=5): \"\"\" Prepare code snippet with line numbers and a specific line", "# # Unless required by applicable law or agreed to in writing, #", "# software distributed under the License is distributed on an # \"AS IS\"", "to you under the Apache License, Version 2.0 (the # \"License\"); you may", "code snippet with line numbers and a specific line marked. :param file_path: File", "limitations # under the License. def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare code snippet", "File nam :param line_no: Line number :param context_lines_count: The number of lines that", "text_file.read() code_lines = code.split(\"\\n\") # Prepend line number code_lines = [ \">{lno:3} |", "file # distributed with this work for additional information # regarding copyright ownership.", "and a specific line marked. :param file_path: File nam :param line_no: Line number", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", ":param file_path: File nam :param line_no: Line number :param context_lines_count: The number of", "# Licensed to the Apache Software Foundation (ASF) under one # or more", "Version 2.0 (the # \"License\"); you may not use this file except in", "for the # specific language governing permissions and limitations # under the License.", "with line numbers and a specific line marked. 
:param file_path: File nam :param", "for lno, line in enumerate(code_lines, 1) ] # # Cut out the snippet", "under the Apache License, Version 2.0 (the # \"License\"); you may not use", "lno else \"{lno:4} | {line}\".format(lno=lno, line=line) for lno, line in enumerate(code_lines, 1) ]", "\">{lno:3} | {line}\".format(lno=lno, line=line) if line_no == lno else \"{lno:4} | {line}\".format(lno=lno, line=line)", "and after. :return: str \"\"\" with open(file_path) as text_file: # Highlight code code", "copyright ownership. The ASF licenses this file # to you under the Apache", "ownership. The ASF licenses this file # to you under the Apache License,", "governing permissions and limitations # under the License. def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\"", "License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "# Highlight code code = text_file.read() code_lines = code.split(\"\\n\") # Prepend line number", "context_lines_count=5): \"\"\" Prepare code snippet with line numbers and a specific line marked.", "[ \">{lno:3} | {line}\".format(lno=lno, line=line) if line_no == lno else \"{lno:4} | {line}\".format(lno=lno,", "will be cut before and after. :return: str \"\"\" with open(file_path) as text_file:", "under the License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "additional information # regarding copyright ownership. The ASF licenses this file # to", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied.", "# \"License\"); you may not use this file except in compliance # with", "OF ANY # KIND, either express or implied. See the License for the", "or implied. 
See the License for the # specific language governing permissions and", "1) end_line_no = line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join lines code", "# Unless required by applicable law or agreed to in writing, # software", "in enumerate(code_lines, 1) ] # # Cut out the snippet start_line_no = max(0,", "the snippet start_line_no = max(0, line_no - context_lines_count - 1) end_line_no = line_no", "line number code_lines = [ \">{lno:3} | {line}\".format(lno=lno, line=line) if line_no == lno", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "Licensed to the Apache Software Foundation (ASF) under one # or more contributor", "line_no - context_lines_count - 1) end_line_no = line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no]", "be cut before and after. :return: str \"\"\" with open(file_path) as text_file: #", "code_lines = code.split(\"\\n\") # Prepend line number code_lines = [ \">{lno:3} | {line}\".format(lno=lno,", "distributed under the License is distributed on an # \"AS IS\" BASIS, WITHOUT", "or more contributor license agreements. See the NOTICE file # distributed with this", "{line}\".format(lno=lno, line=line) for lno, line in enumerate(code_lines, 1) ] # # Cut out", "# # Cut out the snippet start_line_no = max(0, line_no - context_lines_count -", "to in writing, # software distributed under the License is distributed on an", "file_path: File nam :param line_no: Line number :param context_lines_count: The number of lines", "Cut out the snippet start_line_no = max(0, line_no - context_lines_count - 1) end_line_no", "code = text_file.read() code_lines = code.split(\"\\n\") # Prepend line number code_lines = [", "agreed to in writing, # software distributed under the License is distributed on", "OR CONDITIONS OF ANY # KIND, either express or implied. See the License", "Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE", "line in enumerate(code_lines, 1) ] # # Cut out the snippet start_line_no =", "License. def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare code snippet with line numbers and", "lines that will be cut before and after. :return: str \"\"\" with open(file_path)", "Apache Software Foundation (ASF) under one # or more contributor license agreements. See", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "str \"\"\" with open(file_path) as text_file: # Highlight code code = text_file.read() code_lines", "under the License. def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare code snippet with line", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "License, Version 2.0 (the # \"License\"); you may not use this file except", "Highlight code code = text_file.read() code_lines = code.split(\"\\n\") # Prepend line number code_lines", "code_lines = [ \">{lno:3} | {line}\".format(lno=lno, line=line) if line_no == lno else \"{lno:4}", "to the Apache Software Foundation (ASF) under one # or more contributor license", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "this file except in compliance # with the License. You may obtain a", "The number of lines that will be cut before and after. :return: str", "and limitations # under the License. def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare code", "in compliance # with the License. You may obtain a copy of the", "implied. See the License for the # specific language governing permissions and limitations", "may not use this file except in compliance # with the License. 
You", "or agreed to in writing, # software distributed under the License is distributed", "as text_file: # Highlight code code = text_file.read() code_lines = code.split(\"\\n\") # Prepend", "use this file except in compliance # with the License. You may obtain", "ASF licenses this file # to you under the Apache License, Version 2.0", "line_no == lno else \"{lno:4} | {line}\".format(lno=lno, line=line) for lno, line in enumerate(code_lines,", "number of lines that will be cut before and after. :return: str \"\"\"", "Unless required by applicable law or agreed to in writing, # software distributed", "= code.split(\"\\n\") # Prepend line number code_lines = [ \">{lno:3} | {line}\".format(lno=lno, line=line)", "Software Foundation (ASF) under one # or more contributor license agreements. See the", "= text_file.read() code_lines = code.split(\"\\n\") # Prepend line number code_lines = [ \">{lno:3}", "number :param context_lines_count: The number of lines that will be cut before and", "{line}\".format(lno=lno, line=line) if line_no == lno else \"{lno:4} | {line}\".format(lno=lno, line=line) for lno,", "distributed with this work for additional information # regarding copyright ownership. The ASF", "# distributed with this work for additional information # regarding copyright ownership. The", "license agreements. See the NOTICE file # distributed with this work for additional", "the License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "\"License\"); you may not use this file except in compliance # with the", "on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "end_line_no = line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join lines code =", "regarding copyright ownership. 
The ASF licenses this file # to you under the", "prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare code snippet with line numbers and a specific", "- 1) end_line_no = line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join lines", "# KIND, either express or implied. See the License for the # specific", "= max(0, line_no - context_lines_count - 1) end_line_no = line_no + context_lines_count code_lines", "max(0, line_no - context_lines_count - 1) end_line_no = line_no + context_lines_count code_lines =", "this work for additional information # regarding copyright ownership. The ASF licenses this", "with this work for additional information # regarding copyright ownership. The ASF licenses", "context_lines_count - 1) end_line_no = line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join", "ANY # KIND, either express or implied. See the License for the #", "specific line marked. :param file_path: File nam :param line_no: Line number :param context_lines_count:", "See the NOTICE file # distributed with this work for additional information #", "contributor license agreements. See the NOTICE file # distributed with this work for", "either express or implied. See the License for the # specific language governing", "if line_no == lno else \"{lno:4} | {line}\".format(lno=lno, line=line) for lno, line in", "the License. You may obtain a copy of the License at # #", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "<reponame>shrutimantri/airflow # Licensed to the Apache Software Foundation (ASF) under one # or", "the NOTICE file # distributed with this work for additional information # regarding", "in writing, # software distributed under the License is distributed on an #", "the Apache Software Foundation (ASF) under one # or more contributor license agreements.", "agreements. 
See the NOTICE file # distributed with this work for additional information", "start_line_no = max(0, line_no - context_lines_count - 1) end_line_no = line_no + context_lines_count", "not use this file except in compliance # with the License. You may", "cut before and after. :return: str \"\"\" with open(file_path) as text_file: # Highlight", "writing, # software distributed under the License is distributed on an # \"AS", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "# or more contributor license agreements. See the NOTICE file # distributed with", "nam :param line_no: Line number :param context_lines_count: The number of lines that will", "of lines that will be cut before and after. :return: str \"\"\" with", "lno, line in enumerate(code_lines, 1) ] # # Cut out the snippet start_line_no", "| {line}\".format(lno=lno, line=line) if line_no == lno else \"{lno:4} | {line}\".format(lno=lno, line=line) for", "after. :return: str \"\"\" with open(file_path) as text_file: # Highlight code code =", "See the License for the # specific language governing permissions and limitations #", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the", "# Prepend line number code_lines = [ \">{lno:3} | {line}\".format(lno=lno, line=line) if line_no", "+ context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join lines code = \"\\n\".join(code_lines) return code", "NOTICE file # distributed with this work for additional information # regarding copyright", "text_file: # Highlight code code = text_file.read() code_lines = code.split(\"\\n\") # Prepend line", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "2.0 (the # \"License\"); you may not use this file except in compliance", "language governing permissions and limitations # under the License. 
def prepare_code_snippet(file_path, line_no, context_lines_count=5):", "snippet start_line_no = max(0, line_no - context_lines_count - 1) end_line_no = line_no +", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "the # specific language governing permissions and limitations # under the License. def", "that will be cut before and after. :return: str \"\"\" with open(file_path) as", "# with the License. You may obtain a copy of the License at", "line=line) if line_no == lno else \"{lno:4} | {line}\".format(lno=lno, line=line) for lno, line", "line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join lines code = \"\\n\".join(code_lines) return", "else \"{lno:4} | {line}\".format(lno=lno, line=line) for lno, line in enumerate(code_lines, 1) ] #", "KIND, either express or implied. See the License for the # specific language", "# specific language governing permissions and limitations # under the License. def prepare_code_snippet(file_path,", "# Cut out the snippet start_line_no = max(0, line_no - context_lines_count - 1)", "- context_lines_count - 1) end_line_no = line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] #", "permissions and limitations # under the License. def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare", "= line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join lines code = \"\\n\".join(code_lines)", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "# regarding copyright ownership. The ASF licenses this file # to you under", "the Apache License, Version 2.0 (the # \"License\"); you may not use this", "Apache License, Version 2.0 (the # \"License\"); you may not use this file", "numbers and a specific line marked. 
:param file_path: File nam :param line_no: Line", ":param context_lines_count: The number of lines that will be cut before and after.", "more contributor license agreements. See the NOTICE file # distributed with this work", "code.split(\"\\n\") # Prepend line number code_lines = [ \">{lno:3} | {line}\".format(lno=lno, line=line) if", "under one # or more contributor license agreements. See the NOTICE file #", "# to you under the Apache License, Version 2.0 (the # \"License\"); you", "required by applicable law or agreed to in writing, # software distributed under", "line_no: Line number :param context_lines_count: The number of lines that will be cut", "out the snippet start_line_no = max(0, line_no - context_lines_count - 1) end_line_no =", "def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare code snippet with line numbers and a", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "number code_lines = [ \">{lno:3} | {line}\".format(lno=lno, line=line) if line_no == lno else", "enumerate(code_lines, 1) ] # # Cut out the snippet start_line_no = max(0, line_no", "1) ] # # Cut out the snippet start_line_no = max(0, line_no -", "compliance # with the License. You may obtain a copy of the License", "Line number :param context_lines_count: The number of lines that will be cut before", "the License. def prepare_code_snippet(file_path, line_no, context_lines_count=5): \"\"\" Prepare code snippet with line numbers", "\"{lno:4} | {line}\".format(lno=lno, line=line) for lno, line in enumerate(code_lines, 1) ] # #", "Prepare code snippet with line numbers and a specific line marked. :param file_path:", "by applicable law or agreed to in writing, # software distributed under the", "for additional information # regarding copyright ownership. The ASF licenses this file #", "line numbers and a specific line marked. 
:param file_path: File nam :param line_no:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See", "with the License. You may obtain a copy of the License at #", "information # regarding copyright ownership. The ASF licenses this file # to you", "The ASF licenses this file # to you under the Apache License, Version", "file except in compliance # with the License. You may obtain a copy", "CONDITIONS OF ANY # KIND, either express or implied. See the License for", "open(file_path) as text_file: # Highlight code code = text_file.read() code_lines = code.split(\"\\n\") #", "file # to you under the Apache License, Version 2.0 (the # \"License\");", "the License for the # specific language governing permissions and limitations # under", "\"\"\" with open(file_path) as text_file: # Highlight code code = text_file.read() code_lines =", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "work for additional information # regarding copyright ownership. The ASF licenses this file", "applicable law or agreed to in writing, # software distributed under the License", "one # or more contributor license agreements. See the NOTICE file # distributed", "== lno else \"{lno:4} | {line}\".format(lno=lno, line=line) for lno, line in enumerate(code_lines, 1)", "except in compliance # with the License. You may obtain a copy of", "| {line}\".format(lno=lno, line=line) for lno, line in enumerate(code_lines, 1) ] # # Cut", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "you may not use this file except in compliance # with the License.", "a specific line marked. 
:param file_path: File nam :param line_no: Line number :param", "licenses this file # to you under the Apache License, Version 2.0 (the", "Prepend line number code_lines = [ \">{lno:3} | {line}\".format(lno=lno, line=line) if line_no ==", "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either" ]
[ "\"\"\"), ) _nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA {schema}", "SCHEMA {schema} FROM {role}\"), ) _allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS", "%(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _types = {", "datacl IS NULL AND datname = current_database() ) SELECT grants.priv AS key, NULL", "truth table: # # FOR GRANT | no grant | partial grant |", "NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast' GROUP BY 1, 2 ), all_grants", "AND grants.rels IS NULL) -- ORDER BY 1, 2 \"\"\"), grant=\"GRANT %(privilege)s ON", "owner, 0 AS grantee, 'EXECUTE' AS priv FROM pg_roles AS rol LEFT OUTER", "datname = current_database() ) SELECT grants.priv AS key, NULL as namespace, COALESCE(rolname, 'public')", "all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), (name, [all_, default]), ]) def make_well_known_privileges():", "ROLE {owner} IN SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\", "1) IS NOT NULL AND grants.rels IS NULL) -- ORDER BY 1, 2", "NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS", "WHERE rolname IS NOT NULL OR grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants", "(aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0 AS grantee, 'EXECUTE'", "\" REVOKE %(privilege)s ON %(TYPE)s FROM {role};\"), ) _defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl',", "ARRAY[]::name[] THEN NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\" FROM namespaces", "fully granted # -----------+----------+---------------+--------------- # no tables | NOOP | N/D | N/D", "if '%(t)' in fmt: for t in fmt_kwargs['t']: yield fmt % dict(fmt_kwargs, t=t)", "proname, 0 AS grantee, 'EXECUTE' AS priv 
FROM pg_catalog.pg_proc WHERE proacl IS NULL", "TYPE, privilege), (name, [all_, default, global_def]), ]) def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw", "fmt_args = dict( t=t, # Loose SQL formatting t_array='(%s)' % (', '.join(['%r' %", "all_grants AS grants ON relnamespace = nsp.oid AND grantee = rol.oid AND privilege_type", "irrelevant on # this schema. # # Here is a truth table: #", "t=t) else: yield fmt % fmt_kwargs def make_privilege(tpl, name, TYPE, privilege): t =", "procs FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace =", "]) # This is a compatibility alias. privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__']", "else: if v['shared_query'] not in shared_queries: raise Exception(\"Unknown query %s.\" % v['shared_query']) v", "AS nsp CROSS JOIN all_roles AS rol LEFT OUTER JOIN all_grants AS grants", "# -----------+----------+---------------+--------------- # no tables | NOOP | N/D | N/D # -----------+----------+---------------+---------------", "'USAGE'), ]) # This is a compatibility alias. privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS'))", "( SELECT oid, rolname FROM pg_catalog.pg_roles UNION SELECT 0, 'public' ) SELECT nspname,", "JOIN pg_catalog.pg_database WHERE datacl IS NULL AND datname = current_database() ) SELECT grants.priv", "N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | GRANT | GRANT |", "to manage partial grant. 
But the # trickiest comes when there is no", "nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' ORDER BY 1, 2 \"\"\")", "_types.get(TYPE) fmt_args = dict( t=t, # Loose SQL formatting t_array='(%s)' % (', '.join(['%r'", "OUTER JOIN grants ON pronamespace = nsp.oid AND grants.grantee = roles.oid WHERE NOT", "privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), (name, [all_, default]), ]) def make_well_known_privileges(): privileges =", "'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias = '__%s__' % (privilege.lower(),) privileges[alias]", "-----------+----------+---------------+--------------- # # When namespace has NO tables, we always return a row", "default]), ]) def make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__',", "tables | NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables |", "in v['keys'] ])) privilege[k] = v return name, privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS',", "N/D # -----------+----------+---------------+--------------- # 1+ tables | NOOP | REVOKE | REVOKE #", "SELECT priv AS key, NULL AS \"schema\", COALESCE(rolname, 'public') as rolname, TRUE AS", "nsp.oid AND relkind IN %(t_array)s WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <>", "a compatibility alias. 
privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for privilege", "'public') as rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT OUTER", "(SELECT pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER BY proname) AS procs FROM (", "partial grant | fully granted # -----------+----------+---------------+--------------- # no tables | NOOP |", "priv AS key, NULL AS \"schema\", COALESCE(rolname, 'public') as rolname, TRUE AS \"full\",", "0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type", "IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee", "this case, is it # granted or revoked ? We have to tell", "grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS", "dict() for k, v in tpl.items(): if isinstance(v, string_types): v = v %", "nsp ON nsp.oid = defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee", "LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE rolname IS", "to availables tables to # determine if privilege is fully granted. 
If the", "rel ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s WHERE nspname NOT LIKE", "AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol", "dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA {schema} TO {role};\", revoke=\"REVOKE %(privilege)s", "%(t_array)s WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast' GROUP BY 1,", "privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)] for privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege,", "WHERE defaclacl IS NULL ) SELECT priv AS key, NULL AS \"schema\", COALESCE(rolname,", "WHERE datacl IS NULL AND datname = current_database() ) SELECT grants.priv AS key,", "ldap2pg that this grant is irrelevant on # this schema. # # Here", "priv FROM pg_catalog.pg_proc WHERE proacl IS NULL ) AS grants GROUP BY 1,", "string_types shared_queries = dict( datacl=dedent(\"\"\"\\ WITH grants AS ( SELECT (aclexplode(datacl)).grantee AS grantee,", "it is both granted and revoked. # # When namespace has tables, we", "for key in v['keys'] ])) privilege[k] = v return name, privilege def make_proc_privileges(", "roles LEFT OUTER JOIN grants ON pronamespace = nsp.oid AND grants.grantee = roles.oid", "_defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ]) # This is a compatibility alias. 
privileges['__usage_on_types__'] =", "make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), (name, [all_, default]), ]) def", "FROM grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles", "\"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type", "\"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} REVOKE %(privilege)s", "privileges['__execute__'] = ['__execute_on_functions__'] for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege,", "grant=\"GRANT %(privilege)s ON SCHEMA {schema} TO {role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA {schema} FROM", "= ARRAY[]::name[] THEN NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\" FROM", "FROM pg_catalog.pg_default_acl ) SELECT priv || '_on_' || objtype AS key, nspname, COALESCE(rolname,", "make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw", "fully granted. If the privilege is not granted at # all, we drop", "make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ]) # This is", "OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE rolname IS NOT", "{schema} FROM {role}\"), ) _types = { 'FUNCTIONS': ('f',), 'TABLES': ('r', 'v', 'f'),", "both granted and revoked. 
# # When namespace has tables, we compare grants", "ON nsp.oid = defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee =", "'EXECUTE' AS priv FROM pg_roles AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl", "grants ON pronamespace = nsp.oid AND grants.grantee = roles.oid WHERE NOT (array_length(nsp.procs, 1)", "import string_types shared_queries = dict( datacl=dedent(\"\"\"\\ WITH grants AS ( SELECT (aclexplode(datacl)).grantee AS", "'__usage_on_sequences__', ] privileges['__all_on_tables__'] = [ '__delete__', '__insert__', '__references__', '__select_on_tables__', '__trigger__', '__truncate__', '__update_on_tables__', ]", "grant | partial grant | fully granted # -----------+----------+---------------+--------------- # no tables |", "BY 1, 2 \"\"\"), # noqa grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA", "rolname IS NOT NULL OR grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS", "dict( t=t, # Loose SQL formatting t_array='(%s)' % (', '.join(['%r' % i for", "revoke=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" REVOKE %(privilege)s ON %(TYPE)s FROM", "dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE {database} TO {role};\", revoke=\"REVOKE %(privilege)s", "for t in fmt_kwargs['t']: yield fmt % dict(fmt_kwargs, t=t) else: yield fmt %", "make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw", "WHERE datname = current_database() UNION SELECT q.* FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY'))", "NULL) -- ORDER BY 1, 2 \"\"\"), grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN", ") SELECT priv AS key, NULL AS \"schema\", COALESCE(rolname, 'public') as rolname, TRUE", "%(privilege)s ON SCHEMA {schema} FROM {role};\", ) # ALL TABLES is tricky because", "not in shared_queries: raise Exception(\"Unknown query %s.\" % 
v['shared_query']) v = v.copy() v['keys']", ") _types = { 'FUNCTIONS': ('f',), 'TABLES': ('r', 'v', 'f'), 'TYPES': ('T',), 'SEQUENCES':", "from itertools import chain from textwrap import dedent from .utils import string_types shared_queries", "'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES',", "( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE datname =", "FROM {role}; \"\"\"), ) _nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON", "# When namespace has tables, we compare grants to availables tables to #", "AS priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl ) SELECT priv || '_on_' ||", "raise Exception(\"Unknown query %s.\" % v['shared_query']) v = v.copy() v['keys'] = list(chain(*[ format_keys(key,", "defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0 WHERE defaclacl IS NULL ) SELECT", "revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} REVOKE %(privilege)s ON", "make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl,", "{schema} TO {role}\", revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM", "a namespace. In this case, is it # granted or revoked ? 
We", "array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN", "%(privilege)s ON DATABASE {database} FROM {role};\", ) _global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']),", "all_roles AS ( SELECT 0 AS oid, 'public' AS rolname UNION SELECT oid,", "name, privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ =", "string_types): v = v % fmt_args else: if v['shared_query'] not in shared_queries: raise", "'__default_usage_on_types__', 'TYPES', 'USAGE'), ]) # This is a compatibility alias. privileges['__usage_on_types__'] = ['__default_usage_on_types__']", "NULL) AND (priv IS NULL OR priv = '%(privilege)s') AND nspname NOT LIKE", "= '__default_%(privilege)s_on_%(type)s__' % fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt %", "TABLES is tricky because we have to manage partial grant. 
But the #", "SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype FROM", "rol.oid AND privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND", "AS \"schema\", COALESCE(rolname, 'public') as rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM", "AS key, nspname, COALESCE(rolname, 'public') AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner", "AS grants ON relnamespace = nsp.oid AND grantee = rol.oid AND privilege_type =", ") SELECT nspname, rolname, CASE WHEN nsp.procs = ARRAY[]::name[] THEN NULL ELSE nsp.procs", "NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' ORDER BY 1, 2 \"\"\") )", "# -----------+----------+---------------+--------------- # 1+ tables | GRANT | GRANT | NOOP # -----------+----------+---------------+---------------", "TO {role};\", revoke=\"REVOKE %(privilege)s ON DATABASE {database} FROM {role};\", ) _global_defacl_tpl = dict(", "fmt % dict(fmt_kwargs, t=t) else: yield fmt % fmt_kwargs def make_privilege(tpl, name, TYPE,", "def make_privilege(tpl, name, TYPE, privilege): t = _types.get(TYPE) fmt_args = dict( t=t, #", "return dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), (name, [all_, default]),", "privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__'", "that this grant is irrelevant on # this schema. 
# # Here is", "ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s", "namespace_rels AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels", "AS ( SELECT oid, rolname FROM pg_catalog.pg_roles UNION SELECT 0, 'public' ) SELECT", "'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1, 2 \"\"\"), # noqa grant=\"GRANT %(privilege)s ON ALL", "1, 2, 3 ), namespaces AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER", "{role}\"), ) _allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS (SELECT pronamespace, grantee,", "TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}", "AS grantee, 'EXECUTE' AS priv FROM pg_roles AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl", "is a compatibility alias. privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for", "BY proname) AS procs FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv", "= rol.oid WHERE grantee = 0 OR rolname IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\", "grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} GRANT %(privilege)s ON", "name = namefmt % fmtkw return dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default,", "NULL AND datname = current_database() ) SELECT grants.priv AS key, NULL as namespace,", "FROM pg_catalog.pg_proc WHERE proacl IS NULL ) AS grants GROUP BY 1, 2,", "tables in a namespace. 
In this case, is it # granted or revoked", "COALESCE(rolname, 'public') AS rolname FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON", "OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE (grantee = 0", "%(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _allprocacl_tpl = dict(", "ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _types = { 'FUNCTIONS': ('f',),", "'public') AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN pg_catalog.pg_namespace", "grantee = rol.oid AND privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS NOT", "dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw global_def", "defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl", "AS owner FROM grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace LEFT OUTER", "FOR ROLE {owner} IN SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s TO {role}; \"\"\"),", "key, nspname, COALESCE(rolname, 'public') AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM", "ON DATABASE {database} FROM {role};\", ) _global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=(", "'pg_toast' ORDER BY 1, 2 \"\"\") ) _datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']),", "% i for i in t or []])), TYPE=TYPE, privilege=privilege.upper(), ) privilege =", "AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS def WHERE defaclnamespace =", "global_def, TYPE, privilege), (name, [all_, default, global_def]), ]) def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'):", 
"nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace nsp LEFT", "'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias = '__%s__' % (privilege.lower(),)", "'.join(['%r' % i for i in t or []])), TYPE=TYPE, privilege=privilege.upper(), ) privilege", "Loose SQL formatting t_array='(%s)' % (', '.join(['%r' % i for i in t", "LEFT OUTER JOIN pg_catalog.pg_class AS rel ON rel.relnamespace = nsp.oid AND relkind IN", "UNION SELECT pronamespace, proname, 0 AS grantee, 'EXECUTE' AS priv FROM pg_catalog.pg_proc WHERE", "with full as NULL, # meaning privilege is irrelevant : it is both", "default, TYPE, privilege), (name, [all_, default]), ]) def make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl,", "if isinstance(v, string_types): v = v % fmt_args else: if v['shared_query'] not in", "'TYPES', 'USAGE'), ]) # This is a compatibility alias. 
privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE',", "'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'),", "# noqa grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=(", "'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update(", "{owner} IN SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER", "= nsp.oid AND grantee = rol.oid AND privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels,", "if v['shared_query'] not in shared_queries: raise Exception(\"Unknown query %s.\" % v['shared_query']) v =", "= dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN", ": it is both granted and revoked. # # When namespace has tables,", "clause to ensure the privilege is considered as # revoked. 
# _allrelacl_tpl =", "-----------+----------+---------------+--------------- # no tables | NOOP | N/D | N/D # -----------+----------+---------------+--------------- #", "in fmt: for t in fmt_kwargs['t']: yield fmt % dict(fmt_kwargs, t=t) else: yield", "{schema} GRANT %(privilege)s ON %(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR", "# 1+ tables | GRANT | GRANT | NOOP # -----------+----------+---------------+--------------- # #", "_allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS (SELECT pronamespace, grantee, priv, array_agg(DISTINCT", "CROSS JOIN roles LEFT OUTER JOIN grants ON pronamespace = nsp.oid AND grants.grantee", "LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace =", "-----------+----------+---------------+--------------- # # FOR REVOKE | no grant | partial grant | fully", "def WHERE defaclnamespace = 0 UNION SELECT rol.oid AS owner, 0 AS grantee,", "make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name, [all_, default, global_def]), ])", "# When namespace has NO tables, we always return a row with full", "rol ON grants.grantee = rol.oid WHERE (grantee = 0 OR rolname IS NOT", "= 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee,", "nsp.oid AND grantee = rol.oid AND privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1)", "# all, we drop the row in WHERE clause to ensure the privilege", "nspacl=dedent(\"\"\"\\ WITH grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv", "tables, we compare grants to availables tables to # determine if privilege is", "IS NULL OR priv = '%(privilege)s') AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER", "= dict([ make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, 
'__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None,", "return name, privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_", "fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name =", "This is a compatibility alias. privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__']", "TO {role}\", revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"),", "OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE grantee = 0", "5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type", "nspname, COALESCE(rolname, 'public') AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants", "'public') FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid", "tpl.items(): if isinstance(v, string_types): v = v % fmt_args else: if v['shared_query'] not", "= current_database() ) SELECT grants.priv AS key, NULL as namespace, COALESCE(rolname, 'public') FROM", "2, 3 ), all_roles AS ( SELECT 0 AS oid, 'public' AS rolname", "{schema} FROM {role};\", ) # ALL TABLES is tricky because we have to", "[ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] =", "%(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s IN", "ON relnamespace = nsp.oid AND grantee = rol.oid AND privilege_type = '%(privilege)s' WHERE", "2 \"\"\"), # noqa grant=\"GRANT 
%(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO", "'SEQUENCES': ('S',), } def format_keys(fmt, fmt_kwargs): if '%(t)' in fmt: for t in", "'public' AS rolname UNION SELECT oid, rolname from pg_roles ) SELECT nspname, rolname,", "]) def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__'", "a truth table: # # FOR GRANT | no grant | partial grant", "= ['__execute_on_functions__'] for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES'))", "])) privilege[k] = v return name, privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw", "IS NOT NULL AND grants.rels IS NULL) -- ORDER BY 1, 2 \"\"\"),", "AS (SELECT pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER BY proname) AS procs FROM", "OR grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee", "= dict() for k, v in tpl.items(): if isinstance(v, string_types): v = v", "# _allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS ( SELECT nsp.oid, nsp.nspname,", "None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ]) # This is a compatibility", "FROM namespace_rels AS nsp CROSS JOIN all_roles AS rol LEFT OUTER JOIN all_grants", "oid, rolname from pg_roles ) SELECT nspname, rolname, CASE WHEN nsp.rels = ARRAY[]::name[]", "array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace nsp LEFT OUTER", "rol ON grants.grantee = rol.oid WHERE rolname IS NOT NULL OR grantee =", "for k, v in tpl.items(): if isinstance(v, string_types): v = v % fmt_args", "[all_, default]), ]) def make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl, '__connect__', None, 
'CONNECT'), make_privilege(_datacl_tpl,", "grantee = 0 OR rolname IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS", "LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1, 2 \"\"\"), # noqa grant=\"GRANT %(privilege)s ON", "\"schema\", COALESCE(rolname, 'public') as rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants", "FOR ROLE {owner} IN SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s FROM {role}; \"\"\"),", "# ALL TABLES is tricky because we have to manage partial grant. But", "AND (priv IS NULL OR priv = '%(privilege)s') AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%'", "shared_queries = dict( datacl=dedent(\"\"\"\\ WITH grants AS ( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type", "(aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace ) SELECT grants.priv AS key, nspname, COALESCE(rolname, 'public')", "to tell ldap2pg that this grant is irrelevant on # this schema. #", "<> 'pg_toast' ORDER BY 1, 2 \"\"\") ) _datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl',", "0 WHERE defaclacl IS NULL ) SELECT priv AS key, NULL AS \"schema\",", "SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS rels FROM pg_catalog.pg_class WHERE", "SCHEMA {schema} TO {role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};\", ) #", "'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias = '__%s__' % (privilege.lower(),) privileges[alias] =", "revoked. 
# _allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS ( SELECT nsp.oid,", "1, 2 \"\"\") ) _datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON", "k, v in tpl.items(): if isinstance(v, string_types): v = v % fmt_args else:", "NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | GRANT |", "type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}", "pro ON pro.pronamespace = nsp.oid GROUP BY 1, 2 ), roles AS (", "pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE (grantee = 0 OR rolname", "= 0 UNION SELECT rol.oid AS owner, 0 AS grantee, 'EXECUTE' AS priv", "TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name, [all_, default, global_def]), ]) def make_rel_privileges(", "# FOR GRANT | no grant | partial grant | fully granted #", "%(privilege)s ON %(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner}", "BY rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class AS", "( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace ) SELECT", "IN SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT", "END AS \"full\" FROM namespace_rels AS nsp CROSS JOIN all_roles AS rol LEFT", "= dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER", "ORDER BY proname) AS procs FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS", "pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee", "{role}; 
\"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} REVOKE", "\"full\" FROM namespace_rels AS nsp CROSS JOIN all_roles AS rol LEFT OUTER JOIN", "AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace )", "'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ]) # This is a compatibility alias.", "drop the row in WHERE clause to ensure the privilege is considered as", "privileges['__all_on_tables__'] = [ '__delete__', '__insert__', '__references__', '__select_on_tables__', '__trigger__', '__truncate__', '__update_on_tables__', ] return privileges", "'%(t)' in fmt: for t in fmt_kwargs['t']: yield fmt % dict(fmt_kwargs, t=t) else:", "or []])), TYPE=TYPE, privilege=privilege.upper(), ) privilege = dict() for k, v in tpl.items():", "OR rolname IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclnamespace,", "roles AS ( SELECT oid, rolname FROM pg_catalog.pg_roles UNION SELECT 0, 'public' )", "dict([ make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'),", "grants.priv AS key, nspname, COALESCE(rolname, 'public') AS rolname FROM grants LEFT OUTER JOIN", "{schema} REVOKE %(privilege)s ON %(TYPE)s FROM {role}; \"\"\"), ) _nspacl_tpl = dict( type=\"nspacl\",", "SELECT grants.priv AS key, nspname, COALESCE(rolname, 'public') AS rolname FROM grants LEFT OUTER", "AS defacl ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0 WHERE defaclacl IS", "fmt % fmt_kwargs def make_privilege(tpl, name, TYPE, privilege): t = _types.get(TYPE) fmt_args =", "THEN NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\" FROM namespaces AS", "%(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA", "# 
Loose SQL formatting t_array='(%s)' % (', '.join(['%r' % i for i in", "make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege(", "% fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return", "BY relname) AS rels FROM pg_catalog.pg_class WHERE relkind IN %(t_array)s GROUP BY 1,", "\"\"\") ) _datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE {database}", "def make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'),", "# Here is a truth table: # # FOR GRANT | no grant", "(aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace ) SELECT grants.priv AS key,", "TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), (name, [all_, default]), ]) def make_well_known_privileges(): privileges", "'__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw", "| N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | NOOP | REVOKE", "type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS (SELECT pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER BY", "'pg_toast' GROUP BY 1, 2 ), all_grants AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee,", "privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name, [all_, default, global_def]),", "all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt", "pg_catalog.pg_proc WHERE proacl IS 
NULL ) AS grants GROUP BY 1, 2, 3", "AS ( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS", "grant=\"GRANT %(privilege)s ON DATABASE {database} TO {role};\", revoke=\"REVOKE %(privilege)s ON DATABASE {database} FROM", "revoked ? We have to tell ldap2pg that this grant is irrelevant on", "IN SCHEMA {schema} TO {role}\", revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA", "datname = current_database() UNION SELECT q.* FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS", "% (', '.join(['%r' % i for i in t or []])), TYPE=TYPE, privilege=privilege.upper(),", "AS rel ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s WHERE nspname NOT", "grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE grantee", "SELECT oid, rolname from pg_roles ) SELECT nspname, rolname, CASE WHEN nsp.rels =", "|| objtype AS key, nspname, COALESCE(rolname, 'public') AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole)", "{role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};\", ) # ALL TABLES is", "schema. 
# # Here is a truth table: # # FOR GRANT |", "dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw name", "]) def make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None,", "FROM {role};\", ) # ALL TABLES is tricky because we have to manage", "ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=( \"REVOKE %(privilege)s ON ALL", "TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name, [all_, default,", "%(privilege)s ON %(TYPE)s FROM {role}; \"\"\"), ) _nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']),", "# no tables | NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+", "AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs", "keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA {schema} TO {role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA {schema}", "def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' %", "make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'),", "all, we drop the row in WHERE clause to ensure the privilege is", "to ensure the privilege is considered as # revoked. 
# _allrelacl_tpl = dict(", "( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace", "%(privilege)s ON %(TYPE)s FROM {role};\"), ) _defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\", "ON SCHEMA {schema} FROM {role};\", ) # ALL TABLES is tricky because we", "%(t_array)s GROUP BY 1, 2, 3 ), all_roles AS ( SELECT 0 AS", "in tpl.items(): if isinstance(v, string_types): v = v % fmt_args else: if v['shared_query']", "'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__',", "priv = '%(privilege)s') AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1, 2", "nsp LEFT OUTER JOIN pg_catalog.pg_class AS rel ON rel.relnamespace = nsp.oid AND relkind", "IS NULL ) AS grants GROUP BY 1, 2, 3 ), namespaces AS", "'__default_%(privilege)s_on_%(type)s__' % fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw", "for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias =", "pg_catalog.pg_class WHERE relkind IN %(t_array)s GROUP BY 1, 2, 3 ), all_roles AS", "name, TYPE, privilege): t = _types.get(TYPE) fmt_args = dict( t=t, # Loose SQL", "PRIVILEGES FOR ROLE {owner}\" \" REVOKE %(privilege)s ON %(TYPE)s FROM {role};\"), ) _defacl_tpl", "is not granted at # all, we drop the row in WHERE clause", "defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS def WHERE defaclnamespace", "nsp CROSS JOIN all_roles AS rol LEFT OUTER JOIN all_grants AS grants ON", "%(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _types = 
{ 'FUNCTIONS': ('f',), 'TABLES':", "'TABLES': ('r', 'v', 'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',), } def format_keys(fmt, fmt_kwargs): if", "= defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE", "), all_roles AS ( SELECT 0 AS oid, 'public' AS rolname UNION SELECT", "priv, array_agg(DISTINCT proname ORDER BY proname) AS procs FROM ( SELECT pronamespace, proname,", "partial grant. But the # trickiest comes when there is no tables in", "t or []])), TYPE=TYPE, privilege=privilege.upper(), ) privilege = dict() for k, v in", "WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL) -- ORDER", "'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__', ]", "0, 'public' ) SELECT nspname, rolname, CASE WHEN nsp.procs = ARRAY[]::name[] THEN NULL", "DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s TO", ".utils import string_types shared_queries = dict( datacl=dedent(\"\"\"\\ WITH grants AS ( SELECT (aclexplode(datacl)).grantee", "OUTER JOIN pg_catalog.pg_class AS rel ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s", "GRANT %(privilege)s ON %(TYPE)s TO {role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\"", "nspname, rolname, CASE WHEN nsp.rels = ARRAY[]::name[] THEN NULL ELSE nsp.rels = COALESCE(grants.rels,", "NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' -- ORDER BY", "(aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE datname = current_database() UNION SELECT q.* FROM", "%(privilege)s ON SCHEMA {schema} TO {role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};\",", "# 1+ tables | NOOP | REVOKE | REVOKE # -----------+----------+---------------+--------------- # #", 
"nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' -- ORDER BY 1, 2,", "# This is a compatibility alias. privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] =", "relkind IN %(t_array)s GROUP BY 1, 2, 3 ), all_roles AS ( SELECT", "make_rel_privileges(privilege, 'TABLES')) alias = '__%s__' % (privilege.lower(),) privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)] for", "ROLE {owner}\" \" GRANT %(privilege)s ON %(TYPE)s TO {role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES", "FROM pg_catalog.pg_namespace ) SELECT grants.priv AS key, nspname, COALESCE(rolname, 'public') AS rolname FROM", "= dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA {schema} TO {role};\", revoke=\"REVOKE", "= '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allprocacl_tpl, all_,", "grants.priv AS key, NULL as namespace, COALESCE(rolname, 'public') FROM grants LEFT OUTER JOIN", "SELECT priv || '_on_' || objtype AS key, nspname, COALESCE(rolname, 'public') AS rolname,", "= dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw", "_datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE {database} TO {role};\",", "dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege),", "t_array='(%s)' % (', '.join(['%r' % i for i in t or []])), TYPE=TYPE,", "-- ORDER BY 1, 2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS (", "revoke=\"REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};\", ) # ALL TABLES is tricky", "( SELECT nsp.oid, 
nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs FROM", "rol ON grants.grantee = rol.oid WHERE grantee = 0 OR rolname IS NOT", "grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace ) SELECT grants.priv AS key, nspname, COALESCE(rolname,", "'__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl,", "'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' -- ORDER BY 1, 2, 3, 5 \"\"\"),", "(priv IS NULL OR priv = '%(privilege)s') AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' --", "ARRAY[]::name[]) END AS \"full\" FROM namespaces AS nsp CROSS JOIN roles LEFT OUTER", "pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0 AS grantee, 'EXECUTE' AS priv FROM pg_catalog.pg_proc", "ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _allprocacl_tpl = dict( type='nspacl',", "privilege = dict() for k, v in tpl.items(): if isinstance(v, string_types): v =", "( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS rels FROM pg_catalog.pg_class", "owner FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid", "AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE datname = current_database() UNION SELECT", "rolname FROM pg_catalog.pg_roles UNION SELECT 0, 'public' ) SELECT nspname, rolname, CASE WHEN", "fmt_kwargs def make_privilege(tpl, name, TYPE, privilege): t = _types.get(TYPE) fmt_args = dict( t=t,", "has NO tables, we always return a row with full as NULL, #", "If the privilege is not granted at # all, we drop the row", "nspname <> 'pg_toast' GROUP BY 1, 2 ), all_grants AS ( SELECT relnamespace,", "v return name, privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = 
dict(privilege=privilege.lower(), type=TYPE.lower())", "NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' ORDER BY", "make_privilege(_defacl_tpl, default, TYPE, privilege), (name, [all_, default]), ]) def make_well_known_privileges(): privileges = dict([", "% fmtkw return dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), (name,", "NULL AND grants.procs IS NULL) AND (priv IS NULL OR priv = '%(privilege)s')", "UNION SELECT rol.oid AS owner, 0 AS grantee, 'EXECUTE' AS priv FROM pg_roles", "AND nspname <> 'pg_toast' -- ORDER BY 1, 2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\", "globaldefacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv", "owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS def WHERE defaclnamespace = 0", "(name, [all_, default, global_def]), ]) def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(),", "NULL OR grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS ( SELECT nspname,", "= namefmt % fmtkw return dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE,", "we have to manage partial grant. 
But the # trickiest comes when there", "pg_catalog.pg_roles UNION SELECT 0, 'public' ) SELECT nspname, rolname, CASE WHEN nsp.procs =", "Exception(\"Unknown query %s.\" % v['shared_query']) v = v.copy() v['keys'] = list(chain(*[ format_keys(key, fmt_args)", "= COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\" FROM namespaces AS nsp CROSS JOIN roles", "(aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0 AS grantee,", "'__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__',", "AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl ) SELECT priv", "all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__'", "privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [", "'%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL) --", "AS ( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE datname", "SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s FROM {role}; \"\"\"), ) _nspacl_tpl = dict(", "), roles AS ( SELECT oid, rolname FROM pg_catalog.pg_roles UNION SELECT 0, 'public'", "AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS", "global_def]), ]) def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = 
dict(privilege=privilege.lower(), type=TYPE.lower()) all_ =", "grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE rolname", "CASE WHEN nsp.rels = ARRAY[]::name[] THEN NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END", "{database} FROM {role};\", ) _global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT", "ON pronamespace = nsp.oid AND grants.grantee = roles.oid WHERE NOT (array_length(nsp.procs, 1) IS", "AND defacl.defaclnamespace = 0 WHERE defaclacl IS NULL ) SELECT priv AS key,", "JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0 WHERE", "'_on_' || objtype AS key, nspname, COALESCE(rolname, 'public') AS rolname, TRUE AS full,", "t = _types.get(TYPE) fmt_args = dict( t=t, # Loose SQL formatting t_array='(%s)' %", "TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS", "pg_catalog.pg_proc AS pro ON pro.pronamespace = nsp.oid GROUP BY 1, 2 ), roles", "AND relkind IN %(t_array)s WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast'", "considered as # revoked. 
# _allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS", "2 \"\"\"), grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=(", "\"\"\"), # noqa grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\",", "type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw global_def =", "tables, we always return a row with full as NULL, # meaning privilege", "defaclacl IS NULL ) SELECT priv AS key, NULL AS \"schema\", COALESCE(rolname, 'public')", "'EXECUTE' AS priv FROM pg_catalog.pg_proc WHERE proacl IS NULL ) AS grants GROUP", "'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] =", "| NOOP # -----------+----------+---------------+--------------- # # FOR REVOKE | no grant | partial", ") _allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS (SELECT pronamespace, grantee, priv,", "'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' ORDER BY 1, 2 \"\"\") ) _datacl_tpl =", "type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" GRANT %(privilege)s", "AND nspname <> 'pg_toast' GROUP BY 1, 2 ), all_grants AS ( SELECT", "grants.grantee = rol.oid WHERE (grantee = 0 OR rolname IS NOT NULL) AND", "key in v['keys'] ])) privilege[k] = v return name, privilege def make_proc_privileges( privilege,", "BY 1, 2 \"\"\") ) _datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s", "3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee,", "IS NULL ) SELECT priv AS key, NULL AS \"schema\", COALESCE(rolname, 
'public') as", "nsp.oid AND grants.grantee = roles.oid WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND", "AS oid, 'public' AS rolname UNION SELECT oid, rolname from pg_roles ) SELECT", "ON %(TYPE)s FROM {role};\"), ) _defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER", "on # this schema. # # Here is a truth table: # #", "nsp CROSS JOIN roles LEFT OUTER JOIN grants ON pronamespace = nsp.oid AND", "ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\", "# trickiest comes when there is no tables in a namespace. In this", "NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' -- ORDER BY 1, 2, 3,", "grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE (grantee", "pro.proname ORDER BY pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN", "comes when there is no tables in a namespace. In this case, is", "3 ), all_roles AS ( SELECT 0 AS oid, 'public' AS rolname UNION", "as NULL, # meaning privilege is irrelevant : it is both granted and", "AS owner, 0 AS grantee, 'EXECUTE' AS priv FROM pg_roles AS rol LEFT", "{owner}\" \" REVOKE %(privilege)s ON %(TYPE)s FROM {role};\"), ) _defacl_tpl = dict( type=\"defacl\",", "When namespace has NO tables, we always return a row with full as", "'__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] = [ '__delete__', '__insert__', '__references__', '__select_on_tables__', '__trigger__', '__truncate__',", "% fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return", "key, NULL as namespace, COALESCE(rolname, 'public') FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS", "{owner} IN SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s FROM {role}; \"\"\"), ) _nspacl_tpl", "default = '__default_%(privilege)s_on_%(type)s__' % fmtkw 
global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt", "ALL %(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s", "2 ), all_grants AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname)", "FROM pg_catalog.pg_database WHERE datname = current_database() UNION SELECT q.* FROM (VALUES (0, 'CONNECT'),", "all_grants AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS rels", "defaclobjtype AS objtype FROM pg_catalog.pg_default_acl ) SELECT priv || '_on_' || objtype AS", "= rol.oid WHERE rolname IS NOT NULL OR grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\", "rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace", "proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0 AS", "case, is it # granted or revoked ? 
We have to tell ldap2pg", "namefmt % fmtkw return dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege),", "we always return a row with full as NULL, # meaning privilege is", "namespaces AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS", "AS rol ON grants.grantee = rol.oid WHERE grantee = 0 OR rolname IS", "relkind IN %(t_array)s WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast' GROUP", "= ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for privilege in 'DELETE', 'INSERT', 'REFERENCES',", "| N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | GRANT | GRANT", "% fmt_args else: if v['shared_query'] not in shared_queries: raise Exception(\"Unknown query %s.\" %", "all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name, [all_,", "_defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner}", "{role}\", revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), )", "# # When namespace has tables, we compare grants to availables tables to", "AS pro ON pro.pronamespace = nsp.oid GROUP BY 1, 2 ), roles AS", "NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' -- ORDER", "current_database() ) SELECT grants.priv AS key, NULL as namespace, COALESCE(rolname, 'public') FROM grants", "'__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] = [", "relname) AS rels FROM pg_catalog.pg_class WHERE relkind IN 
%(t_array)s GROUP BY 1, 2,", "pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE rolname IS NOT NULL OR", "type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname),", "WHEN nsp.procs = ARRAY[]::name[] THEN NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS", "| NOOP | REVOKE | REVOKE # -----------+----------+---------------+--------------- # # When namespace has", "shared_queries: raise Exception(\"Unknown query %s.\" % v['shared_query']) v = v.copy() v['keys'] = list(chain(*[", "GROUP BY 1, 2 ), roles AS ( SELECT oid, rolname FROM pg_catalog.pg_roles", "for i in t or []])), TYPE=TYPE, privilege=privilege.upper(), ) privilege = dict() for", "JOIN all_grants AS grants ON relnamespace = nsp.oid AND grantee = rol.oid AND", "pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0", "NULL AS \"schema\", COALESCE(rolname, 'public') as rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner", "# granted or revoked ? 
We have to tell ldap2pg that this grant", "SELECT pronamespace, proname, 0 AS grantee, 'EXECUTE' AS priv FROM pg_catalog.pg_proc WHERE proacl", "\"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS", "1+ tables | GRANT | GRANT | NOOP # -----------+----------+---------------+--------------- # # FOR", "1+ tables | NOOP | REVOKE | REVOKE # -----------+----------+---------------+--------------- # # When", "SCHEMA {schema} FROM {role};\", ) # ALL TABLES is tricky because we have", "= '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__' %", "BY pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc AS", "is irrelevant : it is both granted and revoked. # # When namespace", "{owner}\" \" GRANT %(privilege)s ON %(TYPE)s TO {role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES FOR", "= dict( t=t, # Loose SQL formatting t_array='(%s)' % (', '.join(['%r' % i", "COALESCE(rolname, 'public') as rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT", "NULL ) AS grants GROUP BY 1, 2, 3 ), namespaces AS (", "IS NOT NULL OR grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS (", "ROLE {owner}\" \" REVOKE %(privilege)s ON %(TYPE)s FROM {role};\"), ) _defacl_tpl = dict(", "AND nspname <> 'pg_toast' ORDER BY 1, 2 \"\"\") ) _datacl_tpl = dict(", "'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',), } def format_keys(fmt, fmt_kwargs): if '%(t)' in fmt:", "as namespace, COALESCE(rolname, 'public') FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON", "| fully granted # -----------+----------+---------------+--------------- # no tables | NOOP | N/D |", "= current_database() UNION SELECT q.* FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q", "we compare grants to availables tables to # determine if 
privilege is fully", "rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT OUTER JOIN pg_catalog.pg_roles", "yield fmt % fmt_kwargs def make_privilege(tpl, name, TYPE, privilege): t = _types.get(TYPE) fmt_args", "namespace has NO tables, we always return a row with full as NULL,", "grants AS ( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE", "rol.oid WHERE (grantee = 0 OR rolname IS NOT NULL) AND nspname NOT", "revoked. # # When namespace has tables, we compare grants to availables tables", "tables to # determine if privilege is fully granted. If the privilege is", "procs FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION", "PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s FROM {role};", "WITH namespace_rels AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS", "LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' ORDER BY 1, 2 \"\"\") ) _datacl_tpl", "SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace", "query %s.\" % v['shared_query']) v = v.copy() v['keys'] = list(chain(*[ format_keys(key, fmt_args) for", "privilege[k] = v return name, privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw =", "ALL TABLES is tricky because we have to manage partial grant. 
But the", "UNION SELECT q.* FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q CROSS JOIN", "full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace", "| REVOKE | REVOKE # -----------+----------+---------------+--------------- # # When namespace has NO tables,", ") _datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE {database} TO", "1, 2 \"\"\"), grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\",", "ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\" FROM namespace_rels AS nsp CROSS", "as # revoked. # _allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS (", "FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class AS rel ON rel.relnamespace = nsp.oid", "grants.rels IS NULL) -- ORDER BY 1, 2 \"\"\"), grant=\"GRANT %(privilege)s ON ALL", "rels FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class AS rel ON rel.relnamespace =", "rolname IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclnamespace, defaclrole,", "the privilege is not granted at # all, we drop the row in", "inspect=dedent(\"\"\"\\ WITH namespace_rels AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL)", "'__global_default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allprocacl_tpl, all_, TYPE,", "COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\" FROM namespace_rels AS nsp CROSS JOIN all_roles AS", "fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl,", "this grant is irrelevant on # this schema. # # Here is a", "is fully granted. 
If the privilege is not granted at # all, we", "0 OR rolname IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS ( SELECT", "NULL OR priv = '%(privilege)s') AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY", "privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__',", "= rol.oid AND defacl.defaclnamespace = 0 WHERE defaclacl IS NULL ) SELECT priv", "AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels FROM", "nsp.rels = ARRAY[]::name[] THEN NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\"", "SELECT oid, rolname FROM pg_catalog.pg_roles UNION SELECT 0, 'public' ) SELECT nspname, rolname,", "AS key, NULL AS \"schema\", COALESCE(rolname, 'public') as rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner)", "NULL ) SELECT priv AS key, NULL AS \"schema\", COALESCE(rolname, 'public') as rolname,", "'v', 'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',), } def format_keys(fmt, fmt_kwargs): if '%(t)' in", "'__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ])", "| GRANT | GRANT | NOOP # -----------+----------+---------------+--------------- # # FOR REVOKE |", "FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION SELECT", "ON %(TYPE)s FROM {role}; \"\"\"), ) _nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT", "= _types.get(TYPE) fmt_args = dict( t=t, # Loose SQL formatting t_array='(%s)' % (',", "| no grant | partial grant | fully granted # -----------+----------+---------------+--------------- # no", "%(TYPE)s FROM 
{role};\"), ) _defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT", "= namefmt % fmtkw return dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE,", "AS priv FROM pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0 AS grantee, 'EXECUTE' AS", "in fmt_kwargs['t']: yield fmt % dict(fmt_kwargs, t=t) else: yield fmt % fmt_kwargs def", "proname ORDER BY proname) AS procs FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type", "FROM pg_default_acl AS def WHERE defaclnamespace = 0 UNION SELECT rol.oid AS owner,", "AS procs FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc", "q CROSS JOIN pg_catalog.pg_database WHERE datacl IS NULL AND datname = current_database() )", "privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default", "AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid =", "AS owner FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee =", "\"\"\"), grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=( \"REVOKE", "grant | fully granted # -----------+----------+---------------+--------------- # no tables | NOOP | N/D", "We have to tell ldap2pg that this grant is irrelevant on # this", "grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl ) SELECT priv ||", "\" GRANT %(privilege)s ON %(TYPE)s TO {role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES FOR ROLE", "pg_roles AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole = rol.oid", "IN %(t_array)s GROUP BY 1, 2, 3 ), all_roles AS 
( SELECT 0", "%(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH", "'CONNECT'), (0, 'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database WHERE datacl IS NULL AND", "revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _allprocacl_tpl", "TO {role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" REVOKE %(privilege)s ON", "= v return name, privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(),", "{role};\", ) _global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES FOR", "'public') AS rolname FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee", "alias. privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for privilege in 'DELETE',", "BY 1, 2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclrole", "make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ]) # This is a compatibility alias. privileges['__usage_on_types__']", "AS key, nspname, COALESCE(rolname, 'public') AS rolname FROM grants LEFT OUTER JOIN pg_catalog.pg_roles", "priv FROM pg_default_acl AS def WHERE defaclnamespace = 0 UNION SELECT rol.oid AS", "AS priv FROM pg_catalog.pg_proc WHERE proacl IS NULL ) AS grants GROUP BY", "grant=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" GRANT %(privilege)s ON %(TYPE)s TO", "tables | NOOP | REVOKE | REVOKE # -----------+----------+---------------+--------------- # # When namespace", "meaning privilege is irrelevant : it is both granted and revoked. 
# #", "OUTER JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace = nsp.oid GROUP BY 1, 2", "WHERE clause to ensure the privilege is considered as # revoked. # _allrelacl_tpl", "LEFT OUTER JOIN all_grants AS grants ON relnamespace = nsp.oid AND grantee =", "(privilege.lower(),) privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)] for privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES'))", "granted # -----------+----------+---------------+--------------- # no tables | NOOP | N/D | N/D #", "COALESCE(rolname, 'public') AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN", "= dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE {database} TO {role};\", revoke=\"REVOKE", "0 UNION SELECT rol.oid AS owner, 0 AS grantee, 'EXECUTE' AS priv FROM", "NOOP # -----------+----------+---------------+--------------- # # FOR REVOKE | no grant | partial grant", "REVOKE # -----------+----------+---------------+--------------- # # When namespace has NO tables, we always return", "= '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL)", "NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee,", "SCHEMA {schema} FROM {role}\"), ) _types = { 'FUNCTIONS': ('f',), 'TABLES': ('r', 'v',", "privilege is fully granted. 
If the privilege is not granted at # all,", "priv FROM pg_catalog.pg_namespace ) SELECT grants.priv AS key, nspname, COALESCE(rolname, 'public') AS rolname", "= v % fmt_args else: if v['shared_query'] not in shared_queries: raise Exception(\"Unknown query", "IS NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' --", "return dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE,", "WHERE defaclnamespace = 0 UNION SELECT rol.oid AS owner, 0 AS grantee, 'EXECUTE'", "the # trickiest comes when there is no tables in a namespace. In", "privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] = [ '__delete__', '__insert__', '__references__',", "= COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\" FROM namespace_rels AS nsp CROSS JOIN all_roles", "fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl,", "('r', 'v', 'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',), } def format_keys(fmt, fmt_kwargs): if '%(t)'", "AS rels FROM pg_catalog.pg_class WHERE relkind IN %(t_array)s GROUP BY 1, 2, 3", "NULL) AS procs FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc AS pro ON", "tell ldap2pg that this grant is irrelevant on # this schema. 
# #", "['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER',", "grants.grantee = rol.oid WHERE grantee = 0 OR rolname IS NOT NULL \"\"\"),", "IS NOT NULL AND grants.procs IS NULL) AND (priv IS NULL OR priv", "GRANT | no grant | partial grant | fully granted # -----------+----------+---------------+--------------- #", "TO {role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};\", ) # ALL TABLES", "pg_catalog.pg_database WHERE datname = current_database() UNION SELECT q.* FROM (VALUES (0, 'CONNECT'), (0,", "type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE {database} TO {role};\", revoke=\"REVOKE %(privilege)s ON", "manage partial grant. But the # trickiest comes when there is no tables", "AS nsp CROSS JOIN roles LEFT OUTER JOIN grants ON pronamespace = nsp.oid", "datacl=dedent(\"\"\"\\ WITH grants AS ( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM", "inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} GRANT", "DATABASE {database} TO {role};\", revoke=\"REVOKE %(privilege)s ON DATABASE {database} FROM {role};\", ) _global_defacl_tpl", "rol.oid AS owner, 0 AS grantee, 'EXECUTE' AS priv FROM pg_roles AS rol", "WITH grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM", ") _nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA {schema} TO", "v in tpl.items(): if isinstance(v, string_types): v = v % fmt_args else: if", "NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | NOOP |", "1, 2 ), all_grants AS ( 
SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY", "rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class AS rel", "trickiest comes when there is no tables in a namespace. In this case,", "nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\" FROM namespace_rels AS nsp CROSS JOIN", "grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace", "FOR ROLE {owner}\" \" GRANT %(privilege)s ON %(TYPE)s TO {role};\"), revoke=( \"ALTER DEFAULT", "SQL formatting t_array='(%s)' % (', '.join(['%r' % i for i in t or", "ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s", "SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES", "-- ORDER BY 1, 2 \"\"\"), # noqa grant=\"GRANT %(privilege)s ON ALL %(TYPE)s", "full as NULL, # meaning privilege is irrelevant : it is both granted", "rolname IS NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast'", "name = namefmt % fmtkw return dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default,", "SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS def WHERE", "privilege), (name, [all_, default]), ]) def make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl, '__connect__', None,", "because we have to manage partial grant. But the # trickiest comes when", "%(privilege)s ON %(TYPE)s TO {role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \"", "In this case, is it # granted or revoked ? 
We have to", "namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__'", "SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE datname = current_database()", "fmt_kwargs): if '%(t)' in fmt: for t in fmt_kwargs['t']: yield fmt % dict(fmt_kwargs,", "privileges.update( make_rel_privileges(privilege, 'TABLES')) alias = '__%s__' % (privilege.lower(),) privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)]", "inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" GRANT %(privilege)s ON", "(VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database WHERE datacl IS", "fmt_args) for key in v['keys'] ])) privilege[k] = v return name, privilege def", "{role};\", revoke=\"REVOKE %(privilege)s ON DATABASE {database} FROM {role};\", ) _global_defacl_tpl = dict( type='globaldefacl',", "defacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS", "REVOKE %(privilege)s ON %(TYPE)s FROM {role};\"), ) _defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']),", "availables tables to # determine if privilege is fully granted. 
If the privilege", "tables | GRANT | GRANT | NOOP # -----------+----------+---------------+--------------- # # FOR REVOKE", "1, 2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclrole AS", "When namespace has tables, we compare grants to availables tables to # determine", "we drop the row in WHERE clause to ensure the privilege is considered", "% fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege),", "'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ]) # This", "DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s FROM", "PRIVILEGES FOR ROLE {owner}\" \" GRANT %(privilege)s ON %(TYPE)s TO {role};\"), revoke=( \"ALTER", "fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return dict([", "| GRANT | NOOP # -----------+----------+---------------+--------------- # # FOR REVOKE | no grant", "AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' -- ORDER BY 1,", "else: yield fmt % fmt_kwargs def make_privilege(tpl, name, TYPE, privilege): t = _types.get(TYPE)", "ORDER BY 1, 2 \"\"\"), grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema}", "IN SCHEMA {schema} FROM {role}\"), ) _allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants", "AS grants GROUP BY 1, 2, 3 ), namespaces AS ( SELECT nsp.oid,", "{role};\", ) # ALL TABLES is tricky because we have to manage partial", "namespace. In this case, is it # granted or revoked ? 
We have", "), namespaces AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL)", "CASE WHEN nsp.procs = ARRAY[]::name[] THEN NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END", "\"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON", "AND privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels", "COALESCE(rolname, 'public') FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee =", "AND grants.procs IS NULL) AND (priv IS NULL OR priv = '%(privilege)s') AND", "ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%'", "the row in WHERE clause to ensure the privilege is considered as #", "% fmt_kwargs def make_privilege(tpl, name, TYPE, privilege): t = _types.get(TYPE) fmt_args = dict(", "= dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS (SELECT pronamespace, grantee, priv, array_agg(DISTINCT proname", "AS \"full\" FROM namespaces AS nsp CROSS JOIN roles LEFT OUTER JOIN grants", "defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE (grantee", "fmt: for t in fmt_kwargs['t']: yield fmt % dict(fmt_kwargs, t=t) else: yield fmt", "(aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS rels FROM pg_catalog.pg_class WHERE relkind IN %(t_array)s", "priv FROM pg_catalog.pg_database WHERE datname = current_database() UNION SELECT q.* FROM (VALUES (0,", "privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for privilege in 'DELETE', 'INSERT',", "fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return dict([", "FROM pg_catalog.pg_class WHERE relkind IN %(t_array)s GROUP BY 1, 2, 3 ), all_roles", "granted. 
If the privilege is not granted at # all, we drop the", "priv FROM pg_roles AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole", "# determine if privilege is fully granted. If the privilege is not granted", "pg_roles ) SELECT nspname, rolname, CASE WHEN nsp.rels = ARRAY[]::name[] THEN NULL ELSE", "AS priv FROM pg_roles AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl ON", "NULL AND grants.rels IS NULL) -- ORDER BY 1, 2 \"\"\"), grant=\"GRANT %(privilege)s", "nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast' GROUP BY 1, 2 ),", "('f',), 'TABLES': ('r', 'v', 'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',), } def format_keys(fmt, fmt_kwargs):", "v['keys'] = list(chain(*[ format_keys(key, fmt_args) for key in v['keys'] ])) privilege[k] = v", "in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__',", "{schema} TO {role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};\", ) # ALL", "UNION SELECT oid, rolname from pg_roles ) SELECT nspname, rolname, CASE WHEN nsp.rels", "privilege=privilege.upper(), ) privilege = dict() for k, v in tpl.items(): if isinstance(v, string_types):", "PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} GRANT %(privilege)s ON %(TYPE)s TO {role};", "( SELECT 0 AS oid, 'public' AS rolname UNION SELECT oid, rolname from", "inspect=dedent(\"\"\"\\ WITH grants AS (SELECT pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER BY proname)", "| N/D # -----------+----------+---------------+--------------- # 1+ tables | NOOP | REVOKE | REVOKE", "key, NULL AS \"schema\", COALESCE(rolname, 'public') as rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS", ") AS grants GROUP BY 1, 2, 3 ), namespaces AS ( SELECT", "= nsp.oid AND grants.grantee = roles.oid WHERE NOT 
(array_length(nsp.procs, 1) IS NOT NULL", "= nsp.oid GROUP BY 1, 2 ), roles AS ( SELECT oid, rolname", "array_agg(DISTINCT proname ORDER BY proname) AS procs FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee,", "formatting t_array='(%s)' % (', '.join(['%r' % i for i in t or []])),", "if privilege is fully granted. If the privilege is not granted at #", "pg_catalog.pg_namespace ) SELECT grants.priv AS key, nspname, COALESCE(rolname, 'public') AS rolname FROM grants", "Here is a truth table: # # FOR GRANT | no grant |", "not granted at # all, we drop the row in WHERE clause to", "NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1, 2 \"\"\"), # noqa grant=\"GRANT %(privilege)s", "2 ), roles AS ( SELECT oid, rolname FROM pg_catalog.pg_roles UNION SELECT 0,", "proacl IS NULL ) AS grants GROUP BY 1, 2, 3 ), namespaces", "OUTER JOIN all_grants AS grants ON relnamespace = nsp.oid AND grantee = rol.oid", "as rolname, TRUE AS \"full\", pg_catalog.pg_get_userbyid(owner) AS owner FROM grants LEFT OUTER JOIN", "rolname from pg_roles ) SELECT nspname, rolname, CASE WHEN nsp.rels = ARRAY[]::name[] THEN", "SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION SELECT pronamespace, proname,", "import chain from textwrap import dedent from .utils import string_types shared_queries = dict(", "privilege is irrelevant : it is both granted and revoked. # # When", "is tricky because we have to manage partial grant. 
But the # trickiest", "ORDER BY rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class", "grants AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype", "NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\" FROM namespaces AS nsp", "# # FOR GRANT | no grant | partial grant | fully granted", "= list(chain(*[ format_keys(key, fmt_args) for key in v['keys'] ])) privilege[k] = v return", "= 0 WHERE defaclacl IS NULL ) SELECT priv AS key, NULL AS", "privilege), (name, [all_, default, global_def]), ]) def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw =", "noqa grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=( \"REVOKE", "proname) AS procs FROM ( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM", "is considered as # revoked. 
# _allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels", "{database} TO {role};\", revoke=\"REVOKE %(privilege)s ON DATABASE {database} FROM {role};\", ) _global_defacl_tpl =", "-----------+----------+---------------+--------------- # 1+ tables | GRANT | GRANT | NOOP # -----------+----------+---------------+--------------- #", "chain from textwrap import dedent from .utils import string_types shared_queries = dict( datacl=dedent(\"\"\"\\", "NO tables, we always return a row with full as NULL, # meaning", "| REVOKE # -----------+----------+---------------+--------------- # # When namespace has NO tables, we always", "inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA {schema} TO {role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA", "SELECT rol.oid AS owner, 0 AS grantee, 'EXECUTE' AS priv FROM pg_roles AS", "format_keys(key, fmt_args) for key in v['keys'] ])) privilege[k] = v return name, privilege", "0 OR rolname IS NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname", "rel.relnamespace = nsp.oid AND relkind IN %(t_array)s WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND", "AS procs FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace", "AS ( SELECT 0 AS oid, 'public' AS rolname UNION SELECT oid, rolname", "NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL) -- ORDER BY", "2 \"\"\") ) _datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE", "granted at # all, we drop the row in WHERE clause to ensure", "is it # granted or revoked ? 
We have to tell ldap2pg that", "nsp.oid GROUP BY 1, 2 ), roles AS ( SELECT oid, rolname FROM", "FROM pg_catalog.pg_roles UNION SELECT 0, 'public' ) SELECT nspname, rolname, CASE WHEN nsp.procs", "'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database WHERE datacl IS NULL AND datname =", "(array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL) AND (priv IS NULL", "privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS", "AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole = rol.oid AND", "AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS rels FROM", "{role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" REVOKE %(privilege)s ON %(TYPE)s", "ARRAY[]::name[]) END AS \"full\" FROM namespace_rels AS nsp CROSS JOIN all_roles AS rol", "make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name, [all_, default, global_def]), ]) def make_rel_privileges( privilege, TYPE,", "2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS ( SELECT defaclrole AS owner,", "granted and revoked. 
# # When namespace has tables, we compare grants to", "grantee, 'EXECUTE' AS priv FROM pg_catalog.pg_proc WHERE proacl IS NULL ) AS grants", "ARRAY[]::name[] THEN NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\" FROM namespace_rels", "FROM {role}\"), ) _types = { 'FUNCTIONS': ('f',), 'TABLES': ('r', 'v', 'f'), 'TYPES':", "REVOKE %(privilege)s ON %(TYPE)s FROM {role}; \"\"\"), ) _nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl',", "JOIN grants ON pronamespace = nsp.oid AND grants.grantee = roles.oid WHERE NOT (array_length(nsp.procs,", "nspname <> 'pg_toast' -- ORDER BY 1, 2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH", "JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS rol", "nspname, COALESCE(rolname, 'public') AS rolname FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol", "compare grants to availables tables to # determine if privilege is fully granted.", "AS rolname FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee =", "'%(privilege)s') AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1, 2 \"\"\"), #", "v = v % fmt_args else: if v['shared_query'] not in shared_queries: raise Exception(\"Unknown", "SELECT nspname, rolname, CASE WHEN nsp.rels = ARRAY[]::name[] THEN NULL ELSE nsp.rels =", "FOR GRANT | no grant | partial grant | fully granted # -----------+----------+---------------+---------------", "pg_catalog.pg_default_acl ) SELECT priv || '_on_' || objtype AS key, nspname, COALESCE(rolname, 'public')", "CROSS JOIN pg_catalog.pg_database WHERE datacl IS NULL AND datname = current_database() ) SELECT", "BY 1, 2 \"\"\"), grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO", "alias = '__%s__' % (privilege.lower(),) privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)] for privilege in", "priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl ) SELECT priv || '_on_' || objtype", "AS rol ON 
grants.grantee = rol.oid WHERE (grantee = 0 OR rolname IS", "# -----------+----------+---------------+--------------- # 1+ tables | NOOP | REVOKE | REVOKE # -----------+----------+---------------+---------------", "CROSS JOIN all_roles AS rol LEFT OUTER JOIN all_grants AS grants ON relnamespace", "make_privilege(tpl, name, TYPE, privilege): t = _types.get(TYPE) fmt_args = dict( t=t, # Loose", "'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [ '__select_on_sequences__',", "| NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | GRANT", "FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database WHERE datacl", "'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast' GROUP BY 1, 2 ), all_grants AS (", ") SELECT grants.priv AS key, nspname, COALESCE(rolname, 'public') AS rolname FROM grants LEFT", "pg_default_acl AS def WHERE defaclnamespace = 0 UNION SELECT rol.oid AS owner, 0", "] privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] = [ '__delete__', '__insert__',", "REVOKE | no grant | partial grant | fully granted # -----------+----------+---------------+--------------- #", "WHERE (grantee = 0 OR rolname IS NOT NULL) AND nspname NOT LIKE", "ON grants.grantee = rol.oid WHERE (grantee = 0 OR rolname IS NOT NULL)", "pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON", "= ['__%s_on_tables__' % (privilege.lower(),)] for privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES'))", "OR rolname IS NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <>", "1) IS NOT NULL AND grants.procs IS NULL) AND (priv IS NULL 
OR", "array_agg(relname ORDER BY relname) AS rels FROM pg_catalog.pg_class WHERE relkind IN %(t_array)s GROUP", "pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class AS rel ON rel.relnamespace = nsp.oid AND", "when there is no tables in a namespace. In this case, is it", "NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL) AND (priv IS", "SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace nsp", "(grantee = 0 OR rolname IS NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%'", ") _global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES FOR ROLE", "is a truth table: # # FOR GRANT | no grant | partial", "_global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\"", "(aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl ) SELECT priv || '_on_'", "t=t, # Loose SQL formatting t_array='(%s)' % (', '.join(['%r' % i for i", "'__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ]) # This is a", "WHEN nsp.rels = ARRAY[]::name[] THEN NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS", "| NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | NOOP", "isinstance(v, string_types): v = v % fmt_args else: if v['shared_query'] not in shared_queries:", "def format_keys(fmt, fmt_kwargs): if '%(t)' in fmt: for t in fmt_kwargs['t']: yield fmt", "LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE (grantee =", "AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1, 2 \"\"\"), # noqa", "nsp.oid = defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid", "has 
tables, we compare grants to availables tables to # determine if privilege", "% (privilege.lower(),) privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)] for privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege,", "there is no tables in a namespace. In this case, is it #", "pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE grantee = 0 OR rolname", "ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\" FROM namespaces AS nsp CROSS", "in a namespace. In this case, is it # granted or revoked ?", ") _defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE", "grant. But the # trickiest comes when there is no tables in a", "grants AS (SELECT pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER BY proname) AS procs", ") SELECT priv || '_on_' || objtype AS key, nspname, COALESCE(rolname, 'public') AS", "import dedent from .utils import string_types shared_queries = dict( datacl=dedent(\"\"\"\\ WITH grants AS", "<> 'pg_toast' GROUP BY 1, 2 ), all_grants AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type,", "rol LEFT OUTER JOIN all_grants AS grants ON relnamespace = nsp.oid AND grantee", "no tables in a namespace. In this case, is it # granted or", "# meaning privilege is irrelevant : it is both granted and revoked. 
#", "TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default =", "dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname ORDER BY", "SELECT 0 AS oid, 'public' AS rolname UNION SELECT oid, rolname from pg_roles", "ON grants.grantee = rol.oid WHERE grantee = 0 OR rolname IS NOT NULL", "roles.oid WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL) AND", "# FOR REVOKE | no grant | partial grant | fully granted #", "LEFT OUTER JOIN grants ON pronamespace = nsp.oid AND grants.grantee = roles.oid WHERE", "NULL) AS rels FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class AS rel ON", "FROM namespaces AS nsp CROSS JOIN roles LEFT OUTER JOIN grants ON pronamespace", "type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw name =", "itertools import chain from textwrap import dedent from .utils import string_types shared_queries =", "DEFAULT PRIVILEGES FOR ROLE {owner}\" \" REVOKE %(privilege)s ON %(TYPE)s FROM {role};\"), )", "pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0 WHERE defaclacl", "AS \"full\" FROM namespace_rels AS nsp CROSS JOIN all_roles AS rol LEFT OUTER", "FOR REVOKE | no grant | partial grant | fully granted # -----------+----------+---------------+---------------", "def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' %", "{ 'FUNCTIONS': ('f',), 'TABLES': ('r', 'v', 'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',), } def", "grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE datname = current_database() UNION SELECT q.*", "v['shared_query']) v = 
v.copy() v['keys'] = list(chain(*[ format_keys(key, fmt_args) for key in v['keys']", "OR priv = '%(privilege)s') AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1,", "SCHEMA {schema} TO {role}\", revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema}", "AS rels FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_class AS rel ON rel.relnamespace", "AS priv FROM pg_catalog.pg_database WHERE datname = current_database() UNION SELECT q.* FROM (VALUES", "AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN pg_catalog.pg_namespace nsp", "list(chain(*[ format_keys(key, fmt_args) for key in v['keys'] ])) privilege[k] = v return name,", "\"full\" FROM namespaces AS nsp CROSS JOIN roles LEFT OUTER JOIN grants ON", "grants.procs IS NULL) AND (priv IS NULL OR priv = '%(privilege)s') AND nspname", "AND grantee = rol.oid AND privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS", "GROUP BY 1, 2, 3 ), namespaces AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT", "privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default", "), all_grants AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS", "FROM pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0 AS grantee, 'EXECUTE' AS priv FROM", "JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE rolname IS NOT NULL", "in WHERE clause to ensure the privilege is considered as # revoked. 
#", "ON pro.pronamespace = nsp.oid GROUP BY 1, 2 ), roles AS ( SELECT", "defacl ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0 WHERE defaclacl IS NULL", "table: # # FOR GRANT | no grant | partial grant | fully", "'__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] = [ '__delete__',", "= '%(privilege)s') AND nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1, 2 \"\"\"),", "grants.grantee = roles.oid WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS", "LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE grantee =", "inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE {database} TO {role};\", revoke=\"REVOKE %(privilege)s ON DATABASE", "% fmtkw return dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl,", "# # When namespace has NO tables, we always return a row with", "SELECT q.* FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database", "JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace = nsp.oid GROUP BY 1, 2 ),", "fmtkw return dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), (name, [all_,", "'FUNCTIONS': ('f',), 'TABLES': ('r', 'v', 'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',), } def format_keys(fmt,", "ON SCHEMA {schema} TO {role};\", revoke=\"REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};\", )", "pronamespace, proname, 0 AS grantee, 'EXECUTE' AS priv FROM pg_catalog.pg_proc WHERE proacl IS", "rolname UNION SELECT oid, rolname from pg_roles ) SELECT nspname, rolname, CASE WHEN", "COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\" FROM namespaces AS nsp CROSS JOIN roles LEFT", "keys=['%(privilege)s_on_%(t)s']), 
grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema} GRANT %(privilege)s", "have to manage partial grant. But the # trickiest comes when there is", "# revoked. # _allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS ( SELECT", "privilege is considered as # revoked. # _allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH", "But the # trickiest comes when there is no tables in a namespace.", "% v['shared_query']) v = v.copy() v['keys'] = list(chain(*[ format_keys(key, fmt_args) for key in", "LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' -- ORDER BY 1, 2, 3, 5", "WHERE relkind IN %(t_array)s GROUP BY 1, 2, 3 ), all_roles AS (", "\"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" REVOKE %(privilege)s ON %(TYPE)s FROM {role};\"),", "TYPE, privilege), (name, [all_, default]), ]) def make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl, '__connect__',", "AS priv FROM pg_default_acl AS def WHERE defaclnamespace = 0 UNION SELECT rol.oid", "\"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS ( SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS", "JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE grantee = 0 OR", "WITH grants AS (SELECT pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER BY proname) AS", "( SELECT pronamespace, proname, (aclexplode(proacl)).grantee, (aclexplode(proacl)).privilege_type AS priv FROM pg_catalog.pg_proc UNION SELECT pronamespace,", "= v.copy() v['keys'] = list(chain(*[ format_keys(key, fmt_args) for key in v['keys'] ])) privilege[k]", "2, 3 ), namespaces AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY", "NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' ORDER BY 1,", "pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace LEFT", "GRANT | GRANT | NOOP # 
-----------+----------+---------------+--------------- # # FOR REVOKE | no", "'__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] = [ '__delete__', '__insert__', '__references__', '__select_on_tables__', '__trigger__', '__truncate__', '__update_on_tables__',", "AS grantee, 'EXECUTE' AS priv FROM pg_catalog.pg_proc WHERE proacl IS NULL ) AS", "owner FROM grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace LEFT OUTER JOIN", "%(privilege)s ON DATABASE {database} TO {role};\", revoke=\"REVOKE %(privilege)s ON DATABASE {database} FROM {role};\",", "DATABASE {database} FROM {role};\", ) _global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER", "ON %(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN", "= [ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__']", "AS rolname UNION SELECT oid, rolname from pg_roles ) SELECT nspname, rolname, CASE", "rol.oid WHERE rolname IS NOT NULL OR grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH", "N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables | NOOP | REVOKE |", "IN SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s FROM {role}; \"\"\"), ) _nspacl_tpl =", "determine if privilege is fully granted. 
If the privilege is not granted at", "default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name, [all_, default, global_def]), ]) def", "pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER BY proname) AS procs FROM ( SELECT", "fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' %", "default, global_def]), ]) def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_", "| partial grant | fully granted # -----------+----------+---------------+--------------- # no tables | NOOP", "grants AS ( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl", "ensure the privilege is considered as # revoked. # _allrelacl_tpl = dict( type='nspacl',", "make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl,", "NOT NULL AND grants.procs IS NULL) AND (priv IS NULL OR priv =", "privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__']", "NULL as namespace, COALESCE(rolname, 'public') FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol", "grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=( \"REVOKE %(privilege)s", "objtype FROM pg_catalog.pg_default_acl ) SELECT priv || '_on_' || objtype AS key, nspname,", "\"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _types =", "v['shared_query'] not in 
shared_queries: raise Exception(\"Unknown query %s.\" % v['shared_query']) v = v.copy()", "(0, 'CONNECT'), (0, 'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database WHERE datacl IS NULL", "3 ), namespaces AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname),", "% (privilege.lower(),)] for privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES'))", "SELECT nspname, rolname, CASE WHEN nsp.procs = ARRAY[]::name[] THEN NULL ELSE nsp.procs =", "= { 'FUNCTIONS': ('f',), 'TABLES': ('r', 'v', 'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',), }", "= '__%s__' % (privilege.lower(),) privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)] for privilege in 'SELECT',", "in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias = '__%s__' %", "to # determine if privilege is fully granted. 
If the privilege is not", "nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\" FROM namespaces AS nsp CROSS JOIN", "} def format_keys(fmt, fmt_kwargs): if '%(t)' in fmt: for t in fmt_kwargs['t']: yield", "(array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL) -- ORDER BY 1,", "namefmt % fmtkw return dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege),", "= rol.oid AND privilege_type = '%(privilege)s' WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL", "= 0 OR rolname IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants AS (", "AS objtype FROM pg_catalog.pg_default_acl ) SELECT priv || '_on_' || objtype AS key,", "grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace LEFT OUTER JOIN pg_catalog.pg_roles AS", "rol.oid WHERE grantee = 0 OR rolname IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH", "oid, 'public' AS rolname UNION SELECT oid, rolname from pg_roles ) SELECT nspname,", "AS q CROSS JOIN pg_catalog.pg_database WHERE datacl IS NULL AND datname = current_database()", "AND datname = current_database() ) SELECT grants.priv AS key, NULL as namespace, COALESCE(rolname,", "TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN pg_catalog.pg_namespace nsp ON nsp.oid", "IN %(t_array)s WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast' GROUP BY", "(aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl ) SELECT", "{role}\"), ) _types = { 'FUNCTIONS': ('f',), 'TABLES': ('r', 'v', 'f'), 'TYPES': ('T',),", "None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None,", "grants GROUP BY 1, 2, 3 ), namespaces AS ( SELECT nsp.oid, nsp.nspname,", "NOOP | REVOKE | REVOKE # 
-----------+----------+---------------+--------------- # # When namespace has NO", "# -----------+----------+---------------+--------------- # # FOR REVOKE | no grant | partial grant |", "is both granted and revoked. # # When namespace has tables, we compare", "? We have to tell ldap2pg that this grant is irrelevant on #", "AND grants.grantee = roles.oid WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs", "'TABLES')) alias = '__%s__' % (privilege.lower(),) privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)] for privilege", "v['keys'] ])) privilege[k] = v return name, privilege def make_proc_privileges( privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'):", "OUTER JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0", "FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE", "<> 'pg_toast' -- ORDER BY 1, 2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants", "privileges = dict([ make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'), make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__',", "defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype FROM pg_catalog.pg_default_acl )", "relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS rels FROM pg_catalog.pg_class WHERE relkind", "GRANT | NOOP # -----------+----------+---------------+--------------- # # FOR REVOKE | no grant |", "defaclnamespace = 0 UNION SELECT rol.oid AS owner, 0 AS grantee, 'EXECUTE' AS", "compatibility alias. 
privileges['__usage_on_types__'] = ['__default_usage_on_types__'] privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for privilege in", "nspname, rolname, CASE WHEN nsp.procs = ARRAY[]::name[] THEN NULL ELSE nsp.procs = COALESCE(grants.procs,", "REVOKE | REVOKE # -----------+----------+---------------+--------------- # # When namespace has NO tables, we", "SELECT grants.priv AS key, NULL as namespace, COALESCE(rolname, 'public') FROM grants LEFT OUTER", "'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias = '__%s__' % (privilege.lower(),) privileges[alias] = ['__%s_on_tables__'", "WITH grants AS ( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv,", "dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" GRANT", "('T',), 'SEQUENCES': ('S',), } def format_keys(fmt, fmt_kwargs): if '%(t)' in fmt: for t", "return a row with full as NULL, # meaning privilege is irrelevant :", "objtype AS key, nspname, COALESCE(rolname, 'public') AS rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS", "irrelevant : it is both granted and revoked. # # When namespace has", "or revoked ? 
We have to tell ldap2pg that this grant is irrelevant", "WITH grants AS ( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database", "namespace has tables, we compare grants to availables tables to # determine if", "'SEQUENCES')) privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__',", "key, nspname, COALESCE(rolname, 'public') AS rolname FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS", "current_database() UNION SELECT q.* FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q CROSS", "rolname FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid", "( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS def", "0 AS grantee, 'EXECUTE' AS priv FROM pg_catalog.pg_proc WHERE proacl IS NULL )", "AS rol LEFT OUTER JOIN all_grants AS grants ON relnamespace = nsp.oid AND", "%(TYPE)s TO {role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" REVOKE %(privilege)s", "granted or revoked ? 
We have to tell ldap2pg that this grant is", "privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias = '__%s__'", "defacl.defaclnamespace = 0 WHERE defaclacl IS NULL ) SELECT priv AS key, NULL", "WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL) AND (priv", "format_keys(fmt, fmt_kwargs): if '%(t)' in fmt: for t in fmt_kwargs['t']: yield fmt %", "i in t or []])), TYPE=TYPE, privilege=privilege.upper(), ) privilege = dict() for k,", "NOT NULL AND grants.rels IS NULL) -- ORDER BY 1, 2 \"\"\"), grant=\"GRANT", "TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower()) all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw default =", "JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid WHERE (grantee = 0 OR", "(name, [all_, default]), ]) def make_well_known_privileges(): privileges = dict([ make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'),", "ON DATABASE {database} TO {role};\", revoke=\"REVOKE %(privilege)s ON DATABASE {database} FROM {role};\", )", "(aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER BY relname) AS rels FROM pg_catalog.pg_class WHERE relkind IN", "textwrap import dedent from .utils import string_types shared_queries = dict( datacl=dedent(\"\"\"\\ WITH grants", "keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" GRANT %(privilege)s ON %(TYPE)s", "0 AS oid, 'public' AS rolname UNION SELECT oid, rolname from pg_roles )", "privileges['__all_on_schemas__'] = [ '__create_on_schemas__', '__usage_on_schemas__', ] privileges['__all_on_sequences__'] = [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ]", "AS def WHERE defaclnamespace = 0 UNION SELECT rol.oid AS owner, 0 AS", "nsp LEFT OUTER JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace = nsp.oid GROUP BY", "priv FROM 
pg_catalog.pg_proc UNION SELECT pronamespace, proname, 0 AS grantee, 'EXECUTE' AS priv", "BY 1, 2 ), roles AS ( SELECT oid, rolname FROM pg_catalog.pg_roles UNION", "JOIN roles LEFT OUTER JOIN grants ON pronamespace = nsp.oid AND grants.grantee =", "in shared_queries: raise Exception(\"Unknown query %s.\" % v['shared_query']) v = v.copy() v['keys'] =", "|| '_on_' || objtype AS key, nspname, COALESCE(rolname, 'public') AS rolname, TRUE AS", "and revoked. # # When namespace has tables, we compare grants to availables", "at # all, we drop the row in WHERE clause to ensure the", "-- ORDER BY 1, 2 \"\"\"), grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA", "ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0 WHERE defaclacl IS NULL )", "tricky because we have to manage partial grant. But the # trickiest comes", "ORDER BY pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc", "] privileges['__all_on_tables__'] = [ '__delete__', '__insert__', '__references__', '__select_on_tables__', '__trigger__', '__truncate__', '__update_on_tables__', ] return", "t in fmt_kwargs['t']: yield fmt % dict(fmt_kwargs, t=t) else: yield fmt % fmt_kwargs", "privilege): t = _types.get(TYPE) fmt_args = dict( t=t, # Loose SQL formatting t_array='(%s)'", "# this schema. 
# # Here is a truth table: # # FOR", "v % fmt_args else: if v['shared_query'] not in shared_queries: raise Exception(\"Unknown query %s.\"", "dedent from .utils import string_types shared_queries = dict( datacl=dedent(\"\"\"\\ WITH grants AS (", "WHERE grantee = 0 OR rolname IS NOT NULL \"\"\"), defacl=dedent(\"\"\"\\ WITH grants", "revoke=\"REVOKE %(privilege)s ON DATABASE {database} FROM {role};\", ) _global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl',", "%(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}\", revoke=( \"REVOKE %(privilege)s ON", "None, 'TEMPORARY'), make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__',", "FOR ROLE {owner}\" \" REVOKE %(privilege)s ON %(TYPE)s FROM {role};\"), ) _defacl_tpl =", "nsp.nspname, array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels FROM pg_catalog.pg_namespace nsp LEFT OUTER", "a row with full as NULL, # meaning privilege is irrelevant : it", "GROUP BY 1, 2, 3 ), all_roles AS ( SELECT 0 AS oid,", ") # ALL TABLES is tricky because we have to manage partial grant.", "1, 2, 3 ), all_roles AS ( SELECT 0 AS oid, 'public' AS", "%(TYPE)s FROM {role}; \"\"\"), ) _nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s", "dict( datacl=dedent(\"\"\"\\ WITH grants AS ( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv", "['__execute_on_functions__'] for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias", "pg_catalog.pg_database WHERE datacl IS NULL AND datname = current_database() ) SELECT grants.priv AS", "dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), (name, 
[all_, default]), ])", "'__%s__' % (privilege.lower(),) privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)] for privilege in 'SELECT', 'UPDATE':", "BY 1, 2 ), all_grants AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname ORDER", "grants.grantee = rol.oid WHERE rolname IS NOT NULL OR grantee = 0 \"\"\"),", "(aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS def WHERE defaclnamespace = 0 UNION SELECT", "THEN NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\" FROM namespace_rels AS", "yield fmt % dict(fmt_kwargs, t=t) else: yield fmt % fmt_kwargs def make_privilege(tpl, name,", "priv || '_on_' || objtype AS key, nspname, COALESCE(rolname, 'public') AS rolname, TRUE", "from pg_roles ) SELECT nspname, rolname, CASE WHEN nsp.rels = ARRAY[]::name[] THEN NULL", "rolname, CASE WHEN nsp.rels = ARRAY[]::name[] THEN NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[])", "[all_, default, global_def]), ]) def make_rel_privileges( privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'): fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower())", ") privilege = dict() for k, v in tpl.items(): if isinstance(v, string_types): v", "pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc AS pro", "SELECT 0, 'public' ) SELECT nspname, rolname, CASE WHEN nsp.procs = ARRAY[]::name[] THEN", "(privilege.lower(),)] for privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__']", "pronamespace = nsp.oid AND grants.grantee = roles.oid WHERE NOT (array_length(nsp.procs, 1) IS NOT", "ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _types = { 'FUNCTIONS':", "from .utils import string_types shared_queries = dict( datacl=dedent(\"\"\"\\ 
WITH grants AS ( SELECT", "WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast' GROUP BY 1, 2", "LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname <> 'pg_toast' GROUP BY 1, 2 ), all_grants AS", "make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name,", "GROUP BY 1, 2 ), all_grants AS ( SELECT relnamespace, (aclexplode(relacl)).privilege_type, (aclexplode(relacl)).grantee, array_agg(relname", "grantee, 'EXECUTE' AS priv FROM pg_roles AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS", "fmt_kwargs['t']: yield fmt % dict(fmt_kwargs, t=t) else: yield fmt % fmt_kwargs def make_privilege(tpl,", "relnamespace = nsp.oid AND grantee = rol.oid AND privilege_type = '%(privilege)s' WHERE NOT", "= '__default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allrelacl_tpl, all_,", "DEFAULT PRIVILEGES FOR ROLE {owner}\" \" GRANT %(privilege)s ON %(TYPE)s TO {role};\"), revoke=(", "AS priv FROM pg_catalog.pg_namespace ) SELECT grants.priv AS key, nspname, COALESCE(rolname, 'public') AS", "GRANT %(privilege)s ON %(TYPE)s TO {role}; \"\"\"), revoke=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE", "privilege), make_privilege(_global_defacl_tpl, global_def, TYPE, privilege), (name, [all_, default, global_def]), ]) def make_rel_privileges( privilege,", "ON %(TYPE)s TO {role};\"), revoke=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" REVOKE", "nsp.procs = ARRAY[]::name[] THEN NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\"", "# -----------+----------+---------------+--------------- # # When namespace has NO tables, we always return a", ") SELECT nspname, rolname, CASE WHEN nsp.rels = ARRAY[]::name[] THEN NULL ELSE nsp.rels", "{schema} FROM {role}\"), ) _allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS (SELECT", "BY 1, 2, 3 ), all_roles AS ( SELECT 
0 AS oid, 'public'", "v.copy() v['keys'] = list(chain(*[ format_keys(key, fmt_args) for key in v['keys'] ])) privilege[k] =", "dict(fmt_kwargs, t=t) else: yield fmt % fmt_kwargs def make_privilege(tpl, name, TYPE, privilege): t", "| N/D # -----------+----------+---------------+--------------- # 1+ tables | GRANT | GRANT | NOOP", "= dict( datacl=dedent(\"\"\"\\ WITH grants AS ( SELECT (aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS", "no tables | NOOP | N/D | N/D # -----------+----------+---------------+--------------- # 1+ tables", "FROM pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace = nsp.oid", "AS key, NULL as namespace, COALESCE(rolname, 'public') FROM grants LEFT OUTER JOIN pg_catalog.pg_roles", "oid, rolname FROM pg_catalog.pg_roles UNION SELECT 0, 'public' ) SELECT nspname, rolname, CASE", "ORDER BY 1, 2 \"\"\") ) _datacl_tpl = dict( type='datacl', inspect=dict(shared_query='datacl', keys=['%(privilege)s']), grant=\"GRANT", "( SELECT defaclnamespace, defaclrole, (aclexplode(defaclacl)).grantee AS grantee, (aclexplode(defaclacl)).privilege_type AS priv, defaclobjtype AS objtype", "JOIN all_roles AS rol LEFT OUTER JOIN all_grants AS grants ON relnamespace =", "grant is irrelevant on # this schema. 
# # Here is a truth", "row in WHERE clause to ensure the privilege is considered as # revoked.", "row with full as NULL, # meaning privilege is irrelevant : it is", "'__default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allrelacl_tpl, all_, TYPE,", "_nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA {schema} TO {role};\",", "TYPE, privilege): t = _types.get(TYPE) fmt_args = dict( t=t, # Loose SQL formatting", "ROLE {owner} IN SCHEMA {schema} REVOKE %(privilege)s ON %(TYPE)s FROM {role}; \"\"\"), )", "'__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw", "UNION SELECT 0, 'public' ) SELECT nspname, rolname, CASE WHEN nsp.procs = ARRAY[]::name[]", "-----------+----------+---------------+--------------- # 1+ tables | NOOP | REVOKE | REVOKE # -----------+----------+---------------+--------------- #", "ON grants.grantee = rol.oid WHERE rolname IS NOT NULL OR grantee = 0", "the privilege is considered as # revoked. # _allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\", "(', '.join(['%r' % i for i in t or []])), TYPE=TYPE, privilege=privilege.upper(), )", "nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace ) SELECT grants.priv AS", "grants to availables tables to # determine if privilege is fully granted. 
If", "privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] = [", "END AS \"full\" FROM namespaces AS nsp CROSS JOIN roles LEFT OUTER JOIN", "pg_catalog.pg_namespace nsp LEFT OUTER JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace = nsp.oid GROUP", "namespace_rels AS nsp CROSS JOIN all_roles AS rol LEFT OUTER JOIN all_grants AS", "IN SCHEMA {schema} FROM {role}\"), ) _types = { 'FUNCTIONS': ('f',), 'TABLES': ('r',", "('S',), } def format_keys(fmt, fmt_kwargs): if '%(t)' in fmt: for t in fmt_kwargs['t']:", "N/D # -----------+----------+---------------+--------------- # 1+ tables | GRANT | GRANT | NOOP #", "= nsp.oid AND relkind IN %(t_array)s WHERE nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' AND nspname", "NULL, # meaning privilege is irrelevant : it is both granted and revoked.", "namespace, COALESCE(rolname, 'public') FROM grants LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee", "(0, 'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database WHERE datacl IS NULL AND datname", "['__%s_on_tables__' % (privilege.lower(),)] for privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE',", "FROM {role}\"), ) _allprocacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS (SELECT pronamespace,", "= ARRAY[]::name[] THEN NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[]) END AS \"full\" FROM", "'public' ) SELECT nspname, rolname, CASE WHEN nsp.procs = ARRAY[]::name[] THEN NULL ELSE", "all_roles AS rol LEFT OUTER JOIN all_grants AS grants ON relnamespace = nsp.oid", "namespaces AS nsp CROSS JOIN roles LEFT OUTER JOIN grants ON pronamespace =", "dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH grants AS (SELECT 
pronamespace, grantee, priv, array_agg(DISTINCT proname ORDER", "revoke=( \"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _types", "privilege is not granted at # all, we drop the row in WHERE", "= '__%(privilege)s_on_all_%(type)s__' % fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt %", "fmt_args else: if v['shared_query'] not in shared_queries: raise Exception(\"Unknown query %s.\" % v['shared_query'])", "is irrelevant on # this schema. # # Here is a truth table:", "v = v.copy() v['keys'] = list(chain(*[ format_keys(key, fmt_args) for key in v['keys'] ]))", "this schema. # # Here is a truth table: # # FOR GRANT", "IS NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' ORDER", "JOIN pg_catalog.pg_class AS rel ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s WHERE", "= dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \"", "pro.pronamespace = nsp.oid GROUP BY 1, 2 ), roles AS ( SELECT oid,", "no grant | partial grant | fully granted # -----------+----------+---------------+--------------- # no tables", "ORDER BY 1, 2 \"\"\"), # noqa grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN", "'TYPES': ('T',), 'SEQUENCES': ('S',), } def format_keys(fmt, fmt_kwargs): if '%(t)' in fmt: for", "[]])), TYPE=TYPE, privilege=privilege.upper(), ) privilege = dict() for k, v in tpl.items(): if", "is no tables in a namespace. 
In this case, is it # granted", "_types = { 'FUNCTIONS': ('f',), 'TABLES': ('r', 'v', 'f'), 'TYPES': ('T',), 'SEQUENCES': ('S',),", "ORDER BY relname) AS rels FROM pg_catalog.pg_class WHERE relkind IN %(t_array)s GROUP BY", "1, 2 ), roles AS ( SELECT oid, rolname FROM pg_catalog.pg_roles UNION SELECT", "NOT NULL OR grantee = 0 \"\"\"), nspacl=dedent(\"\"\"\\ WITH grants AS ( SELECT", "fmtkw return dict([ make_privilege(_allprocacl_tpl, all_, TYPE, privilege), make_privilege(_defacl_tpl, default, TYPE, privilege), make_privilege(_global_defacl_tpl, global_def,", ") SELECT grants.priv AS key, NULL as namespace, COALESCE(rolname, 'public') FROM grants LEFT", "rolname, CASE WHEN nsp.procs = ARRAY[]::name[] THEN NULL ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[])", "rels FROM pg_catalog.pg_class WHERE relkind IN %(t_array)s GROUP BY 1, 2, 3 ),", "AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace ) SELECT grants.priv AS key, nspname,", "grants ON relnamespace = nsp.oid AND grantee = rol.oid AND privilege_type = '%(privilege)s'", "IS NULL) AND (priv IS NULL OR priv = '%(privilege)s') AND nspname NOT", "None, 'CREATE'), make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'), make_privilege( _defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'), ]) #", "WHERE proacl IS NULL ) AS grants GROUP BY 1, 2, 3 ),", "% fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allrelacl_tpl, all_, TYPE, privilege),", "FROM {role};\", ) _global_defacl_tpl = dict( type='globaldefacl', inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']), grant=( \"ALTER DEFAULT PRIVILEGES", "0 AS grantee, 'EXECUTE' AS priv FROM pg_roles AS rol LEFT OUTER JOIN", "it # granted or revoked ? 
We have to tell ldap2pg that this", "TYPE=TYPE, privilege=privilege.upper(), ) privilege = dict() for k, v in tpl.items(): if isinstance(v,", "WITH grants AS ( SELECT defaclrole AS owner, (aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM", "ORDER BY 1, 2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS ( SELECT", "\"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}\"), ) _allprocacl_tpl =", "# # Here is a truth table: # # FOR GRANT | no", "= roles.oid WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL)", "rol.oid AND defacl.defaclnamespace = 0 WHERE defaclacl IS NULL ) SELECT priv AS", "(aclexplode(defaclacl)).grantee, (aclexplode(defaclacl)).privilege_type AS priv FROM pg_default_acl AS def WHERE defaclnamespace = 0 UNION", "FROM pg_roles AS rol LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl ON defacl.defaclrole =", "always return a row with full as NULL, # meaning privilege is irrelevant", "in t or []])), TYPE=TYPE, privilege=privilege.upper(), ) privilege = dict() for k, v", "IS NULL) -- ORDER BY 1, 2 \"\"\"), grant=\"GRANT %(privilege)s ON ALL %(TYPE)s", "1, 2 \"\"\"), # noqa grant=\"GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema}", "'pg_toast' -- ORDER BY 1, 2, 3, 5 \"\"\"), globaldefacl=dedent(\"\"\"\\ WITH grants AS", "{role}; \"\"\"), ) _nspacl_tpl = dict( type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA", "= [ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] = [ '__delete__', '__insert__', '__references__', '__select_on_tables__',", "[ '__select_on_sequences__', '__update_on_sequences__', '__usage_on_sequences__', ] privileges['__all_on_tables__'] = [ '__delete__', '__insert__', '__references__', '__select_on_tables__', '__trigger__',", "pg_catalog.pg_class AS rel ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s WHERE 
nspname", "BY 1, 2, 3 ), namespaces AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname", "LEFT OUTER JOIN pg_catalog.pg_proc AS pro ON pro.pronamespace = nsp.oid GROUP BY 1,", "'TRUNCATE': privileges.update( make_rel_privileges(privilege, 'TABLES')) alias = '__%s__' % (privilege.lower(),) privileges[alias] = ['__%s_on_tables__' %", "dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA", "default = '__default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allrelacl_tpl,", "# # FOR REVOKE | no grant | partial grant | fully granted", "privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS')) privileges['__execute__'] = ['__execute_on_functions__'] for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE':", "AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND nspname <> 'pg_toast' ORDER BY 1, 2", "from textwrap import dedent from .utils import string_types shared_queries = dict( datacl=dedent(\"\"\"\\ WITH", "AS rol ON grants.grantee = rol.oid WHERE rolname IS NOT NULL OR grantee", "_allrelacl_tpl = dict( type='nspacl', inspect=dedent(\"\"\"\\ WITH namespace_rels AS ( SELECT nsp.oid, nsp.nspname, array_remove(array_agg(rel.relname", "{role};\"), ) _defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES FOR", "q.* FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q CROSS JOIN pg_catalog.pg_database WHERE", "nspname NOT LIKE 'pg\\\\_%%temp\\\\_%%' -- ORDER BY 1, 2 \"\"\"), # noqa grant=\"GRANT", "SELECT nspname, (aclexplode(nspacl)).grantee AS grantee, (aclexplode(nspacl)).privilege_type AS priv FROM pg_catalog.pg_namespace ) SELECT grants.priv", "type=\"nspacl\", inspect=dict(shared_query='nspacl', keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON SCHEMA {schema} TO 
{role};\", revoke=\"REVOKE %(privilege)s ON", "nspname <> 'pg_toast' ORDER BY 1, 2 \"\"\") ) _datacl_tpl = dict( type='datacl',", "rolname, TRUE AS full, pg_catalog.pg_get_userbyid(defaclrole) AS owner FROM grants JOIN pg_catalog.pg_namespace nsp ON", "= rol.oid WHERE (grantee = 0 OR rolname IS NOT NULL) AND nspname", "for privilege in 'SELECT', 'UPDATE': privileges.update(make_rel_privileges(privilege, 'TABLES')) privileges.update(make_rel_privileges(privilege, 'SEQUENCES')) privileges.update(make_rel_privileges('USAGE', 'SEQUENCES')) privileges['__all_on_schemas__'] =", "% fmtkw default = '__default_%(privilege)s_on_%(type)s__' % fmtkw global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name", "grantee, priv, array_agg(DISTINCT proname ORDER BY proname) AS procs FROM ( SELECT pronamespace,", "global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw name = namefmt % fmtkw return dict([ make_privilege(_allprocacl_tpl,", "keys=['%(privilege)s']), grant=\"GRANT %(privilege)s ON DATABASE {database} TO {role};\", revoke=\"REVOKE %(privilege)s ON DATABASE {database}", "nsp.oid, nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace nsp", "have to tell ldap2pg that this grant is irrelevant on # this schema.", "IS NULL AND datname = current_database() ) SELECT grants.priv AS key, NULL as", "NULL ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[]) END AS \"full\" FROM namespace_rels AS nsp", "i for i in t or []])), TYPE=TYPE, privilege=privilege.upper(), ) privilege = dict()", "nsp.nspname, array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs FROM pg_catalog.pg_namespace nsp LEFT", "(aclexplode(datacl)).grantee AS grantee, (aclexplode(datacl)).privilege_type AS priv FROM pg_catalog.pg_database WHERE datname = current_database() UNION", "\"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}\" \" GRANT %(privilege)s ON %(TYPE)s TO {role};\"),", "%s.\" % 
v['shared_query']) v = v.copy() v['keys'] = list(chain(*[ format_keys(key, fmt_args) for key", "FROM {role};\"), ) _defacl_tpl = dict( type=\"defacl\", inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']), grant=dedent(\"\"\"\\ ALTER DEFAULT PRIVILEGES", "% dict(fmt_kwargs, t=t) else: yield fmt % fmt_kwargs def make_privilege(tpl, name, TYPE, privilege):", "= 0 OR rolname IS NOT NULL) AND nspname NOT LIKE 'pg\\\\_%temp\\\\_%' AND" ]
[ "run(self, context): \"\"\" Runs the test step :type context: TestStepContext :param context: test", "self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator value is invalid (it should", "self.MULTIPLY: self._result = first_value * second_value elif self._pars.operator == self.DIVIDE: if second_value ==", "\"Second value = 0 ! Division by 0 is not possible\" self._logger.error(msg) raise", "(C) 2017 Intel Corporation Licensed under the Apache License, Version 2.0 (the \"License\");", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator value is invalid (it should have been checked", "\"MULTIPLY\" DIVIDE = \"DIVIDE\" def __init__(self, tc_conf, global_conf, ts_conf, factory): \"\"\" Constructor \"\"\"", "self._pars.operator == self.SUBTRACT: self._result = first_value - second_value elif self._pars.operator == self.MULTIPLY: self._result", "first_value * second_value elif self._pars.operator == self.DIVIDE: if second_value == 0: msg =", "TestStepContext :param context: test case context \"\"\" TestStepBase.run(self, context) assert self._pars.operator in [self.ADD,", "context): \"\"\" Runs the test step :type context: TestStepContext :param context: test case", "= \"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE = \"DIVIDE\" def __init__(self, tc_conf, global_conf, ts_conf,", "= first_value - second_value elif self._pars.operator == self.MULTIPLY: self._result = first_value * second_value", "TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation (TestStepBase): \"\"\" Mathematical operation \"\"\" ADD", "second_value = float(self._pars.second) if self._pars.operator == self.ADD: self._result = first_value + second_value elif", "CONDITIONS OF ANY KIND, either express or implied. 
See the License for the", "self.ADD: self._result = first_value + second_value elif self._pars.operator == self.SUBTRACT: self._result = first_value", "self._pars.operator == self.MULTIPLY: self._result = first_value * second_value elif self._pars.operator == self.DIVIDE: if", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "second_value elif self._pars.operator == self.DIVIDE: if second_value == 0: msg = \"Second value", "OF ANY KIND, either express or implied. See the License for the specific", "to in writing, software distributed under the License is distributed on an \"AS", "\"\"\" Copyright (C) 2017 Intel Corporation Licensed under the Apache License, Version 2.0", "Intel Corporation Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "if second_value == 0: msg = \"Second value = 0 ! Division by", "= 0 ! Division by 0 is not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg)", "by the framework)\" first_value = float(self._pars.first) second_value = float(self._pars.second) if self._pars.operator == self.ADD:", "not use this file except in compliance with the License. You may obtain", "License for the specific language governing permissions and limitations under the License. SPDX-License-Identifier:", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "import AcsConfigException class MathOperation (TestStepBase): \"\"\" Mathematical operation \"\"\" ADD = \"ADD\" SUBTRACT", "def __init__(self, tc_conf, global_conf, ts_conf, factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf,", "specific language governing permissions and limitations under the License. 
SPDX-License-Identifier: Apache-2.0 \"\"\" from", "the test step :type context: TestStepContext :param context: test case context \"\"\" TestStepBase.run(self,", "value is invalid (it should have been checked by the framework)\" first_value =", "except in compliance with the License. You may obtain a copy of the", "from acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation (TestStepBase): \"\"\" Mathematical", "may not use this file except in compliance with the License. You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "elif self._pars.operator == self.MULTIPLY: self._result = first_value * second_value elif self._pars.operator == self.DIVIDE:", "from acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation (TestStepBase): \"\"\" Mathematical operation \"\"\" ADD =", "if self._pars.operator == self.ADD: self._result = first_value + second_value elif self._pars.operator == self.SUBTRACT:", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "Corporation Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "the specific language governing permissions and limitations under the License. SPDX-License-Identifier: Apache-2.0 \"\"\"", "limitations under the License. 
SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException", "ts_conf, factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory) self._result = None", "TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory) self._result = None def run(self, context): \"\"\" Runs", "= first_value / second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg = \"VERDICT: %s stored as {0}\".format(self._result)", "context: test case context \"\"\" TestStepBase.run(self, context) assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY,", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "= float(self._pars.second) if self._pars.operator == self.ADD: self._result = first_value + second_value elif self._pars.operator", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "== 0: msg = \"Second value = 0 ! Division by 0 is", "= float(self._pars.first) second_value = float(self._pars.second) if self._pars.operator == self.ADD: self._result = first_value +", "/ second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg = \"VERDICT: %s stored as {0}\".format(self._result) % self._pars.save_result_as", "under the License. 
SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException import", "operation \"\"\" ADD = \"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE =", "global_conf, ts_conf, factory) self._result = None def run(self, context): \"\"\" Runs the test", "been checked by the framework)\" first_value = float(self._pars.first) second_value = float(self._pars.second) if self._pars.operator", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "the License for the specific language governing permissions and limitations under the License.", ":type context: TestStepContext :param context: test case context \"\"\" TestStepBase.run(self, context) assert self._pars.operator", "\"DIVIDE\" def __init__(self, tc_conf, global_conf, ts_conf, factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf,", "ANY KIND, either express or implied. See the License for the specific language", "and limitations under the License. SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase from", "== self.SUBTRACT: self._result = first_value - second_value elif self._pars.operator == self.MULTIPLY: self._result =", "\"\"\" ADD = \"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE = \"DIVIDE\"", "file except in compliance with the License. You may obtain a copy of", "self._result = first_value + second_value elif self._pars.operator == self.SUBTRACT: self._result = first_value -", "the License. 
SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException", "elif self._pars.operator == self.SUBTRACT: self._result = first_value - second_value elif self._pars.operator == self.MULTIPLY:", "- second_value elif self._pars.operator == self.MULTIPLY: self._result = first_value * second_value elif self._pars.operator", "value = 0 ! Division by 0 is not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER,", "should have been checked by the framework)\" first_value = float(self._pars.first) second_value = float(self._pars.second)", "Unless required by applicable law or agreed to in writing, software distributed under", "Copyright (C) 2017 Intel Corporation Licensed under the Apache License, Version 2.0 (the", "\"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE = \"DIVIDE\" def __init__(self, tc_conf, global_conf, ts_conf, factory):", "0 ! Division by 0 is not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else:", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "permissions and limitations under the License. 
SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase", "self.MULTIPLY, self.DIVIDE], \\ \"Operator value is invalid (it should have been checked by", "2.0 (the \"License\"); you may not use this file except in compliance with", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result = first_value / second_value context.set_info(self._pars.save_result_as,", "is not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result = first_value / second_value", "See the License for the specific language governing permissions and limitations under the", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result = first_value / second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg =", "checked by the framework)\" first_value = float(self._pars.first) second_value = float(self._pars.second) if self._pars.operator ==", "== self.ADD: self._result = first_value + second_value elif self._pars.operator == self.SUBTRACT: self._result =", "import TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation (TestStepBase): \"\"\" Mathematical operation \"\"\"", "self.DIVIDE: if second_value == 0: msg = \"Second value = 0 ! Division", "the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "test step :type context: TestStepContext :param context: test case context \"\"\" TestStepBase.run(self, context)", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "elif self._pars.operator == self.DIVIDE: if second_value == 0: msg = \"Second value =", "Runs the test step :type context: TestStepContext :param context: test case context \"\"\"", "Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation (TestStepBase):", "License, Version 2.0 (the \"License\"); you may not use this file except in", "MULTIPLY = \"MULTIPLY\" DIVIDE = \"DIVIDE\" def __init__(self, tc_conf, global_conf, ts_conf, factory): \"\"\"", "compliance with the License. You may obtain a copy of the License at", "(the \"License\"); you may not use this file except in compliance with the", "in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator value is invalid (it should have", "first_value - second_value elif self._pars.operator == self.MULTIPLY: self._result = first_value * second_value elif", "step :type context: TestStepContext :param context: test case context \"\"\" TestStepBase.run(self, context) assert", "framework)\" first_value = float(self._pars.first) second_value = float(self._pars.second) if self._pars.operator == self.ADD: self._result =", "= first_value * second_value elif self._pars.operator == self.DIVIDE: if second_value == 0: msg", "this file except in compliance with the License. 
You may obtain a copy", "tc_conf, global_conf, ts_conf, factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory) self._result", "self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result = first_value / second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg", "self._result = first_value / second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg = \"VERDICT: %s stored as", "first_value / second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg = \"VERDICT: %s stored as {0}\".format(self._result) %", "factory) self._result = None def run(self, context): \"\"\" Runs the test step :type", "\"License\"); you may not use this file except in compliance with the License.", "express or implied. See the License for the specific language governing permissions and", "tc_conf, global_conf, ts_conf, factory) self._result = None def run(self, context): \"\"\" Runs the", "\"\"\" Mathematical operation \"\"\" ADD = \"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY = \"MULTIPLY\"", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "second_value == 0: msg = \"Second value = 0 ! Division by 0", "you may not use this file except in compliance with the License. You", "self._result = first_value * second_value elif self._pars.operator == self.DIVIDE: if second_value == 0:", "! 
Division by 0 is not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result", "agreed to in writing, software distributed under the License is distributed on an", "float(self._pars.second) if self._pars.operator == self.ADD: self._result = first_value + second_value elif self._pars.operator ==", "2017 Intel Corporation Licensed under the Apache License, Version 2.0 (the \"License\"); you", "\"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory) self._result = None def run(self, context): \"\"\"", "assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator value is invalid (it", "DIVIDE = \"DIVIDE\" def __init__(self, tc_conf, global_conf, ts_conf, factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self,", "0 is not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result = first_value /", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "language governing permissions and limitations under the License. 
SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase", "self.DIVIDE], \\ \"Operator value is invalid (it should have been checked by the", "\\ \"Operator value is invalid (it should have been checked by the framework)\"", "second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg = \"VERDICT: %s stored as {0}\".format(self._result) % self._pars.save_result_as self._logger.debug(self.ts_verdict_msg)", "else: self._result = first_value / second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg = \"VERDICT: %s stored", "\"\"\" TestStepBase.run(self, context) assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator value", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory) self._result = None def", "invalid (it should have been checked by the framework)\" first_value = float(self._pars.first) second_value", "MathOperation (TestStepBase): \"\"\" Mathematical operation \"\"\" ADD = \"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "by applicable law or agreed to in writing, software distributed under the License", "applicable law or agreed to in writing, software distributed under the License is", "implied. 
See the License for the specific language governing permissions and limitations under", "\"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE = \"DIVIDE\" def __init__(self, tc_conf,", "the framework)\" first_value = float(self._pars.first) second_value = float(self._pars.second) if self._pars.operator == self.ADD: self._result", "+ second_value elif self._pars.operator == self.SUBTRACT: self._result = first_value - second_value elif self._pars.operator", ":param context: test case context \"\"\" TestStepBase.run(self, context) assert self._pars.operator in [self.ADD, self.SUBTRACT,", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "self._result = first_value - second_value elif self._pars.operator == self.MULTIPLY: self._result = first_value *", "float(self._pars.first) second_value = float(self._pars.second) if self._pars.operator == self.ADD: self._result = first_value + second_value", "\"Operator value is invalid (it should have been checked by the framework)\" first_value", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License", "self._result = None def run(self, context): \"\"\" Runs the test step :type context:", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation (TestStepBase): \"\"\" Mathematical operation", "[self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator value is invalid (it should have been", "TestStepBase.run(self, context) assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator value is", "self.SUBTRACT: self._result = first_value - second_value elif self._pars.operator == self.MULTIPLY: self._result = first_value", "= None def run(self, context): \"\"\" Runs the test step :type context: TestStepContext", "None def run(self, context): \"\"\" Runs the test step :type context: TestStepContext :param", "context: TestStepContext :param context: test case context \"\"\" TestStepBase.run(self, context) assert self._pars.operator in", "second_value elif self._pars.operator == self.MULTIPLY: self._result = first_value * second_value elif self._pars.operator ==", "law or agreed to in writing, software distributed under the License is distributed", "context \"\"\" TestStepBase.run(self, context) assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator", "Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory) self._result = None def run(self, context):", "AcsConfigException class MathOperation (TestStepBase): \"\"\" Mathematical operation \"\"\" ADD = \"ADD\" SUBTRACT =", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation (TestStepBase): \"\"\"", "context) assert self._pars.operator 
in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\ \"Operator value is invalid", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result = first_value / second_value context.set_info(self._pars.save_result_as, str(self._result))", "SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation", "== self.MULTIPLY: self._result = first_value * second_value elif self._pars.operator == self.DIVIDE: if second_value", "in compliance with the License. You may obtain a copy of the License", "* second_value elif self._pars.operator == self.DIVIDE: if second_value == 0: msg = \"Second", "msg) else: self._result = first_value / second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg = \"VERDICT: %s", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "global_conf, ts_conf, factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory) self._result =", "use this file except in compliance with the License. You may obtain a", "self._pars.operator == self.DIVIDE: if second_value == 0: msg = \"Second value = 0", "KIND, either express or implied. See the License for the specific language governing", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "= \"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE = \"DIVIDE\" def __init__(self,", "= first_value + second_value elif self._pars.operator == self.SUBTRACT: self._result = first_value - second_value", "for the specific language governing permissions and limitations under the License. 
SPDX-License-Identifier: Apache-2.0", "msg = \"Second value = 0 ! Division by 0 is not possible\"", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "Division by 0 is not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result =", "governing permissions and limitations under the License. SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import", "__init__(self, tc_conf, global_conf, ts_conf, factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory)", "first_value + second_value elif self._pars.operator == self.SUBTRACT: self._result = first_value - second_value elif", "ADD = \"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE = \"DIVIDE\" def", "self._pars.operator == self.ADD: self._result = first_value + second_value elif self._pars.operator == self.SUBTRACT: self._result", "in writing, software distributed under the License is distributed on an \"AS IS\"", "class MathOperation (TestStepBase): \"\"\" Mathematical operation \"\"\" ADD = \"ADD\" SUBTRACT = \"SUBTRACT\"", "def run(self, context): \"\"\" Runs the test step :type context: TestStepContext :param context:", "== self.DIVIDE: if second_value == 0: msg = \"Second value = 0 !", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "is invalid (it should have been checked by the framework)\" first_value = float(self._pars.first)", "License. 
SPDX-License-Identifier: Apache-2.0 \"\"\" from acs.Core.TestStep.TestStepBase import TestStepBase from acs.ErrorHandling.AcsConfigException import AcsConfigException class", "= \"DIVIDE\" def __init__(self, tc_conf, global_conf, ts_conf, factory): \"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf,", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "have been checked by the framework)\" first_value = float(self._pars.first) second_value = float(self._pars.second) if", "SUBTRACT = \"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE = \"DIVIDE\" def __init__(self, tc_conf, global_conf,", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "either express or implied. See the License for the specific language governing permissions", "AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result = first_value / second_value context.set_info(self._pars.save_result_as, str(self._result)) self.ts_verdict_msg = \"VERDICT:", "by 0 is not possible\" self._logger.error(msg) raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg) else: self._result = first_value", "(TestStepBase): \"\"\" Mathematical operation \"\"\" ADD = \"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY =", "or agreed to in writing, software distributed under the License is distributed on", "test case context \"\"\" TestStepBase.run(self, context) assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE],", "= \"MULTIPLY\" DIVIDE = \"DIVIDE\" def __init__(self, tc_conf, global_conf, ts_conf, factory): \"\"\" Constructor", "= \"Second value = 0 ! Division by 0 is not possible\" self._logger.error(msg)", "ts_conf, factory) self._result = None def run(self, context): \"\"\" Runs the test step", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "or implied. 
See the License for the specific language governing permissions and limitations", "Mathematical operation \"\"\" ADD = \"ADD\" SUBTRACT = \"SUBTRACT\" MULTIPLY = \"MULTIPLY\" DIVIDE", "with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "\"\"\" Runs the test step :type context: TestStepContext :param context: test case context", "required by applicable law or agreed to in writing, software distributed under the", "(it should have been checked by the framework)\" first_value = float(self._pars.first) second_value =", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software", "acs.ErrorHandling.AcsConfigException import AcsConfigException class MathOperation (TestStepBase): \"\"\" Mathematical operation \"\"\" ADD = \"ADD\"", "\"\"\" Constructor \"\"\" TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory) self._result = None def run(self,", "case context \"\"\" TestStepBase.run(self, context) assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \\", "first_value = float(self._pars.first) second_value = float(self._pars.second) if self._pars.operator == self.ADD: self._result = first_value", "second_value elif self._pars.operator == self.SUBTRACT: self._result = first_value - second_value elif self._pars.operator ==", "0: msg = \"Second value = 0 ! Division by 0 is not" ]
[ "continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self, path): for p", "run(self, command): # code and code.cmd on Windows are not actual executables, but", "= \"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts = [\"\"] project_suffix = \".sublime-project\" def __str__(self):", "cmd_exts = [\"\"] project_suffix = \".sublime-project\" def __str__(self): return \"Sublime Text 3\" def", "f\"{self.cmd_stem}{ext}\") if path.is_file() and os.access(path, os.X_OK): return path return None def _find_project_here(self, path):", "os.fspath(path) def run(self, command): # code and code.cmd on Windows are not actual", "= \"Microsoft Visual Studio Code\" md_identifier = \"com.microsoft.VSCode\" cmd_stem = \"code\" cmd_exts =", "\"subl\" cmd_exts = [\"\"] project_suffix = \".sublime-project\" def __str__(self): return \"Sublime Text 3\"", "= [\"\", \".cmd\"] project_suffix = \".code-workspace\" def __str__(self): return \"Visual Studio Code\" def", "background): if background: yield \"--background\" if path.suffix == self.project_suffix: yield \"--project\" else: yield", "find_cmd(self, directory): if sys.platform == \"win32\": cmd_exts = self.cmd_exts else: cmd_exts = [\"\"]", "project_suffix = \".code-workspace\" def __str__(self): return \"Visual Studio Code\" def get_bin_mac(self, app): return", "cmd_exts = self.cmd_exts else: cmd_exts = [\"\"] for ext in cmd_exts: path =", "return found return None class _DoesNotSupportBackground(ValueError): pass class VisualStudioCode(_Tool): publisher = \"Microsoft Corporation\"", "\"Microsoft Visual Studio Code\" md_identifier = \"com.microsoft.VSCode\" cmd_stem = \"code\" cmd_exts = [\"\",", "background: raise _DoesNotSupportBackground() yield \"--new-window\" yield os.fspath(path) def run(self, command): # code and", "p.stem) > FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self, path): for p in path.parent.iterdir(): if", "import subprocess import sys 
import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class _Tool: def find_cmd(self,", "root): return root # TODO: Inspect Sublime Text to find where subl.exe is.", "FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self, path): for p in path.parent.iterdir(): if p.suffix !=", "directory): if sys.platform == \"win32\": cmd_exts = self.cmd_exts else: cmd_exts = [\"\"] for", "fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def find_project(self, path): if not path.is_dir(): return", "def __str__(self): return \"Sublime Text 3\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\")", "cmd_stem = \"code\" cmd_exts = [\"\", \".cmd\"] project_suffix = \".code-workspace\" def __str__(self): return", "and code.cmd on Windows are not actual executables, but a batch # script.", "find(path) if found: return found return None class _DoesNotSupportBackground(ValueError): pass class VisualStudioCode(_Tool): publisher", "fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self, path): for p in path.parent.iterdir():", "Corporation\" display_prefix = \"Microsoft Visual Studio Code\" md_identifier = \"com.microsoft.VSCode\" cmd_stem = \"code\"", "run it. 
return subprocess.call(command, shell=(sys.platform == \"win32\")) class SublimeText3(_Tool): publisher = None display_prefix", "def find_project(self, path): if not path.is_dir(): return None for find in [self._find_project_here, self._find_project_in_parent]:", "= self.cmd_exts else: cmd_exts = [\"\"] for ext in cmd_exts: path = pathlib.Path(directory,", "None def _find_project_here(self, path): for p in path.iterdir(): if p.suffix != self.project_suffix: continue", "p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def find_project(self,", "if path.is_file() and os.access(path, os.X_OK): return path return None def _find_project_here(self, path): for", "pathlib import subprocess import sys import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class _Tool: def", "_DoesNotSupportBackground() yield \"--new-window\" yield os.fspath(path) def run(self, command): # code and code.cmd on", "if found: return found return None class _DoesNotSupportBackground(ValueError): pass class VisualStudioCode(_Tool): publisher =", "is. def iter_args(self, path, background): if background: yield \"--background\" if path.suffix == self.project_suffix:", "if background: yield \"--background\" if path.suffix == self.project_suffix: yield \"--project\" else: yield \"--new-window\"", "import sys import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class _Tool: def find_cmd(self, directory): if", "\"--new-window\" yield os.fspath(path) def run(self, command): # code and code.cmd on Windows are", "path = pathlib.Path(directory, f\"{self.cmd_stem}{ext}\") if path.is_file() and os.access(path, os.X_OK): return path return None", "to run it. 
return subprocess.call(command, shell=(sys.platform == \"win32\")) class SublimeText3(_Tool): publisher = None", "\"Sublime Text 3\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\") def get_bin_win(self, root):", "get_bin_mac(self, app): return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\") def get_bin_win(self, root): return root # TODO:", "= \"subl\" cmd_exts = [\"\"] project_suffix = \".sublime-project\" def __str__(self): return \"Sublime Text", "find_project(self, path): if not path.is_dir(): return None for find in [self._find_project_here, self._find_project_in_parent]: found", "\"com.microsoft.VSCode\" cmd_stem = \"code\" cmd_exts = [\"\", \".cmd\"] project_suffix = \".code-workspace\" def __str__(self):", "def run(self, command): # code and code.cmd on Windows are not actual executables,", "_Tool: def find_cmd(self, directory): if sys.platform == \"win32\": cmd_exts = self.cmd_exts else: cmd_exts", "[\"\"] project_suffix = \".sublime-project\" def __str__(self): return \"Sublime Text 3\" def get_bin_mac(self, app):", "project_suffix = \".sublime-project\" def __str__(self): return \"Sublime Text 3\" def get_bin_mac(self, app): return", "path.is_dir(): return None for find in [self._find_project_here, self._find_project_in_parent]: found = find(path) if found:", "import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class _Tool: def find_cmd(self, directory): if sys.platform ==", "publisher = \"Microsoft Corporation\" display_prefix = \"Microsoft Visual Studio Code\" md_identifier = \"com.microsoft.VSCode\"", "Text 3\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\") def get_bin_win(self, root): return", "sys import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class _Tool: def find_cmd(self, directory): if sys.platform", "__str__(self): return \"Visual Studio Code\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"Resources\", \"app\", \"bin\")", "[\"\", 
\".cmd\"] project_suffix = \".code-workspace\" def __str__(self): return \"Visual Studio Code\" def get_bin_mac(self,", "def __str__(self): return \"Visual Studio Code\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"Resources\", \"app\",", "def iter_args(self, path, background): if background: raise _DoesNotSupportBackground() yield \"--new-window\" yield os.fspath(path) def", "path.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p", "fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class _Tool: def find_cmd(self, directory): if sys.platform == \"win32\":", "import os import pathlib import subprocess import sys import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75", "path): for p in path.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem)", "if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def", "subprocess.call(command, shell=(sys.platform == \"win32\")) class SublimeText3(_Tool): publisher = None display_prefix = None md_identifier", "= \".sublime-project\" def __str__(self): return \"Sublime Text 3\" def get_bin_mac(self, app): return app.joinpath(\"Contents\",", "def iter_args(self, path, background): if background: yield \"--background\" if path.suffix == self.project_suffix: yield", "a batch # script. We need the shell to run it. return subprocess.call(command,", "cmd_exts = [\"\"] for ext in cmd_exts: path = pathlib.Path(directory, f\"{self.cmd_stem}{ext}\") if path.is_file()", "batch # script. We need the shell to run it. 
class VisualStudioCode(_Tool):
    """Integration for Microsoft Visual Studio Code."""

    publisher = "Microsoft Corporation"
    display_prefix = "Microsoft Visual Studio Code"
    md_identifier = "com.microsoft.VSCode"
    cmd_stem = "code"
    cmd_exts = ["", ".cmd"]
    project_suffix = ".code-workspace"

    def __str__(self):
        """Human-readable tool name."""
        return "Visual Studio Code"

    def get_bin_mac(self, app):
        """Return the CLI bin directory inside the macOS app bundle *app*."""
        return app.joinpath("Contents", "Resources", "app", "bin")

    def get_bin_win(self, root):
        """Return the CLI bin directory under the Windows install *root*."""
        return root.joinpath("bin")

    def iter_args(self, path, background):
        """Yield command-line arguments that open *path* in a new window.

        VS Code has no background-open mode, so *background* must be false.

        :raises _DoesNotSupportBackground: if *background* is true.
        """
        if background:
            raise _DoesNotSupportBackground()
        yield from ("--new-window", os.fspath(path))

    def run(self, command):
        """Execute *command* and return its exit status.

        # code and code.cmd on Windows are not actual executables, but a batch
        # script. We need the shell to run it.
        """
        use_shell = sys.platform == "win32"
        return subprocess.call(command, shell=use_shell)
def iter_args(self,", "\".code-workspace\" def __str__(self): return \"Visual Studio Code\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"Resources\",", "Studio Code\" md_identifier = \"com.microsoft.VSCode\" cmd_stem = \"code\" cmd_exts = [\"\", \".cmd\"] project_suffix", "= None md_identifier = \"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts = [\"\"] project_suffix =", "\"win32\": cmd_exts = self.cmd_exts else: cmd_exts = [\"\"] for ext in cmd_exts: path", "self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self, path): for", "None for find in [self._find_project_here, self._find_project_in_parent]: found = find(path) if found: return found", "p in path.parent.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD:", "for p in path.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) >", "!= self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def find_project(self, path):", "= \"com.microsoft.VSCode\" cmd_stem = \"code\" cmd_exts = [\"\", \".cmd\"] project_suffix = \".code-workspace\" def", "get_bin_win(self, root): return root.joinpath(\"bin\") def iter_args(self, path, background): if background: raise _DoesNotSupportBackground() yield", "os import pathlib import subprocess import sys import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class", "find where subl.exe is. def iter_args(self, path, background): if background: yield \"--background\" if", "path): if not path.is_dir(): return None for find in [self._find_project_here, self._find_project_in_parent]: found =", "# TODO: Inspect Sublime Text to find where subl.exe is. 
def iter_args(self, path,", "class VisualStudioCode(_Tool): publisher = \"Microsoft Corporation\" display_prefix = \"Microsoft Visual Studio Code\" md_identifier", "def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\") def get_bin_win(self, root): return root #", "!= self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self, path):", "return None class _DoesNotSupportBackground(ValueError): pass class VisualStudioCode(_Tool): publisher = \"Microsoft Corporation\" display_prefix =", "return subprocess.call(command, shell=(sys.platform == \"win32\")) class SublimeText3(_Tool): publisher = None display_prefix = None", "if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def find_project(self, path): if not path.is_dir():", "p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self,", "None display_prefix = None md_identifier = \"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts = [\"\"]", "if background: raise _DoesNotSupportBackground() yield \"--new-window\" yield os.fspath(path) def run(self, command): # code", "Inspect Sublime Text to find where subl.exe is. def iter_args(self, path, background): if", "import pathlib import subprocess import sys import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class _Tool:", "iter_args(self, path, background): if background: yield \"--background\" if path.suffix == self.project_suffix: yield \"--project\"", "if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self, path): for p in", "Text to find where subl.exe is. 
def iter_args(self, path, background): if background: yield", "background: yield \"--background\" if path.suffix == self.project_suffix: yield \"--project\" else: yield \"--new-window\" yield", "path return None def _find_project_here(self, path): for p in path.iterdir(): if p.suffix !=", "shell=(sys.platform == \"win32\")) class SublimeText3(_Tool): publisher = None display_prefix = None md_identifier =", "return path return None def _find_project_here(self, path): for p in path.iterdir(): if p.suffix", "root): return root.joinpath(\"bin\") def iter_args(self, path, background): if background: raise _DoesNotSupportBackground() yield \"--new-window\"", "path, background): if background: raise _DoesNotSupportBackground() yield \"--new-window\" yield os.fspath(path) def run(self, command):", "= \"Microsoft Corporation\" display_prefix = \"Microsoft Visual Studio Code\" md_identifier = \"com.microsoft.VSCode\" cmd_stem", "class SublimeText3(_Tool): publisher = None display_prefix = None md_identifier = \"com.sublimetext.3\" cmd_stem =", "publisher = None display_prefix = None md_identifier = \"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts", "def get_bin_win(self, root): return root.joinpath(\"bin\") def iter_args(self, path, background): if background: raise _DoesNotSupportBackground()", "code and code.cmd on Windows are not actual executables, but a batch #", "return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\") def get_bin_win(self, root): return root # TODO: Inspect Sublime", "p def _find_project_in_parent(self, path): for p in path.parent.iterdir(): if p.suffix != self.project_suffix: continue", "not actual executables, but a batch # script. 
We need the shell to", "app): return app.joinpath(\"Contents\", \"Resources\", \"app\", \"bin\") def get_bin_win(self, root): return root.joinpath(\"bin\") def iter_args(self,", "def _find_project_in_parent(self, path): for p in path.parent.iterdir(): if p.suffix != self.project_suffix: continue if", "3\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\") def get_bin_win(self, root): return root", "== self.project_suffix: yield \"--project\" else: yield \"--new-window\" yield os.fspath(path) def run(self, command): return", "def get_bin_win(self, root): return root # TODO: Inspect Sublime Text to find where", "# code and code.cmd on Windows are not actual executables, but a batch", "it. return subprocess.call(command, shell=(sys.platform == \"win32\")) class SublimeText3(_Tool): publisher = None display_prefix =", "return root.joinpath(\"bin\") def iter_args(self, path, background): if background: raise _DoesNotSupportBackground() yield \"--new-window\" yield", "> FUZZY_FIND_THRESHOLD: return p def find_project(self, path): if not path.is_dir(): return None for", "cmd_stem = \"subl\" cmd_exts = [\"\"] project_suffix = \".sublime-project\" def __str__(self): return \"Sublime", "continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def find_project(self, path): if not", "cmd_exts = [\"\", \".cmd\"] project_suffix = \".code-workspace\" def __str__(self): return \"Visual Studio Code\"", "get_bin_mac(self, app): return app.joinpath(\"Contents\", \"Resources\", \"app\", \"bin\") def get_bin_win(self, root): return root.joinpath(\"bin\") def", "need the shell to run it. 
return subprocess.call(command, shell=(sys.platform == \"win32\")) class SublimeText3(_Tool):", "= [\"\"] project_suffix = \".sublime-project\" def __str__(self): return \"Sublime Text 3\" def get_bin_mac(self,", "in path.parent.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return", "yield \"--background\" if path.suffix == self.project_suffix: yield \"--project\" else: yield \"--new-window\" yield os.fspath(path)", "path.parent.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p", "Studio Code\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"Resources\", \"app\", \"bin\") def get_bin_win(self, root):", "\".sublime-project\" def __str__(self): return \"Sublime Text 3\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"SharedSupport\",", "\"bin\") def get_bin_win(self, root): return root.joinpath(\"bin\") def iter_args(self, path, background): if background: raise", "iter_args(self, path, background): if background: raise _DoesNotSupportBackground() yield \"--new-window\" yield os.fspath(path) def run(self,", "\"SharedSupport\", \"bin\") def get_bin_win(self, root): return root # TODO: Inspect Sublime Text to", "p.stem) > FUZZY_FIND_THRESHOLD: return p def find_project(self, path): if not path.is_dir(): return None", "yield \"--new-window\" yield os.fspath(path) def run(self, command): # code and code.cmd on Windows", "\"--background\" if path.suffix == self.project_suffix: yield \"--project\" else: yield \"--new-window\" yield os.fspath(path) def", "app.joinpath(\"Contents\", \"Resources\", \"app\", \"bin\") def get_bin_win(self, root): return root.joinpath(\"bin\") def iter_args(self, path, background):", "TODO: Inspect Sublime Text to find where subl.exe is. 
def iter_args(self, path, background):", "if path.suffix == self.project_suffix: yield \"--project\" else: yield \"--new-window\" yield os.fspath(path) def run(self,", "in [self._find_project_here, self._find_project_in_parent]: found = find(path) if found: return found return None class", "found: return found return None class _DoesNotSupportBackground(ValueError): pass class VisualStudioCode(_Tool): publisher = \"Microsoft", "md_identifier = \"com.microsoft.VSCode\" cmd_stem = \"code\" cmd_exts = [\"\", \".cmd\"] project_suffix = \".code-workspace\"", "VisualStudioCode(_Tool): publisher = \"Microsoft Corporation\" display_prefix = \"Microsoft Visual Studio Code\" md_identifier =", "\"Microsoft Corporation\" display_prefix = \"Microsoft Visual Studio Code\" md_identifier = \"com.microsoft.VSCode\" cmd_stem =", "return \"Visual Studio Code\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"Resources\", \"app\", \"bin\") def", "= None display_prefix = None md_identifier = \"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts =", "md_identifier = \"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts = [\"\"] project_suffix = \".sublime-project\" def", "in path.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return", "to find where subl.exe is. def iter_args(self, path, background): if background: yield \"--background\"", "pass class VisualStudioCode(_Tool): publisher = \"Microsoft Corporation\" display_prefix = \"Microsoft Visual Studio Code\"", "get_bin_win(self, root): return root # TODO: Inspect Sublime Text to find where subl.exe", "are not actual executables, but a batch # script. 
We need the shell", "return p def _find_project_in_parent(self, path): for p in path.parent.iterdir(): if p.suffix != self.project_suffix:", "Code\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"Resources\", \"app\", \"bin\") def get_bin_win(self, root): return", "for p in path.parent.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) >", "app): return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\") def get_bin_win(self, root): return root # TODO: Inspect", "None class _DoesNotSupportBackground(ValueError): pass class VisualStudioCode(_Tool): publisher = \"Microsoft Corporation\" display_prefix = \"Microsoft", "if sys.platform == \"win32\": cmd_exts = self.cmd_exts else: cmd_exts = [\"\"] for ext", "command): # code and code.cmd on Windows are not actual executables, but a", "os.access(path, os.X_OK): return path return None def _find_project_here(self, path): for p in path.iterdir():", "_find_project_in_parent(self, path): for p in path.parent.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name,", "class _Tool: def find_cmd(self, directory): if sys.platform == \"win32\": cmd_exts = self.cmd_exts else:", "= 75 class _Tool: def find_cmd(self, directory): if sys.platform == \"win32\": cmd_exts =", "and os.access(path, os.X_OK): return path return None def _find_project_here(self, path): for p in", "os.X_OK): return path return None def _find_project_here(self, path): for p in path.iterdir(): if", "= \".code-workspace\" def __str__(self): return \"Visual Studio Code\" def get_bin_mac(self, app): return app.joinpath(\"Contents\",", "Windows are not actual executables, but a batch # script. We need the", "return None def _find_project_here(self, path): for p in path.iterdir(): if p.suffix != self.project_suffix:", "on Windows are not actual executables, but a batch # script. 
We need", "raise _DoesNotSupportBackground() yield \"--new-window\" yield os.fspath(path) def run(self, command): # code and code.cmd", "\"code\" cmd_exts = [\"\", \".cmd\"] project_suffix = \".code-workspace\" def __str__(self): return \"Visual Studio", "where subl.exe is. def iter_args(self, path, background): if background: yield \"--background\" if path.suffix", "\"app\", \"bin\") def get_bin_win(self, root): return root.joinpath(\"bin\") def iter_args(self, path, background): if background:", "Sublime Text to find where subl.exe is. def iter_args(self, path, background): if background:", "\"Resources\", \"app\", \"bin\") def get_bin_win(self, root): return root.joinpath(\"bin\") def iter_args(self, path, background): if", "> FUZZY_FIND_THRESHOLD: return p def _find_project_in_parent(self, path): for p in path.parent.iterdir(): if p.suffix", "Code\" md_identifier = \"com.microsoft.VSCode\" cmd_stem = \"code\" cmd_exts = [\"\", \".cmd\"] project_suffix =", "FUZZY_FIND_THRESHOLD = 75 class _Tool: def find_cmd(self, directory): if sys.platform == \"win32\": cmd_exts", "== \"win32\": cmd_exts = self.cmd_exts else: cmd_exts = [\"\"] for ext in cmd_exts:", "p def find_project(self, path): if not path.is_dir(): return None for find in [self._find_project_here,", "for find in [self._find_project_here, self._find_project_in_parent]: found = find(path) if found: return found return", "display_prefix = None md_identifier = \"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts = [\"\"] project_suffix", "for ext in cmd_exts: path = pathlib.Path(directory, f\"{self.cmd_stem}{ext}\") if path.is_file() and os.access(path, os.X_OK):", "self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD: return p def find_project(self, path): if", "found return None class _DoesNotSupportBackground(ValueError): pass class VisualStudioCode(_Tool): publisher = \"Microsoft Corporation\" display_prefix", "executables, but a batch # script. 
We need the shell to run it.", "None md_identifier = \"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts = [\"\"] project_suffix = \".sublime-project\"", "return \"Sublime Text 3\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"SharedSupport\", \"bin\") def get_bin_win(self,", "else: cmd_exts = [\"\"] for ext in cmd_exts: path = pathlib.Path(directory, f\"{self.cmd_stem}{ext}\") if", "= [\"\"] for ext in cmd_exts: path = pathlib.Path(directory, f\"{self.cmd_stem}{ext}\") if path.is_file() and", "class _DoesNotSupportBackground(ValueError): pass class VisualStudioCode(_Tool): publisher = \"Microsoft Corporation\" display_prefix = \"Microsoft Visual", "FUZZY_FIND_THRESHOLD: return p def find_project(self, path): if not path.is_dir(): return None for find", "subprocess import sys import fuzzywuzzy.fuzz FUZZY_FIND_THRESHOLD = 75 class _Tool: def find_cmd(self, directory):", "self.project_suffix: yield \"--project\" else: yield \"--new-window\" yield os.fspath(path) def run(self, command): return subprocess.call(command)", "found = find(path) if found: return found return None class _DoesNotSupportBackground(ValueError): pass class", "\"win32\")) class SublimeText3(_Tool): publisher = None display_prefix = None md_identifier = \"com.sublimetext.3\" cmd_stem", "\".cmd\"] project_suffix = \".code-workspace\" def __str__(self): return \"Visual Studio Code\" def get_bin_mac(self, app):", "= \"code\" cmd_exts = [\"\", \".cmd\"] project_suffix = \".code-workspace\" def __str__(self): return \"Visual", "path.is_file() and os.access(path, os.X_OK): return path return None def _find_project_here(self, path): for p", "We need the shell to run it. 
return subprocess.call(command, shell=(sys.platform == \"win32\")) class", "\"com.sublimetext.3\" cmd_stem = \"subl\" cmd_exts = [\"\"] project_suffix = \".sublime-project\" def __str__(self): return", "background): if background: raise _DoesNotSupportBackground() yield \"--new-window\" yield os.fspath(path) def run(self, command): #", "self.cmd_exts else: cmd_exts = [\"\"] for ext in cmd_exts: path = pathlib.Path(directory, f\"{self.cmd_stem}{ext}\")", "Visual Studio Code\" md_identifier = \"com.microsoft.VSCode\" cmd_stem = \"code\" cmd_exts = [\"\", \".cmd\"]", "\"Visual Studio Code\" def get_bin_mac(self, app): return app.joinpath(\"Contents\", \"Resources\", \"app\", \"bin\") def get_bin_win(self,", "return root # TODO: Inspect Sublime Text to find where subl.exe is. def", "cmd_exts: path = pathlib.Path(directory, f\"{self.cmd_stem}{ext}\") if path.is_file() and os.access(path, os.X_OK): return path return", "p in path.iterdir(): if p.suffix != self.project_suffix: continue if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD:" ]
[ "else: raise RuntimeError(\"Unable to find version string in __init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\",", "match.group(1) else: raise RuntimeError(\"Unable to find version string in __init__\") setup( name=\"iterpipe\", version=get_version(),", "to find version string in __init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\",", "setup def get_version(): with open('iterpipe/__init__.py', \"r\") as vfh: vline = vfh.read() vregex =", "license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline of functions to", "match = re.search(vregex, vline, re.M) if match: return match.group(1) else: raise RuntimeError(\"Unable to", "'pytest-cov'], classifiers=[ \"Development Status :: 4 - Beta\", 'Intended Audience :: Developers', \"License", "= vfh.read() vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline, re.M) if", "Audience :: Developers', \"License :: OSI Approved :: BSD License\", 'Operating System ::", "RuntimeError(\"Unable to find version string in __init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\",", "Developers', \"License :: OSI Approved :: BSD License\", 'Operating System :: OS Independent',", "r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline, re.M) if match: return match.group(1) else:", "setuptools import setup def get_version(): with open('iterpipe/__init__.py', \"r\") as vfh: vline = vfh.read()", "vfh.read() vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline, re.M) if match:", "to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status :: 4 - Beta\", 'Intended", 
"install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status :: 4 - Beta\", 'Intended Audience ::", "= ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline, re.M) if match: return match.group(1) else: raise", "Approved :: BSD License\", 'Operating System :: OS Independent', 'Programming Language :: Python", "BSD License\", 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.5',", "tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status :: 4 - Beta\", 'Intended Audience :: Developers',", "classifiers=[ \"Development Status :: 4 - Beta\", 'Intended Audience :: Developers', \"License ::", "author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline of", "vline = vfh.read() vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline, re.M)", "OSI Approved :: BSD License\", 'Operating System :: OS Independent', 'Programming Language ::", "- Beta\", 'Intended Audience :: Developers', \"License :: OSI Approved :: BSD License\",", "re.search(vregex, vline, re.M) if match: return match.group(1) else: raise RuntimeError(\"Unable to find version", "'Intended Audience :: Developers', \"License :: OSI Approved :: BSD License\", 'Operating System", "System :: OS Independent', 'Programming Language :: Python :: 3.5', \"Topic :: Utilities\"])", ":: Developers', \"License :: OSI Approved :: BSD License\", 'Operating System :: OS", ":: BSD License\", 'Operating System :: OS Independent', 'Programming Language :: Python ::", "['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline, re.M) if match: return match.group(1) else: raise RuntimeError(\"Unable", "return match.group(1) else: raise RuntimeError(\"Unable to find version string in __init__\") setup( name=\"iterpipe\",", "License\", 
'Operating System :: OS Independent', 'Programming Language :: Python :: 3.5', \"Topic", ":: OSI Approved :: BSD License\", 'Operating System :: OS Independent', 'Programming Language", "= r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline, re.M) if match: return match.group(1)", "vfh: vline = vfh.read() vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline,", "import setup def get_version(): with open('iterpipe/__init__.py', \"r\") as vfh: vline = vfh.read() vregex", "pipeline of functions to apply to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status", "import re from setuptools import setup def get_version(): with open('iterpipe/__init__.py', \"r\") as vfh:", "vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex, vline, re.M) if match: return", "\"License :: OSI Approved :: BSD License\", 'Operating System :: OS Independent', 'Programming", "functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline of functions to apply to iterables\",", "keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline of functions to apply", "<reponame>TobiasHerr/iterpipe-fork import re from setuptools import setup def get_version(): with open('iterpipe/__init__.py', \"r\") as", "vline, re.M) if match: return match.group(1) else: raise RuntimeError(\"Unable to find version string", "__init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'':", "version string in __init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", 
author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing", "with open('iterpipe/__init__.py', \"r\") as vfh: vline = vfh.read() vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"", "as vfh: vline = vfh.read() vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match = re.search(vregex,", "setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'},", "of functions to apply to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status ::", "packages=['iterpipe'], long_description=\"compose pipeline of functions to apply to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[", "iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status :: 4 - Beta\", 'Intended Audience", "4 - Beta\", 'Intended Audience :: Developers', \"License :: OSI Approved :: BSD", "get_version(): with open('iterpipe/__init__.py', \"r\") as vfh: vline = vfh.read() vregex = r\"^__version__ =", "= re.search(vregex, vline, re.M) if match: return match.group(1) else: raise RuntimeError(\"Unable to find", "description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline of functions", "in __init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\",", "open('iterpipe/__init__.py', \"r\") as vfh: vline = vfh.read() vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match", "re from 
setuptools import setup def get_version(): with open('iterpipe/__init__.py', \"r\") as vfh: vline", "long_description=\"compose pipeline of functions to apply to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development", "package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline of functions to apply to iterables\", install_requires=[], tests_require=['pytest',", "\"Development Status :: 4 - Beta\", 'Intended Audience :: Developers', \"License :: OSI", "'Operating System :: OS Independent', 'Programming Language :: Python :: 3.5', \"Topic ::", "Status :: 4 - Beta\", 'Intended Audience :: Developers', \"License :: OSI Approved", "url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline of functions to apply to iterables\", install_requires=[],", "multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline of functions to apply to", "def get_version(): with open('iterpipe/__init__.py', \"r\") as vfh: vline = vfh.read() vregex = r\"^__version__", "string in __init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\",", "to apply to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status :: 4 -", ":: 4 - Beta\", 'Intended Audience :: Developers', \"License :: OSI Approved ::", "find version string in __init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel", "from setuptools import setup def get_version(): with open('iterpipe/__init__.py', \"r\") as vfh: vline =", "if match: return match.group(1) else: raise RuntimeError(\"Unable to find 
version string in __init__\")", "re.M) if match: return match.group(1) else: raise RuntimeError(\"Unable to find version string in", "functions to apply to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status :: 4", "author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose pipeline", "version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'], long_description=\"compose", "name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"iterpipe\", license=\"BSD\", keywords=\"parallel multiprocessing functional\", url=\"https://github.com/perrygeo/iterpipe\", package_dir={'': '.'}, packages=['iterpipe'],", "Beta\", 'Intended Audience :: Developers', \"License :: OSI Approved :: BSD License\", 'Operating", "apply to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'], classifiers=[ \"Development Status :: 4 - Beta\",", "match: return match.group(1) else: raise RuntimeError(\"Unable to find version string in __init__\") setup(", "'.'}, packages=['iterpipe'], long_description=\"compose pipeline of functions to apply to iterables\", install_requires=[], tests_require=['pytest', 'pytest-cov'],", "\"r\") as vfh: vline = vfh.read() vregex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" match =", "raise RuntimeError(\"Unable to find version string in __init__\") setup( name=\"iterpipe\", version=get_version(), author=\"<NAME>\", author_email=\"<EMAIL>\"," ]
[ "of just this file done = True for i in xrange(args.threads): thr =", "1: logging.warning(\"%s: %s attempts\" % (thr.name, attempts)) queue.task_done() else: logging.error(\"%s: FAILED %s\" %", "# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN", "except TypeError: # key too short logging.error('invalid access key') sys.exit(2) except azure.WindowsAzureError: #", "or block)') ap.add_argument('--threads', dest='threads', type=int, default=8, help='the number of concurrent requests [1..64]') args", "= time.time() # XXX show total stats else: logging.error('block blobs cannot be uploaded", "this list of conditions and the following disclaimer. # 2. Redistributions in binary", "ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #", "upload as a blob') ap.add_argument('--account', dest='account', default=default_account, help='storage account name') ap.add_argument('--key', dest='key', default=default_key,", "queue = Queue.Queue(args.threads * 2) done = False def request_handler(): global done thr", "All rights reserved. # # Redistribution and use in source and binary forms,", "1. 
Redistributions of source code must retain the above copyright # notice, this", "default='page', help='the type of blob to create (page or block)') ap.add_argument('--threads', dest='threads', type=int,", "sys.exit(1) if args.blob_type not in ['page', 'block']: logging.error('%s is not a valid blob", "range, type) = queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s: %s\" % (thr.name, range)) attempts", "argparse import azure import azure.storage import logging import os import socket import sys", "done thr = threading.currentThread() while not done: try: (bs, cntnr, file, data, range,", "to create container %s' % (args.container)) sys.exit(2) except TypeError: # key too short", "are permitted provided that the following conditions # are met: # # 1.", "(args.blob_type)) sys.exit(1) if args.threads < 1 or args.threads > 64: logging.error('%s is not", "type = \"update\" if chunk_type != type: uploaded += page_write(blobsvc, container, file, chunk_data,", "opening %s: %s (errno=%d)' % (file, e.strerror, e.errno)) return (False, 0, 0) f.seek(0,", "logging import os import socket import sys import threading import time import Queue", "offset, size, type): if type != \"update\": return 0 logging.info(\"%s: offset=%lu, length=%lu\" %", "invalid account logging.error('unable to create container %s' % (args.container)) sys.exit(2) except TypeError: #", "and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the", "end = offset + size - 1 range = 'bytes=%lu-%lu' % (offset, end)", "blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset = 0 chunk_type = None chunk_start =", "chunk_type) logging.info('%s: waiting for upload to complete' % (file)) queue.join() return (True, filesize,", "5: attempts += 1 try: bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type) success = True", "import time import Queue MAX_SIZE = 4 * 1048576 PAGE_SIZE = 512 default_account", "THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "success = True except Exception: pass if success: if attempts > 1: logging.warning(\"%s:", "% (offset, end) queue.put((bs, container, file, data, range, type)) return size def page_upload(blobsvc,", "chunk_type = None chunk_start = 0 chunk_size = 0 chunk_data = None uploaded", "if attempts > 1: logging.warning(\"%s: %s attempts\" % (thr.name, attempts)) queue.task_done() else: logging.error(\"%s:", "FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE", "number of concurrent requests [1..64]') args = ap.parse_args() if not args.account or not", "% (thr.name, attempts)) queue.task_done() else: logging.error(\"%s: FAILED %s\" % (thr.name, range)) # XXX", "default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+',", "are met: # # 1. 
Redistributions of source code must retain the above", "args.container is None: logging.error('Missing container name') sys.exit(1) if args.blob_type not in ['page', 'block']:", "conditions and the following disclaimer in the # documentation and/or other materials provided", "bytes)' % (file, PAGE_SIZE)) f.close() return (False, filesize, 0) logging.info('Uploading %s' % (file))", "+= uploaded end_time = time.time() # XXX show total stats else: logging.error('block blobs", "= threading.currentThread() while not done: try: (bs, cntnr, file, data, range, type) =", "and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED", "for upload to complete' % (file)) queue.join() return (True, filesize, uploaded) if args.blob_type", "thread argument' % (args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None, None,", "+= page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = None offset +=", "AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "data, range, type)) return size def page_upload(blobsvc, container, file): try: f = open(file,", "try: bs.create_container(args.container, None, None, False) except socket.gaierror as e: # invalid account logging.error('unable", "True except Exception: pass if success: if attempts > 1: logging.warning(\"%s: %s attempts\"", "import os import socket import sys import threading import time import Queue MAX_SIZE", "while not done: try: (bs, cntnr, file, data, range, type) = queue.get(timeout=2) except", "logging.info(\"%s: %s\" % (thr.name, range)) attempts = 0 success = False while not", "XXX show total stats else: logging.error('block blobs cannot be uploaded by us yet')", "0, 0) f.seek(0, os.SEEK_END) filesize = f.tell() if filesize % PAGE_SIZE: logging.error('%s is", "and/or --key information') sys.exit(1) if args.container is None: 
logging.error('Missing container name') sys.exit(1) if", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE", "ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "if args.blob_type not in ['page', 'block']: logging.error('%s is not a valid blob type'", "for file in args.files: file_start_time = time.time() (status, filesize, uploaded) = page_upload(bs, args.container,", "import threading import time import Queue MAX_SIZE = 4 * 1048576 PAGE_SIZE =", "Queue MAX_SIZE = 4 * 1048576 PAGE_SIZE = 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key", "offset < filesize: f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE): type", "chunk_start, chunk_size, chunk_type) chunk_type = type chunk_start = offset chunk_size = 0 chunk_data", "notice, this list of conditions and the following disclaimer in the # documentation", "2) done = False def request_handler(): global done thr = threading.currentThread() while not", "in xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs, container, file, data, offset,", "= ap.parse_args() if not args.account or not args.key: logging.error('Missing --account and/or --key information')", "MAX_SIZE: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = None", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR", "None, None, False) except socket.gaierror as e: # invalid account logging.error('unable to create", "type chunk_start = offset chunk_size = 0 chunk_data = b'' chunk_size += PAGE_SIZE", "THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR", "in args.files: file_start_time = time.time() (status, filesize, uploaded) = page_upload(bs, args.container, file) file_end_time", "(file)) queue.join() return (True, filesize, uploaded) if args.blob_type == 'page': 
total_uploaded = 0", "= 4 * 1048576 PAGE_SIZE = 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY')", "= False while not success and attempts < 5: attempts += 1 try:", "account logging.error('unable to create container %s' % (args.container)) sys.exit(2) except TypeError: # key", "the upload # of just this file done = True for i in", "default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files',", "azure.storage import logging import os import socket import sys import threading import time", "None chunk_start = 0 chunk_size = 0 chunk_data = None uploaded = 0", "# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #", "args.threads > 64: logging.error('%s is not a valid thread argument' % (args.threads)) sys.exit(1)", "# XXX show file stats total_uploaded += uploaded end_time = time.time() # XXX", "= 0 chunk_type = None chunk_start = 0 chunk_size = 0 chunk_data =", "IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "help='storage container to upload to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type of blob to", "= type chunk_start = offset chunk_size = 0 chunk_data = b'' chunk_size +=", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import argparse import azure import", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN", "def page_upload(blobsvc, container, file): try: f = open(file, 'rb') except IOError as e:", "'', 'PageBlob', x_ms_blob_content_length=filesize) offset = 0 chunk_type = None chunk_start = 0 chunk_size", "(args.container)) sys.exit(2) except TypeError: # key too short logging.error('invalid access key') sys.exit(2) except", "type=str, nargs='+', help='file to upload as a blob') ap.add_argument('--account', dest='account', default=default_account, help='storage account", "filesize: f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE): type = \"clear\"", "%s\" % (thr.name, range)) # XXX this terminates the prohgram, it doesn't stop", "show file stats total_uploaded += uploaded end_time = time.time() # XXX show total", "of blob to create (page or block)') ap.add_argument('--threads', dest='threads', type=int, default=8, help='the number", "size)) end = offset + size - 1 range = 'bytes=%lu-%lu' % (offset,", "NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A", "or without # modification, are permitted provided that the following conditions # are", "help='file to upload as a blob') ap.add_argument('--account', dest='account', default=default_account, help='storage account name') ap.add_argument('--key',", "dest='blob_type', default='page', help='the type of blob to create (page or block)') ap.add_argument('--threads', dest='threads',", "# key too short logging.error('invalid access key') sys.exit(2) except azure.WindowsAzureError: # invalid (wrong)", "the page size (= %d bytes)' % (file, PAGE_SIZE)) f.close() return (False, filesize,", "as a blob') ap.add_argument('--account', dest='account', default=default_account, help='storage account name') ap.add_argument('--key', dest='key', default=default_key, help='storage", "(file, offset, size)) end = offset + size - 1 range = 'bytes=%lu-%lu'", 
"above copyright # notice, this list of conditions and the following disclaimer. #", "args.key: logging.error('Missing --account and/or --key information') sys.exit(1) if args.container is None: logging.error('Missing container", "# documentation and/or other materials provided with the distribution. # # THIS SOFTWARE", "container, file, chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s: waiting for upload to complete' %", "rights reserved. # # Redistribution and use in source and binary forms, with", "+= 1 try: bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type) success = True except Exception:", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER", "except IOError as e: logging.error('error opening %s: %s (errno=%d)' % (file, e.strerror, e.errno))", "i in xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs, container, file, data,", "upload # of just this file done = True for i in xrange(args.threads):", "offset + size - 1 range = 'bytes=%lu-%lu' % (offset, end) queue.put((bs, container,", "e: logging.error('error opening %s: %s (errno=%d)' % (file, e.strerror, e.errno)) return (False, 0,", "= 0 start_time = time.time() for file in args.files: file_start_time = time.time() (status,", "THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", "Redistributions in binary form must reproduce the above copyright # notice, this list", "* 2) done = False def request_handler(): global done thr = threading.currentThread() while", "azure import azure.storage import logging import os import socket import sys import threading", "(thr.name, range)) # XXX this terminates the prohgram, it doesn't stop the upload", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR", "(bs, cntnr, file, data, range, type) = queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s: %s\"", "cntnr, file, data, 
range, type) = queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s: %s\" %", "start_time = time.time() for file in args.files: file_start_time = time.time() (status, filesize, uploaded)", "--account and/or --key information') sys.exit(1) if args.container is None: logging.error('Missing container name') sys.exit(1)", "BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR", "help='storage account name') ap.add_argument('--key', dest='key', default=default_key, help='storage account access key') ap.add_argument('--container', dest='container', default=None,", "help='the number of concurrent requests [1..64]') args = ap.parse_args() if not args.account or", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE)", "with or without # modification, are permitted provided that the following conditions #", "IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED", "ap.add_argument('--account', dest='account', default=default_account, help='storage account name') ap.add_argument('--key', dest='key', default=default_key, help='storage account access key')", "blob type' % (args.blob_type)) sys.exit(1) if args.threads < 1 or args.threads > 64:", "size - 1 range = 'bytes=%lu-%lu' % (offset, end) queue.put((bs, container, file, data,", "= 0 chunk_data = None uploaded = 0 while offset < filesize: f.seek(offset,", "(= %d bytes)' % (file, PAGE_SIZE)) f.close() return (False, filesize, 0) logging.info('Uploading %s'", "metavar='file', type=str, nargs='+', help='file to upload as a blob') ap.add_argument('--account', dest='account', default=default_account, help='storage", "time import Queue MAX_SIZE = 4 * 1048576 PAGE_SIZE = 512 default_account =", "not a valid thread argument' % (args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try:", "attempts > 1: logging.warning(\"%s: %s attempts\" % (thr.name, attempts)) queue.task_done() else: 
logging.error(\"%s: FAILED", "except azure.WindowsAzureError: # invalid (wrong) key logging.error('invalid access key') sys.exit(2) queue = Queue.Queue(args.threads", "waiting for upload to complete' % (file)) queue.join() return (True, filesize, uploaded) if", "container, file, data, offset, size, type): if type != \"update\": return 0 logging.info(\"%s:", "if args.container is None: logging.error('Missing container name') sys.exit(1) if args.blob_type not in ['page',", "chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = None offset += PAGE_SIZE uploaded += page_write(blobsvc,", "data, offset, size, type): if type != \"update\": return 0 logging.info(\"%s: offset=%lu, length=%lu\"", "length=%lu\" % (file, offset, size)) end = offset + size - 1 range", "while not success and attempts < 5: attempts += 1 try: bs.put_page(cntnr, file,", "AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL", "OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE", "short logging.error('invalid access key') sys.exit(2) except azure.WindowsAzureError: # invalid (wrong) key logging.error('invalid access", "# Copyright (c) 2015 <NAME> # All rights reserved. # # Redistribution and", "bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None, None, False) except socket.gaierror as e:", "page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = type chunk_start = offset", "type=int, default=8, help='the number of concurrent requests [1..64]') args = ap.parse_args() if not", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT", "%s (errno=%d)' % (file, e.strerror, e.errno)) return (False, 0, 0) f.seek(0, os.SEEK_END) filesize", "container, file, data, range, type)) return size def page_upload(blobsvc, container, file): try: f", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import argparse", "met: # # 1. Redistributions of source code must retain the above copyright", "sys.exit(2) except azure.WindowsAzureError: # invalid (wrong) key logging.error('invalid access key') sys.exit(2) queue =", "args.container, file) file_end_time = time.time() # XXX show file stats total_uploaded += uploaded", "time.time() (status, filesize, uploaded) = page_upload(bs, args.container, file) file_end_time = time.time() # XXX", "code must retain the above copyright # notice, this list of conditions and", "%s' % (args.container)) sys.exit(2) except TypeError: # key too short logging.error('invalid access key')", "sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None, None, False) except socket.gaierror as", "# notice, this list of conditions and the following disclaimer in the #", "args = ap.parse_args() if not args.account or not args.key: logging.error('Missing --account and/or --key", "XXX show file stats total_uploaded += uploaded end_time = time.time() # XXX show", "valid thread argument' % (args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None,", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import argparse import", "uploaded) if args.blob_type == 'page': total_uploaded = 0 start_time = time.time() for file", "0 while offset < filesize: f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE) if data ==", "SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "name') sys.exit(1) if args.blob_type not in ['page', 'block']: logging.error('%s is not a valid", "conditions # are met: # # 1. 
Redistributions of source code must retain", "1 try: bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type) success = True except Exception: pass", "== \"update\": chunk_data += data if chunk_size == MAX_SIZE: uploaded += page_write(blobsvc, container,", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR", "copyright # notice, this list of conditions and the following disclaimer. # 2.", "% (args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None, None, False) except", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS", "ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED", "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY", "(wrong) key logging.error('invalid access key') sys.exit(2) queue = Queue.Queue(args.threads * 2) done =", "PAGE_SIZE: logging.error('%s is not a multiple of the page size (= %d bytes)'", "%s\" % (thr.name, range)) attempts = 0 success = False while not success", "of the page size (= %d bytes)' % (file, PAGE_SIZE)) f.close() return (False,", "= 0 success = False while not success and attempts < 5: attempts", "access key') sys.exit(2) except azure.WindowsAzureError: # invalid (wrong) key logging.error('invalid access key') sys.exit(2)", "file, data, x_ms_range=range, x_ms_page_write=type) success = True except Exception: pass if success: if", "INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED", "os import socket import sys import threading import time import Queue MAX_SIZE =", "except Queue.Empty: continue logging.info(\"%s: %s\" % (thr.name, range)) attempts = 0 success =", "= 0 chunk_size = 0 chunk_data = None uploaded = 0 while offset", "the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above", "WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #", "not done: try: (bs, cntnr, file, data, range, type) = queue.get(timeout=2) except Queue.Empty:", "type) = queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s: %s\" % (thr.name, range)) attempts =", "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF #", "(True, filesize, uploaded) if args.blob_type == 'page': total_uploaded = 0 start_time = time.time()", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY,", "0 chunk_data = None uploaded = 0 while offset < filesize: f.seek(offset, os.SEEK_SET)", "# notice, this list of conditions and the following disclaimer. # 2. Redistributions", "= \"update\" if chunk_type != type: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start,", "'PageBlob', x_ms_blob_content_length=filesize) offset = 0 chunk_type = None chunk_start = 0 chunk_size =", "and use in source and binary forms, with or without # modification, are", "\"clear\" else: type = \"update\" if chunk_type != type: uploaded += page_write(blobsvc, container,", "of concurrent requests [1..64]') args = ap.parse_args() if not args.account or not args.key:", "attempts < 5: attempts += 1 try: bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type) success", "1 or args.threads > 64: logging.error('%s is not a valid thread argument' %", "--key information') sys.exit(1) if args.container is None: logging.error('Missing container name') sys.exit(1) if args.blob_type", "except socket.gaierror as e: # invalid account logging.error('unable to create container %s' %", "too short logging.error('invalid access key') sys.exit(2) except azure.WindowsAzureError: # invalid (wrong) key logging.error('invalid", "CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "if chunk_size == MAX_SIZE: uploaded += 
page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type)", "type)) return size def page_upload(blobsvc, container, file): try: f = open(file, 'rb') except", "file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset = 0 chunk_type = None chunk_start = 0", "= open(file, 'rb') except IOError as e: logging.error('error opening %s: %s (errno=%d)' %", "= b'' chunk_size += PAGE_SIZE if type == \"update\": chunk_data += data if", "args.account or not args.key: logging.error('Missing --account and/or --key information') sys.exit(1) if args.container is", "this list of conditions and the following disclaimer in the # documentation and/or", "> 1: logging.warning(\"%s: %s attempts\" % (thr.name, attempts)) queue.task_done() else: logging.error(\"%s: FAILED %s\"", "type == \"update\": chunk_data += data if chunk_size == MAX_SIZE: uploaded += page_write(blobsvc,", "logging.error(\"%s: FAILED %s\" % (thr.name, range)) # XXX this terminates the prohgram, it", "end) queue.put((bs, container, file, data, range, type)) return size def page_upload(blobsvc, container, file):", "DAMAGE. 
# import argparse import azure import azure.storage import logging import os import", "bs.create_container(args.container, None, None, False) except socket.gaierror as e: # invalid account logging.error('unable to", "% (thr.name, range)) # XXX this terminates the prohgram, it doesn't stop the", "multiple of the page size (= %d bytes)' % (file, PAGE_SIZE)) f.close() return", "offset chunk_size = 0 chunk_data = b'' chunk_size += PAGE_SIZE if type ==", "OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES #", "x_ms_blob_content_length=filesize) offset = 0 chunk_type = None chunk_start = 0 chunk_size = 0", "sys import threading import time import Queue MAX_SIZE = 4 * 1048576 PAGE_SIZE", "while offset < filesize: f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE):", "return (True, filesize, uploaded) if args.blob_type == 'page': total_uploaded = 0 start_time =", "% (args.blob_type)) sys.exit(1) if args.threads < 1 or args.threads > 64: logging.error('%s is", "# are met: # # 1. Redistributions of source code must retain the", "disclaimer in the # documentation and/or other materials provided with the distribution. #", "queue.put((bs, container, file, data, range, type)) return size def page_upload(blobsvc, container, file): try:", "uploaded end_time = time.time() # XXX show total stats else: logging.error('block blobs cannot", "THE POSSIBILITY OF SUCH DAMAGE. # import argparse import azure import azure.storage import", "not in ['page', 'block']: logging.error('%s is not a valid blob type' % (args.blob_type))", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA,", "IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import argparse import azure", "(False, 0, 0) f.seek(0, os.SEEK_END) filesize = f.tell() if filesize % PAGE_SIZE: logging.error('%s", "``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "f = open(file, 'rb') except IOError as e: logging.error('error opening %s: %s (errno=%d)'", "import socket import sys import threading import time import Queue MAX_SIZE = 4", "!= \"update\": return 0 logging.info(\"%s: offset=%lu, length=%lu\" % (file, offset, size)) end =", "chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = type chunk_start = offset chunk_size = 0", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF", "= Queue.Queue(args.threads * 2) done = False def request_handler(): global done thr =", "azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None, None, False) except socket.gaierror as e: # invalid", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING", "f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE): type = \"clear\" else:", "(args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None, None, False) except socket.gaierror", "it doesn't stop the upload # of just this file done = True", "f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE): type = \"clear\" else: type = \"update\" if", "stats else: logging.error('block blobs cannot be uploaded by us yet') sys.exit(1) done =", "def page_write(bs, container, file, data, offset, size, type): if type != \"update\": return", "<NAME> # All rights reserved. 
# # Redistribution and use in source and", "must retain the above copyright # notice, this list of conditions and the", "file done = True for i in xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start()", "IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.", "+= PAGE_SIZE if type == \"update\": chunk_data += data if chunk_size == MAX_SIZE:", "Queue.Empty: continue logging.info(\"%s: %s\" % (thr.name, range)) attempts = 0 success = False", "file stats total_uploaded += uploaded end_time = time.time() # XXX show total stats", "None, False) except socket.gaierror as e: # invalid account logging.error('unable to create container", "Redistribution and use in source and binary forms, with or without # modification,", "% (file, PAGE_SIZE)) f.close() return (False, filesize, 0) logging.info('Uploading %s' % (file)) blobsvc.put_blob(container,", "# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS", "OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "source and binary forms, with or without # modification, are permitted provided that", "following conditions # are met: # # 1. 
Redistributions of source code must", "information') sys.exit(1) if args.container is None: logging.error('Missing container name') sys.exit(1) if args.blob_type not", "< 5: attempts += 1 try: bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type) success =", "FAILED %s\" % (thr.name, range)) # XXX this terminates the prohgram, it doesn't", "return 0 logging.info(\"%s: offset=%lu, length=%lu\" % (file, offset, size)) end = offset +", "try: f = open(file, 'rb') except IOError as e: logging.error('error opening %s: %s", "= 0 chunk_data = b'' chunk_size += PAGE_SIZE if type == \"update\": chunk_data", "WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF", "above copyright # notice, this list of conditions and the following disclaimer in", "done = False def request_handler(): global done thr = threading.currentThread() while not done:", "bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type) success = True except Exception: pass if success:", "filesize, uploaded) = page_upload(bs, args.container, file) file_end_time = time.time() # XXX show file", "open(file, 'rb') except IOError as e: logging.error('error opening %s: %s (errno=%d)' % (file,", "PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE AUTHOR BE LIABLE", "binary form must reproduce the above copyright # notice, this list of conditions", "= time.time() (status, filesize, uploaded) = page_upload(bs, args.container, file) file_end_time = time.time() #", "Redistributions of source code must retain the above copyright # notice, this list", "filesize = f.tell() if filesize % PAGE_SIZE: logging.error('%s is not a multiple of", "form must reproduce the above copyright # notice, this list of conditions and", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #", "STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "file_end_time = time.time() # XXX show file stats total_uploaded += uploaded end_time =", "as e: logging.error('error opening %s: %s (errno=%d)' % (file, e.strerror, e.errno)) return (False,", "= time.time() for file in args.files: file_start_time = time.time() (status, filesize, uploaded) =", "LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,", "of conditions and the following disclaimer. # 2. 
Redistributions in binary form must", "type = \"clear\" else: type = \"update\" if chunk_type != type: uploaded +=", "not a valid blob type' % (args.blob_type)) sys.exit(1) if args.threads < 1 or", "type' % (args.blob_type)) sys.exit(1) if args.threads < 1 or args.threads > 64: logging.error('%s", "1 range = 'bytes=%lu-%lu' % (offset, end) queue.put((bs, container, file, data, range, type))", "a multiple of the page size (= %d bytes)' % (file, PAGE_SIZE)) f.close()", "help='storage account access key') ap.add_argument('--container', dest='container', default=None, help='storage container to upload to') ap.add_argument('--blob-type',", "to create (page or block)') ap.add_argument('--threads', dest='threads', type=int, default=8, help='the number of concurrent", "= True for i in xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs,", "type: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = type", "chunk_type) chunk_type = type chunk_start = offset chunk_size = 0 chunk_data = b''", "following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright", "PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR", "key logging.error('invalid access key') sys.exit(2) queue = Queue.Queue(args.threads * 2) done = False", "thr = threading.currentThread() while not done: try: (bs, cntnr, file, data, range, type)", "logging.error('%s is not a multiple of the page size (= %d bytes)' %", "access key') sys.exit(2) queue = Queue.Queue(args.threads * 2) done = False def request_handler():", "default=8, help='the number of concurrent requests [1..64]') args = ap.parse_args() if not args.account", "64: logging.error('%s is not a valid thread argument' % (args.threads)) sys.exit(1) bs =", "range)) attempts = 0 success = False while not success and attempts <", "# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #", "invalid (wrong) key logging.error('invalid access key') sys.exit(2) queue = Queue.Queue(args.threads * 2) done", "argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+', help='file to upload as a blob')", "attempts\" % (thr.name, attempts)) queue.task_done() else: logging.error(\"%s: FAILED %s\" % (thr.name, range)) #", "filesize, 0) logging.info('Uploading %s' % (file)) blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset =", "ap.parse_args() if not args.account or not args.key: logging.error('Missing --account and/or --key information') sys.exit(1)", "the above copyright # notice, this list of conditions and the following disclaimer", "key too short logging.error('invalid access key') sys.exit(2) except azure.WindowsAzureError: # invalid (wrong) key", "= True except Exception: pass if success: if attempts > 1: logging.warning(\"%s: %s", "ARE DISCLAIMED. 
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY", "uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = None offset", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE,", "azure.WindowsAzureError: # invalid (wrong) key logging.error('invalid access key') sys.exit(2) queue = Queue.Queue(args.threads *", "uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+', help='file to upload as a blob') ap.add_argument('--account', dest='account',", "else: logging.error('block blobs cannot be uploaded by us yet') sys.exit(1) done = True", "(file)) blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset = 0 chunk_type = None chunk_start", "0) f.seek(0, os.SEEK_END) filesize = f.tell() if filesize % PAGE_SIZE: logging.error('%s is not", "attempts += 1 try: bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type) success = True except", "documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS", "0 start_time = time.time() for file in args.files: file_start_time = time.time() (status, filesize,", "'block']: logging.error('%s is not a valid blob type' % (args.blob_type)) sys.exit(1) if args.threads", "[1..64]') args = ap.parse_args() if not args.account or not args.key: logging.error('Missing --account and/or", "'rb') except IOError as e: logging.error('error opening %s: %s (errno=%d)' % (file, e.strerror,", "and the following disclaimer in the # documentation and/or other materials provided with", "to upload to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type of blob to create (page", "doesn't stop the upload # of just this file done = True for", "OF SUCH DAMAGE. 
# import argparse import azure import azure.storage import logging import", "chunk_data = None uploaded = 0 while offset < filesize: f.seek(offset, os.SEEK_SET) data", "% (args.container)) sys.exit(2) except TypeError: # key too short logging.error('invalid access key') sys.exit(2)", "0 success = False while not success and attempts < 5: attempts +=", "forms, with or without # modification, are permitted provided that the following conditions", "time.time() # XXX show file stats total_uploaded += uploaded end_time = time.time() #", "range)) # XXX this terminates the prohgram, it doesn't stop the upload #", "thr.setDaemon(True) thr.start() def page_write(bs, container, file, data, offset, size, type): if type !=", "= None chunk_start = 0 chunk_size = 0 chunk_data = None uploaded =", "uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s: waiting for upload", "if chunk_type != type: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type)", "# # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY", "= f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE): type = \"clear\" else: type = \"update\"", "#!/usr/bin/env python # # Copyright (c) 2015 <NAME> # All rights reserved. #", "source code must retain the above copyright # notice, this list of conditions", "conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce", "attempts = 0 success = False while not success and attempts < 5:", "in source and binary forms, with or without # modification, are permitted provided", "or args.threads > 64: logging.error('%s is not a valid thread argument' % (args.threads))", "= azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None, None, False) except socket.gaierror as e: #", "import azure import azure.storage import logging import os import socket import sys import", "OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "# # Redistribution and use in source and binary forms, with or without", "AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR", "dest='threads', type=int, default=8, help='the number of concurrent requests [1..64]') args = ap.parse_args() if", "queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s: %s\" % (thr.name, range)) attempts = 0 success", "uploaded = 0 while offset < filesize: f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE) if", "just this file done = True for i in xrange(args.threads): thr = threading.Thread(target=request_handler)", "name') ap.add_argument('--key', dest='key', default=default_key, help='storage account access key') ap.add_argument('--container', dest='container', default=None, help='storage container", "account_key=args.key) try: bs.create_container(args.container, None, None, False) except socket.gaierror as e: # invalid account", "try: (bs, cntnr, file, data, range, type) = queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s:", "OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "data, x_ms_range=range, x_ms_page_write=type) success = True except Exception: pass if success: if attempts", "offset, size)) end = offset + size - 1 range = 'bytes=%lu-%lu' %", "in the # documentation and/or other materials provided with the 
distribution. # #", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import", "page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = None offset += PAGE_SIZE", "IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "None uploaded = 0 while offset < filesize: f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE)", "DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "nargs='+', help='file to upload as a blob') ap.add_argument('--account', dest='account', default=default_account, help='storage account name')", "block)') ap.add_argument('--threads', dest='threads', type=int, default=8, help='the number of concurrent requests [1..64]') args =", "the following conditions # are met: # # 1. Redistributions of source code", "this terminates the prohgram, it doesn't stop the upload # of just this", "# import argparse import azure import azure.storage import logging import os import socket", "import logging import os import socket import sys import threading import time import", "dest='account', default=default_account, help='storage account name') ap.add_argument('--key', dest='key', default=default_key, help='storage account access key') ap.add_argument('--container',", "logging.error('Missing --account and/or --key information') sys.exit(1) if args.container is None: logging.error('Missing container name')", "OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "chunk_start, chunk_size, chunk_type) logging.info('%s: waiting for upload to complete' % (file)) queue.join() return", "permitted provided that the following conditions # are met: # # 1. 
Redistributions", "NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL,", "success = False while not success and attempts < 5: attempts += 1", "logging.error('error opening %s: %s (errno=%d)' % (file, e.strerror, e.errno)) return (False, 0, 0)", "DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE", "% (file, e.strerror, e.errno)) return (False, 0, 0) f.seek(0, os.SEEK_END) filesize = f.tell()", "ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "file_start_time = time.time() (status, filesize, uploaded) = page_upload(bs, args.container, file) file_end_time = time.time()", "# XXX this terminates the prohgram, it doesn't stop the upload # of", "= os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+', help='file", "# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND", "continue logging.info(\"%s: %s\" % (thr.name, range)) attempts = 0 success = False while", "HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "% (file)) blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset = 0 chunk_type = None", "list of conditions and the following disclaimer. # 2. Redistributions in binary form", "FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE AUTHOR", "file): try: f = open(file, 'rb') except IOError as e: logging.error('error opening %s:", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY", "attempts)) queue.task_done() else: logging.error(\"%s: FAILED %s\" % (thr.name, range)) # XXX this terminates", "args.blob_type not in ['page', 'block']: logging.error('%s is not a valid blob type' %", "= threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs, container, file, data, offset, size, type): if", "<reponame>xcllnt/azure-upload<filename>azure-upload.py<gh_stars>0 #!/usr/bin/env python # # Copyright (c) 2015 <NAME> # All rights reserved.", "EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL,", "= 'bytes=%lu-%lu' % (offset, end) queue.put((bs, container, file, data, range, type)) return size", "(errno=%d)' % (file, e.strerror, e.errno)) return (False, 0, 0) f.seek(0, os.SEEK_END) filesize =", "use in source and binary forms, with or without # modification, are permitted", "in binary form must reproduce the above copyright # notice, this list of", "= os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file',", "b'' chunk_size += PAGE_SIZE if type == \"update\": chunk_data += data if chunk_size", "container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = None offset += PAGE_SIZE uploaded", "file, data, offset, size, type): if type != \"update\": return 0 logging.info(\"%s: offset=%lu,", "python # # Copyright (c) 2015 <NAME> # All rights reserved. 
# #", "import argparse import azure import azure.storage import logging import os import socket import", "container name') sys.exit(1) if args.blob_type not in ['page', 'block']: logging.error('%s is not a", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF", "(thr.name, attempts)) queue.task_done() else: logging.error(\"%s: FAILED %s\" % (thr.name, range)) # XXX this", "time.time() for file in args.files: file_start_time = time.time() (status, filesize, uploaded) = page_upload(bs,", "ap.add_argument('--threads', dest='threads', type=int, default=8, help='the number of concurrent requests [1..64]') args = ap.parse_args()", "return (False, 0, 0) f.seek(0, os.SEEK_END) filesize = f.tell() if filesize % PAGE_SIZE:", "chunk_size = 0 chunk_data = None uploaded = 0 while offset < filesize:", "== 'page': total_uploaded = 0 start_time = time.time() for file in args.files: file_start_time", "IOError as e: logging.error('error opening %s: %s (errno=%d)' % (file, e.strerror, e.errno)) return", "0 chunk_data = b'' chunk_size += PAGE_SIZE if type == \"update\": chunk_data +=", "# All rights reserved. 
# # Redistribution and use in source and binary", "size, type): if type != \"update\": return 0 logging.info(\"%s: offset=%lu, length=%lu\" % (file,", "logging.info('%s: waiting for upload to complete' % (file)) queue.join() return (True, filesize, uploaded)", "BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING,", "False) except socket.gaierror as e: # invalid account logging.error('unable to create container %s'", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS", "CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", "global done thr = threading.currentThread() while not done: try: (bs, cntnr, file, data,", "BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "4 * 1048576 PAGE_SIZE = 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO)", "# invalid account logging.error('unable to create container %s' % (args.container)) sys.exit(2) except TypeError:", "return size def page_upload(blobsvc, container, file): try: f = open(file, 'rb') except IOError", "modification, are permitted provided that the following conditions # are met: # #", "# modification, are permitted provided that the following conditions # are met: #", "else: type = \"update\" if chunk_type != type: uploaded += page_write(blobsvc, container, file,", "f.close() return (False, filesize, 0) logging.info('Uploading %s' % (file)) blobsvc.put_blob(container, file, '', 'PageBlob',", "= time.time() # XXX show file stats total_uploaded += uploaded end_time = time.time()", "args.files: file_start_time = time.time() (status, filesize, uploaded) = page_upload(bs, args.container, file) file_end_time =", "thr.start() def page_write(bs, container, file, data, offset, size, type): if type != \"update\":", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF", "%s: %s (errno=%d)' % (file, e.strerror, 
e.errno)) return (False, 0, 0) f.seek(0, os.SEEK_END)", "offset=%lu, length=%lu\" % (file, offset, size)) end = offset + size - 1", "+= page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s: waiting for upload to", "blob uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+', help='file to upload as a blob') ap.add_argument('--account',", "logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+', help='file to upload", "= f.tell() if filesize % PAGE_SIZE: logging.error('%s is not a multiple of the", "== MAX_SIZE: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type =", "logging.warning(\"%s: %s attempts\" % (thr.name, attempts)) queue.task_done() else: logging.error(\"%s: FAILED %s\" % (thr.name,", "chunk_start = offset chunk_size = 0 chunk_data = b'' chunk_size += PAGE_SIZE if", "PAGE_SIZE if type == \"update\": chunk_data += data if chunk_size == MAX_SIZE: uploaded", "upload to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type of blob to create (page or", "0) logging.info('Uploading %s' % (file)) blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset = 0", "total_uploaded = 0 start_time = time.time() for file in args.files: file_start_time = time.time()", "logging.error('%s is not a valid blob type' % (args.blob_type)) sys.exit(1) if args.threads <", "\"update\": return 0 logging.info(\"%s: offset=%lu, length=%lu\" % (file, offset, size)) end = offset", "% (file, offset, size)) end = offset + size - 1 range =", "if not args.account or not args.key: logging.error('Missing --account and/or --key information') sys.exit(1) if", "container, file): try: f = open(file, 'rb') except IOError as e: logging.error('error opening", "Copyright (c) 2015 <NAME> # All rights 
reserved. # # Redistribution and use", "chunk_type) chunk_type = None offset += PAGE_SIZE uploaded += page_write(blobsvc, container, file, chunk_data,", "container %s' % (args.container)) sys.exit(2) except TypeError: # key too short logging.error('invalid access", "- 1 range = 'bytes=%lu-%lu' % (offset, end) queue.put((bs, container, file, data, range,", "MAX_SIZE = 4 * 1048576 PAGE_SIZE = 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key =", "success: if attempts > 1: logging.warning(\"%s: %s attempts\" % (thr.name, attempts)) queue.task_done() else:", "complete' % (file)) queue.join() return (True, filesize, uploaded) if args.blob_type == 'page': total_uploaded", "USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY", "= queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s: %s\" % (thr.name, range)) attempts = 0", "file) file_end_time = time.time() # XXX show file stats total_uploaded += uploaded end_time", "# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING", "(INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,", "ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN", "file, data, range, type) = queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s: %s\" % (thr.name,", "chunk_type = None offset += PAGE_SIZE uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start,", "% (thr.name, range)) attempts = 0 success = False while not success and", "argument' % (args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container, None, None, False)", "SUCH DAMAGE. 
# import argparse import azure import azure.storage import logging import os", "= 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob", "for i in xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs, container, file,", "POSSIBILITY OF SUCH DAMAGE. # import argparse import azure import azure.storage import logging", "if filesize % PAGE_SIZE: logging.error('%s is not a multiple of the page size", "= 0 while offset < filesize: f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE) if data", "= None offset += PAGE_SIZE uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size,", "is not a valid thread argument' % (args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key)", "valid blob type' % (args.blob_type)) sys.exit(1) if args.threads < 1 or args.threads >", "this file done = True for i in xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True)", "data = f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE): type = \"clear\" else: type =", "# of just this file done = True for i in xrange(args.threads): thr", "2. 
Redistributions in binary form must reproduce the above copyright # notice, this", "LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "end_time = time.time() # XXX show total stats else: logging.error('block blobs cannot be", "account access key') ap.add_argument('--container', dest='container', default=None, help='storage container to upload to') ap.add_argument('--blob-type', dest='blob_type',", "chunk_size == MAX_SIZE: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type", "THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR", "# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "range = 'bytes=%lu-%lu' % (offset, end) queue.put((bs, container, file, data, range, type)) return", "container to upload to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type of blob to create", "and attempts < 5: attempts += 1 try: bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type)", "PAGE_SIZE)) f.close() return (False, filesize, 0) logging.info('Uploading %s' % (file)) blobsvc.put_blob(container, file, '',", "reproduce the above copyright # notice, this list of conditions and the following", "file, chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s: waiting for upload to complete' % (file))", "pass if success: if attempts > 1: logging.warning(\"%s: %s attempts\" % (thr.name, attempts))", "= argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+', help='file to upload as a", "thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs, container, file, data, offset, size, type):", "# invalid (wrong) key logging.error('invalid access key') sys.exit(2) queue = Queue.Queue(args.threads * 2)", "as e: # invalid 
account logging.error('unable to create container %s' % (args.container)) sys.exit(2)", "if args.blob_type == 'page': total_uploaded = 0 start_time = time.time() for file in", "0 logging.info(\"%s: offset=%lu, length=%lu\" % (file, offset, size)) end = offset + size", "that the following conditions # are met: # # 1. Redistributions of source", "and binary forms, with or without # modification, are permitted provided that the", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO", "blob to create (page or block)') ap.add_argument('--threads', dest='threads', type=int, default=8, help='the number of", "(False, filesize, 0) logging.info('Uploading %s' % (file)) blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset", "total stats else: logging.error('block blobs cannot be uploaded by us yet') sys.exit(1) done", "if success: if attempts > 1: logging.warning(\"%s: %s attempts\" % (thr.name, attempts)) queue.task_done()", "except Exception: pass if success: if attempts > 1: logging.warning(\"%s: %s attempts\" %", "range, type)) return size def page_upload(blobsvc, container, file): try: f = open(file, 'rb')", "import sys import threading import time import Queue MAX_SIZE = 4 * 1048576", "chunk_type = type chunk_start = offset chunk_size = 0 chunk_data = b'' chunk_size", "% PAGE_SIZE: logging.error('%s is not a multiple of the page size (= %d", "threading import time import Queue MAX_SIZE = 4 * 1048576 PAGE_SIZE = 512", "queue.task_done() else: logging.error(\"%s: FAILED %s\" % (thr.name, range)) # XXX this terminates the", "e.errno)) return (False, 0, 0) f.seek(0, os.SEEK_END) filesize = f.tell() if filesize %", "following disclaimer in the # documentation and/or other materials provided with the distribution.", "with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS", "def request_handler(): global done thr = threading.currentThread() while not done: try: (bs, cntnr,", "create (page or block)') ap.add_argument('--threads', dest='threads', type=int, default=8, help='the number of concurrent requests", "(page or block)') ap.add_argument('--threads', dest='threads', type=int, default=8, help='the number of concurrent requests [1..64]')", "not a multiple of the page size (= %d bytes)' % (file, PAGE_SIZE))", "data == bytearray(PAGE_SIZE): type = \"clear\" else: type = \"update\" if chunk_type !=", "dest='key', default=default_key, help='storage account access key') ap.add_argument('--container', dest='container', default=None, help='storage container to upload", "stop the upload # of just this file done = True for i", "PAGE_SIZE uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s: waiting for", "# # 1. Redistributions of source code must retain the above copyright #", "default=None, help='storage container to upload to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type of blob", "x_ms_page_write=type) success = True except Exception: pass if success: if attempts > 1:", "materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE", "success and attempts < 5: attempts += 1 try: bs.put_page(cntnr, file, data, x_ms_range=range,", "f.tell() if filesize % PAGE_SIZE: logging.error('%s is not a multiple of the page", "page_write(bs, container, file, data, offset, size, type): if type != \"update\": return 0", "upload to complete' % (file)) queue.join() return (True, filesize, uploaded) if args.blob_type ==", "chunk_size, chunk_type) chunk_type = None offset += PAGE_SIZE uploaded += page_write(blobsvc, container, file,", "os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file', type=str,", "ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT #", "%d bytes)' % (file, PAGE_SIZE)) f.close() return (False, filesize, 0) logging.info('Uploading %s' %", "in ['page', 'block']: logging.error('%s is not a valid blob type' % (args.blob_type)) sys.exit(1)", "of conditions and the following disclaimer in the # documentation and/or other materials", "< 1 or args.threads > 64: logging.error('%s is not a valid thread argument'", "%s' % (file)) blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset = 0 chunk_type =", "if data == bytearray(PAGE_SIZE): type = \"clear\" else: type = \"update\" if chunk_type", "file in args.files: file_start_time = time.time() (status, filesize, uploaded) = page_upload(bs, args.container, file)", "TypeError: # key too short logging.error('invalid access key') sys.exit(2) except azure.WindowsAzureError: # invalid", "container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = type chunk_start = offset chunk_size", "the prohgram, it doesn't stop the upload # of just this file done", "account name') ap.add_argument('--key', dest='key', default=default_key, help='storage account access 
key') ap.add_argument('--container', dest='container', default=None, help='storage", "chunk_size, chunk_type) chunk_type = type chunk_start = offset chunk_size = 0 chunk_data =", "e.strerror, e.errno)) return (False, 0, 0) f.seek(0, os.SEEK_END) filesize = f.tell() if filesize", "> 64: logging.error('%s is not a valid thread argument' % (args.threads)) sys.exit(1) bs", "logging.info('Uploading %s' % (file)) blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize) offset = 0 chunk_type", "uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = type chunk_start", "# # Copyright (c) 2015 <NAME> # All rights reserved. # # Redistribution", "provided that the following conditions # are met: # # 1. Redistributions of", "requests [1..64]') args = ap.parse_args() if not args.account or not args.key: logging.error('Missing --account", "= offset + size - 1 range = 'bytes=%lu-%lu' % (offset, end) queue.put((bs,", "sys.exit(2) queue = Queue.Queue(args.threads * 2) done = False def request_handler(): global done", "= \"clear\" else: type = \"update\" if chunk_type != type: uploaded += page_write(blobsvc,", "ap.add_argument('files', metavar='file', type=str, nargs='+', help='file to upload as a blob') ap.add_argument('--account', dest='account', default=default_account,", "sys.exit(2) except TypeError: # key too short logging.error('invalid access key') sys.exit(2) except azure.WindowsAzureError:", "size (= %d bytes)' % (file, PAGE_SIZE)) f.close() return (False, filesize, 0) logging.info('Uploading", "< filesize: f.seek(offset, os.SEEK_SET) data = f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE): type =", "SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR #", "import Queue MAX_SIZE = 4 * 1048576 PAGE_SIZE = 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT')", "type of blob to create (page or block)') ap.add_argument('--threads', dest='threads', type=int, default=8, help='the", 
"data if chunk_size == MAX_SIZE: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size,", "a blob') ap.add_argument('--account', dest='account', default=default_account, help='storage account name') ap.add_argument('--key', dest='key', default=default_key, help='storage account", "size def page_upload(blobsvc, container, file): try: f = open(file, 'rb') except IOError as", "+= PAGE_SIZE uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s: waiting", "BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "(file, PAGE_SIZE)) f.close() return (False, filesize, 0) logging.info('Uploading %s' % (file)) blobsvc.put_blob(container, file,", "socket import sys import threading import time import Queue MAX_SIZE = 4 *", "(offset, end) queue.put((bs, container, file, data, range, type)) return size def page_upload(blobsvc, container,", "'page': total_uploaded = 0 start_time = time.time() for file in args.files: file_start_time =", "stats total_uploaded += uploaded end_time = time.time() # XXX show total stats else:", "INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT,", "disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright #", "logging.error('Missing container name') sys.exit(1) if args.blob_type not in ['page', 'block']: logging.error('%s is not", "chunk_size += PAGE_SIZE if type == \"update\": chunk_data += data if chunk_size ==", "(file, e.strerror, e.errno)) return (False, 0, 0) f.seek(0, os.SEEK_END) filesize = f.tell() if", "chunk_data = b'' chunk_size += PAGE_SIZE if type == \"update\": chunk_data += data", "Exception: pass if success: if attempts > 1: logging.warning(\"%s: %s attempts\" % (thr.name,", "threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs, container, file, data, offset, size, type): if type", "OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type of blob to create (page or block)') ap.add_argument('--threads',", "is not a valid blob type' % (args.blob_type)) sys.exit(1) if args.threads < 1", "blob') ap.add_argument('--account', dest='account', default=default_account, help='storage account name') ap.add_argument('--key', dest='key', default=default_key, help='storage account access", "filesize % PAGE_SIZE: logging.error('%s is not a multiple of the page size (=", "2015 <NAME> # All rights reserved. 
# # Redistribution and use in source", "page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s: waiting for upload to complete'", "page_upload(blobsvc, container, file): try: f = open(file, 'rb') except IOError as e: logging.error('error", "if type != \"update\": return 0 logging.info(\"%s: offset=%lu, length=%lu\" % (file, offset, size))", "filesize, uploaded) if args.blob_type == 'page': total_uploaded = 0 start_time = time.time() for", "['page', 'block']: logging.error('%s is not a valid blob type' % (args.blob_type)) sys.exit(1) if", "x_ms_range=range, x_ms_page_write=type) success = True except Exception: pass if success: if attempts >", "if type == \"update\": chunk_data += data if chunk_size == MAX_SIZE: uploaded +=", "page_upload(bs, args.container, file) file_end_time = time.time() # XXX show file stats total_uploaded +=", "A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE", "offset += PAGE_SIZE uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s:", "args.blob_type == 'page': total_uploaded = 0 start_time = time.time() for file in args.files:", "access key') ap.add_argument('--container', dest='container', default=None, help='storage container to upload to') ap.add_argument('--blob-type', dest='blob_type', default='page',", "if args.threads < 1 or args.threads > 64: logging.error('%s is not a valid", "file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = type chunk_start = offset chunk_size =", "sys.exit(1) if args.container is None: logging.error('Missing container name') sys.exit(1) if args.blob_type not in", "file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = None offset += PAGE_SIZE uploaded +=", "key') sys.exit(2) queue = Queue.Queue(args.threads * 2) done = False def request_handler(): global", "IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF", "None: logging.error('Missing container 
name') sys.exit(1) if args.blob_type not in ['page', 'block']: logging.error('%s is", "chunk_size, chunk_type) logging.info('%s: waiting for upload to complete' % (file)) queue.join() return (True,", "notice, this list of conditions and the following disclaimer. # 2. Redistributions in", "% (file)) queue.join() return (True, filesize, uploaded) if args.blob_type == 'page': total_uploaded =", "the following disclaimer in the # documentation and/or other materials provided with the", "%s attempts\" % (thr.name, attempts)) queue.task_done() else: logging.error(\"%s: FAILED %s\" % (thr.name, range))", "dest='container', default=None, help='storage container to upload to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type of", "\"update\": chunk_data += data if chunk_size == MAX_SIZE: uploaded += page_write(blobsvc, container, file,", "must reproduce the above copyright # notice, this list of conditions and the", "# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF", "chunk_data, chunk_start, chunk_size, chunk_type) logging.info('%s: waiting for upload to complete' % (file)) queue.join()", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "default=default_key, help='storage account access key') ap.add_argument('--container', dest='container', default=None, help='storage container to upload to')", "or not args.key: logging.error('Missing --account and/or --key information') sys.exit(1) if args.container is None:", "chunk_size = 0 chunk_data = b'' chunk_size += PAGE_SIZE if type == \"update\":", "help='the type of blob to create (page or block)') ap.add_argument('--threads', dest='threads', type=int, default=8,", "ap = argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+', help='file to upload as", "0 chunk_size = 0 chunk_data = None uploaded = 0 while offset <", "\"update\" if chunk_type != type: 
uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size,", "is not a multiple of the page size (= %d bytes)' % (file,", "+= page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type = type chunk_start =", "os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob uploader\") ap.add_argument('files', metavar='file', type=str, nargs='+', help='file to", "the # documentation and/or other materials provided with the distribution. # # THIS", "xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs, container, file, data, offset, size,", "logging.error('unable to create container %s' % (args.container)) sys.exit(2) except TypeError: # key too", "chunk_start, chunk_size, chunk_type) chunk_type = None offset += PAGE_SIZE uploaded += page_write(blobsvc, container,", "other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY", "list of conditions and the following disclaimer in the # documentation and/or other", "to complete' % (file)) queue.join() return (True, filesize, uploaded) if args.blob_type == 'page':", "retain the above copyright # notice, this list of conditions and the following", "the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS''", "key') sys.exit(2) except azure.WindowsAzureError: # invalid (wrong) key logging.error('invalid access key') sys.exit(2) queue", "request_handler(): global done thr = threading.currentThread() while not done: try: (bs, cntnr, file,", "512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze blob uploader\")", "# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,", "args.threads < 1 or args.threads > 64: logging.error('%s is not a valid thread", "threading.currentThread() while not done: try: (bs, cntnr, file, data, range, type) = queue.get(timeout=2)", "LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "page size (= %d bytes)' % (file, PAGE_SIZE)) f.close() return (False, filesize, 0)", "time.time() # XXX show total stats else: logging.error('block blobs cannot be uploaded by", "# XXX show total stats else: logging.error('block blobs cannot be uploaded by us", "False while not success and attempts < 5: attempts += 1 try: bs.put_page(cntnr,", "0 chunk_type = None chunk_start = 0 chunk_size = 0 chunk_data = None", "reserved. 
# # Redistribution and use in source and binary forms, with or", "concurrent requests [1..64]') args = ap.parse_args() if not args.account or not args.key: logging.error('Missing", "= page_upload(bs, args.container, file) file_end_time = time.time() # XXX show file stats total_uploaded", "# Redistribution and use in source and binary forms, with or without #", "+ size - 1 range = 'bytes=%lu-%lu' % (offset, end) queue.put((bs, container, file,", "EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES", "False def request_handler(): global done thr = threading.currentThread() while not done: try: (bs,", "not args.key: logging.error('Missing --account and/or --key information') sys.exit(1) if args.container is None: logging.error('Missing", "IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, #", "a valid thread argument' % (args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account, account_key=args.key) try: bs.create_container(args.container,", "logging.error('invalid access key') sys.exit(2) queue = Queue.Queue(args.threads * 2) done = False def", "Queue.Queue(args.threads * 2) done = False def request_handler(): global done thr = threading.currentThread()", "(c) 2015 <NAME> # All rights reserved. # # Redistribution and use in", "TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "try: bs.put_page(cntnr, file, data, x_ms_range=range, x_ms_page_write=type) success = True except Exception: pass if", "create container %s' % (args.container)) sys.exit(2) except TypeError: # key too short logging.error('invalid", "'bytes=%lu-%lu' % (offset, end) queue.put((bs, container, file, data, range, type)) return size def", "provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE AUTHOR", "(thr.name, range)) attempts = 0 success = False while not success and attempts", "queue.join() return (True, filesize, uploaded) if args.blob_type == 'page': total_uploaded = 0 start_time", "sys.exit(1) if args.threads < 1 or args.threads > 64: logging.error('%s is not a", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS;", "1048576 PAGE_SIZE = 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap =", "AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "True for i in xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def page_write(bs, container,", "OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON", "without # modification, are permitted provided that the following conditions # are met:", "bytearray(PAGE_SIZE): type = \"clear\" else: type = \"update\" if chunk_type != type: uploaded", "logging.info(\"%s: offset=%lu, length=%lu\" % (file, offset, size)) end = offset + size -", "TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "total_uploaded += uploaded end_time = time.time() # XXX show total stats else: logging.error('block", "PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES,", "data, range, type) = queue.get(timeout=2) except Queue.Empty: continue logging.info(\"%s: %s\" % (thr.name, range))", "binary forms, with or without # modification, are permitted provided that the following", "of source code must retain the above copyright # notice, this list of", "is None: logging.error('Missing container name') sys.exit(1) if args.blob_type not in ['page', 'block']: logging.error('%s", "= False def request_handler(): global done thr = threading.currentThread() while not done: try:", "else: logging.error(\"%s: FAILED %s\" % (thr.name, 
range)) # XXX this terminates the prohgram,", "SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY,", "return (False, filesize, 0) logging.info('Uploading %s' % (file)) blobsvc.put_blob(container, file, '', 'PageBlob', x_ms_blob_content_length=filesize)", "done: try: (bs, cntnr, file, data, range, type) = queue.get(timeout=2) except Queue.Empty: continue", "os.SEEK_SET) data = f.read(PAGE_SIZE) if data == bytearray(PAGE_SIZE): type = \"clear\" else: type", "chunk_type != type: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type", "to upload as a blob') ap.add_argument('--account', dest='account', default=default_account, help='storage account name') ap.add_argument('--key', dest='key',", "# 2. Redistributions in binary form must reproduce the above copyright # notice,", "key') ap.add_argument('--container', dest='container', default=None, help='storage container to upload to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the", "import azure.storage import logging import os import socket import sys import threading import", "f.seek(0, os.SEEK_END) filesize = f.tell() if filesize % PAGE_SIZE: logging.error('%s is not a", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT", "(status, filesize, uploaded) = page_upload(bs, args.container, file) file_end_time = time.time() # XXX show", "os.SEEK_END) filesize = f.tell() if filesize % PAGE_SIZE: logging.error('%s is not a multiple", "chunk_start = 0 chunk_size = 0 chunk_data = None uploaded = 0 while", "type != \"update\": return 0 logging.info(\"%s: offset=%lu, length=%lu\" % (file, offset, size)) end", "chunk_data += data if chunk_size == MAX_SIZE: uploaded += page_write(blobsvc, container, file, chunk_data,", "to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type of blob to create (page or block)')", "socket.gaierror as e: # invalid account logging.error('unable to create container %s' % (args.container))", "== bytearray(PAGE_SIZE): type = \"clear\" else: type = \"update\" if chunk_type != type:", "THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT", "XXX this terminates the prohgram, it doesn't stop the upload # of just", "the above copyright # notice, this list of conditions and the following disclaimer.", "done = True for i in xrange(args.threads): thr = threading.Thread(target=request_handler) thr.setDaemon(True) thr.start() def", "offset = 0 chunk_type = None chunk_start = 0 chunk_size = 0 chunk_data", "# 1. 
Redistributions of source code must retain the above copyright # notice,", "file, data, range, type)) return size def page_upload(blobsvc, container, file): try: f =", "ap.add_argument('--container', dest='container', default=None, help='storage container to upload to') ap.add_argument('--blob-type', dest='blob_type', default='page', help='the type", "prohgram, it doesn't stop the upload # of just this file done =", "PAGE_SIZE = 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap = argparse.ArgumentParser(description=\"Aruze", "GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "e: # invalid account logging.error('unable to create container %s' % (args.container)) sys.exit(2) except", "not success and attempts < 5: attempts += 1 try: bs.put_page(cntnr, file, data,", "type): if type != \"update\": return 0 logging.info(\"%s: offset=%lu, length=%lu\" % (file, offset,", "uploaded) = page_upload(bs, args.container, file) file_end_time = time.time() # XXX show file stats", "default=default_account, help='storage account name') ap.add_argument('--key', dest='key', default=default_key, help='storage account access key') ap.add_argument('--container', dest='container',", "= offset chunk_size = 0 chunk_data = b'' chunk_size += PAGE_SIZE if type", "FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "None offset += PAGE_SIZE uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type)", "a valid blob type' % (args.blob_type)) sys.exit(1) if args.threads < 1 or args.threads", "!= type: uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start, chunk_size, chunk_type) chunk_type =", "+= data if chunk_size == MAX_SIZE: uploaded += page_write(blobsvc, container, file, 
chunk_data, chunk_start,", "AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT", "not args.account or not args.key: logging.error('Missing --account and/or --key information') sys.exit(1) if args.container", "= None uploaded = 0 while offset < filesize: f.seek(offset, os.SEEK_SET) data =", "show total stats else: logging.error('block blobs cannot be uploaded by us yet') sys.exit(1)", "ap.add_argument('--key', dest='key', default=default_key, help='storage account access key') ap.add_argument('--container', dest='container', default=None, help='storage container to", "OF THE POSSIBILITY OF SUCH DAMAGE. # import argparse import azure import azure.storage", "terminates the prohgram, it doesn't stop the upload # of just this file", "logging.error('%s is not a valid thread argument' % (args.threads)) sys.exit(1) bs = azure.storage.BlobService(account_name=args.account,", "copyright # notice, this list of conditions and the following disclaimer in the", "INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS", "* 1048576 PAGE_SIZE = 512 default_account = os.getenv('AZURE_STORAGE_ACCOUNT') default_key = os.getenv('AZURE_STORAGE_KEY') logging.basicConfig(level=logging.INFO) ap", "logging.error('invalid access key') sys.exit(2) except azure.WindowsAzureError: # invalid (wrong) key logging.error('invalid access key')", "# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO," ]
[ "is None: print('Index out of bound') return else: new_node = Node(data) new_node.ref =", "None def traverse_list(self): if self.start_node is None: print(\"List is empty\") else: node =", "def insert_before_item(self, value, data): if self.start_node is None: print(\"empty list cannot insert before", "Node(data) while (node is not None): if node.data == value: node = node.ref", "value: new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node return node =", "data): self.data = data self.ref = None class LinkedList: def __init__(self): self.start_node =", "create a new node first new_node = Node(data) new_node.ref = self.start_node self.start_node =", "self.start_node = new_node return node = self.start_node while(node.ref is not None): node =", "self.start_node is None: print(\"List is empty\") else: node = self.start_node while(node is not", "return def insert_before_item(self, value, data): if self.start_node is None: print(\"empty list cannot insert", "= Node(data) new_node.ref = self.start_node self.start_node = new_node def insert_at_end(self, data): # create", "= node.ref node.ref = new_node new_linked_list = LinkedList() new_linked_list.insert_at_end(5) new_linked_list.insert_at_end(10) new_linked_list.insert_at_end(15) new_linked_list.insert_at_start(20) new_linked_list.insert_at_start(40)", "new node first new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node def", "Node(data) new_node.ref = self.start_node self.start_node = new_node return i=0 node = self.start_node while(i<index", "value, data): if self.start_node is None: print(\"empty list cannot insert before given value\")", "while (node is not None): if node.data == value: node = node.ref valueFound", "is None: self.start_node = new_node return node = self.start_node while(node.ref is not None):", "is None: print(\"List is empty\") else: node = self.start_node while(node is not None):", "data): if self.start_node is None: print(\"empty list cannot 
insert before given value\") return", "insert before given value\") return else: node = self.start_node valueFound = False new_node", "data self.ref = None class LinkedList: def __init__(self): self.start_node = None def traverse_list(self):", "= False new_node = Node(data) while (node.ref is not None): if node.data ==", "return node = self.start_node valueFound = False new_node = Node(data) while (node.ref is", "self.start_node valueFound = False new_node = Node(data) while (node is not None): if", "node.ref node.ref = new_node new_linked_list = LinkedList() new_linked_list.insert_at_end(5) new_linked_list.insert_at_end(10) new_linked_list.insert_at_end(15) new_linked_list.insert_at_start(20) new_linked_list.insert_at_start(40) new_linked_list.insert_at_index(3,", "new_node = Node(data) while (node is not None): if node.data == value: node", "<gh_stars>0 class Node: def __init__(self, data): self.data = data self.ref = None class", "return node = self.start_node while(node.ref is not None): node = node.ref node.ref =", "is empty\") else: node = self.start_node while(node is not None): print(node.data , \"", "insert_after_item(self, value, data): if self.start_node is None: print(\"empty list cannot insert before given", "new_node return node = self.start_node valueFound = False new_node = Node(data) while (node.ref", "node.ref = new_node break if valueFound == False: print (\"value not present in", "node first new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node def insert_at_end(self,", "= node.ref node.ref = new_node break if valueFound == False: print (\"value not", "of bound') return else: new_node = Node(data) new_node.ref = node.ref node.ref = new_node", "= new_node new_linked_list = LinkedList() new_linked_list.insert_at_end(5) new_linked_list.insert_at_end(10) new_linked_list.insert_at_end(15) new_linked_list.insert_at_start(20) new_linked_list.insert_at_start(40) new_linked_list.insert_at_index(3, 30) 
new_linked_list.traverse_list()", "new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node return node = self.start_node", "= new_node return node = self.start_node while(node.ref is not None): node = node.ref", "= node.ref def insert_at_start(self, data): # create a new node first new_node =", "None class LinkedList: def __init__(self): self.start_node = None def traverse_list(self): if self.start_node is", "= node.ref valueFound = True new_node.ref = node node = new_node break if", "return else: new_node = Node(data) new_node.ref = node.ref node.ref = new_node new_linked_list =", "__init__(self): self.start_node = None def traverse_list(self): if self.start_node is None: print(\"List is empty\")", "if self.start_node.data == value: new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node", "list if self.start_node is None: self.start_node = new_node return node = self.start_node while(node.ref", "return if self.start_node.data == value: new_node = Node(data) new_node.ref = self.start_node self.start_node =", "self.data = data self.ref = None class LinkedList: def __init__(self): self.start_node = None", "== value: node = node.ref valueFound = True new_node.ref = node node =", "def traverse_list(self): if self.start_node is None: print(\"List is empty\") else: node = self.start_node", "value: node = node.ref valueFound = True new_node.ref = node.ref node.ref = new_node", "print (\"value not present in the linked list\") return def insert_at_index(self, index, data):", "node.data == value: node = node.ref valueFound = True new_node.ref = node node", "None: print(\"List is empty\") else: node = self.start_node while(node is not None): print(node.data", "= self.start_node valueFound = False new_node = Node(data) while (node is not None):", "not None): node = node.ref i=i+1 if node is None: print('Index out of", "__init__(self, data): self.data = data self.ref = None class LinkedList: def __init__(self): self.start_node", 
"not None): node = node.ref node.ref = new_node def insert_after_item(self, value, data): if", "= data self.ref = None class LinkedList: def __init__(self): self.start_node = None def", "self.start_node = None def traverse_list(self): if self.start_node is None: print(\"List is empty\") else:", "if self.start_node is None: self.start_node = new_node return node = self.start_node while(node.ref is", "= node.ref node.ref = new_node def insert_after_item(self, value, data): if self.start_node is None:", "if self.start_node is None: print(\"List is empty\") else: node = self.start_node while(node is", "== 0: new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node return i=0", "a new node first new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node", "print(node.data , \" \") node = node.ref def insert_at_start(self, data): # create a", "new_node = Node(data) # traverse the list if self.start_node is None: self.start_node =", "print(\"empty list cannot insert before given value\") return if self.start_node.data == value: new_node", "node = self.start_node valueFound = False new_node = Node(data) while (node.ref is not", "= Node(data) new_node.ref = self.start_node self.start_node = new_node return node = self.start_node valueFound", "True new_node.ref = node.ref node.ref = new_node break if valueFound == False: print", "present in the linked list\") return def insert_before_item(self, value, data): if self.start_node is", "list cannot insert before given value\") return else: node = self.start_node valueFound =", "node.ref valueFound = True new_node.ref = node.ref node.ref = new_node break if valueFound", "i=i+1 if node is None: print('Index out of bound') return else: new_node =", "\" \") node = node.ref def insert_at_start(self, data): # create a new node", "Node(data) # traverse the list if self.start_node is None: self.start_node = new_node return", "None): node = node.ref i=i+1 if node is None: print('Index out of 
bound')", "== False: print (\"value not present in the linked list\") return def insert_before_item(self,", "is None: print(\"empty list cannot insert before given value\") return if self.start_node.data ==", "return def insert_at_index(self, index, data): if index == 0: new_node = Node(data) new_node.ref", "new_node return node = self.start_node while(node.ref is not None): node = node.ref node.ref", "out of bound') return else: new_node = Node(data) new_node.ref = node.ref node.ref =", "node = new_node break if valueFound == False: print (\"value not present in", "Node(data) new_node.ref = self.start_node self.start_node = new_node return node = self.start_node valueFound =", "node.data == value: node = node.ref valueFound = True new_node.ref = node.ref node.ref", "self.start_node self.start_node = new_node return node = self.start_node valueFound = False new_node =", "node is not None): node = node.ref i=i+1 if node is None: print('Index", "a new node first new_node = Node(data) # traverse the list if self.start_node", "self.start_node valueFound = False new_node = Node(data) while (node.ref is not None): if", "False: print (\"value not present in the linked list\") return def insert_at_index(self, index,", "valueFound = False new_node = Node(data) while (node is not None): if node.data", "0: new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node return i=0 node", "traverse_list(self): if self.start_node is None: print(\"List is empty\") else: node = self.start_node while(node", "insert before given value\") return if self.start_node.data == value: new_node = Node(data) new_node.ref", "= new_node break if valueFound == False: print (\"value not present in the", "is not None): print(node.data , \" \") node = node.ref def insert_at_start(self, data):", "# traverse the list if self.start_node is None: self.start_node = new_node return node", "node = node.ref i=i+1 if node is None: print('Index out of bound') return", "new_node.ref = 
node.ref node.ref = new_node new_linked_list = LinkedList() new_linked_list.insert_at_end(5) new_linked_list.insert_at_end(10) new_linked_list.insert_at_end(15) new_linked_list.insert_at_start(20)", "node.ref = new_node new_linked_list = LinkedList() new_linked_list.insert_at_end(5) new_linked_list.insert_at_end(10) new_linked_list.insert_at_end(15) new_linked_list.insert_at_start(20) new_linked_list.insert_at_start(40) new_linked_list.insert_at_index(3, 30)", "Node(data) new_node.ref = self.start_node self.start_node = new_node def insert_at_end(self, data): # create a", "the linked list\") return def insert_before_item(self, value, data): if self.start_node is None: print(\"empty", "traverse the list if self.start_node is None: self.start_node = new_node return node =", "return else: node = self.start_node valueFound = False new_node = Node(data) while (node", "node.ref node.ref = new_node break if valueFound == False: print (\"value not present", "self.start_node.data == value: new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node return", "is not None): node = node.ref i=i+1 if node is None: print('Index out", "def insert_at_start(self, data): # create a new node first new_node = Node(data) new_node.ref", "class Node: def __init__(self, data): self.data = data self.ref = None class LinkedList:", "= new_node def insert_after_item(self, value, data): if self.start_node is None: print(\"empty list cannot", "= self.start_node self.start_node = new_node return node = self.start_node valueFound = False new_node", "self.start_node self.start_node = new_node return i=0 node = self.start_node while(i<index and node is", "new_node = Node(data) new_node.ref = node.ref node.ref = new_node new_linked_list = LinkedList() new_linked_list.insert_at_end(5)", "node.ref node.ref = new_node def insert_after_item(self, value, data): if self.start_node is None: print(\"empty", "def insert_at_index(self, index, data): if index == 0: new_node = Node(data) 
new_node.ref =", "Node: def __init__(self, data): self.data = data self.ref = None class LinkedList: def", "LinkedList: def __init__(self): self.start_node = None def traverse_list(self): if self.start_node is None: print(\"List", "if node is None: print('Index out of bound') return else: new_node = Node(data)", "not None): print(node.data , \" \") node = node.ref def insert_at_start(self, data): #", "while (node.ref is not None): if node.data == value: node = node.ref valueFound", "else: node = self.start_node while(node is not None): print(node.data , \" \") node", "False: print (\"value not present in the linked list\") return def insert_before_item(self, value,", "the linked list\") return def insert_at_index(self, index, data): if index == 0: new_node", "insert_at_start(self, data): # create a new node first new_node = Node(data) new_node.ref =", "= new_node return i=0 node = self.start_node while(i<index and node is not None):", "empty\") else: node = self.start_node while(node is not None): print(node.data , \" \")", ", \" \") node = node.ref def insert_at_start(self, data): # create a new", "None): if node.data == value: node = node.ref valueFound = True new_node.ref =", "Node(data) new_node.ref = node.ref node.ref = new_node new_linked_list = LinkedList() new_linked_list.insert_at_end(5) new_linked_list.insert_at_end(10) new_linked_list.insert_at_end(15)", "# create a new node first new_node = Node(data) # traverse the list", "node = node.ref def insert_at_start(self, data): # create a new node first new_node", "None: self.start_node = new_node return node = self.start_node while(node.ref is not None): node", "= True new_node.ref = node node = new_node break if valueFound == False:", "node node = new_node break if valueFound == False: print (\"value not present", "list\") return def insert_before_item(self, value, data): if self.start_node is None: print(\"empty list cannot", "new_node.ref = node.ref node.ref = new_node break if valueFound == False: print 
(\"value", "node.ref valueFound = True new_node.ref = node node = new_node break if valueFound", "= self.start_node while(i<index and node is not None): node = node.ref i=i+1 if", "= self.start_node self.start_node = new_node return i=0 node = self.start_node while(i<index and node", "and node is not None): node = node.ref i=i+1 if node is None:", "= Node(data) while (node is not None): if node.data == value: node =", "node = self.start_node while(node is not None): print(node.data , \" \") node =", "def insert_at_end(self, data): # create a new node first new_node = Node(data) #", "insert_at_end(self, data): # create a new node first new_node = Node(data) # traverse", "== value: node = node.ref valueFound = True new_node.ref = node.ref node.ref =", "self.start_node = new_node return i=0 node = self.start_node while(i<index and node is not", "linked list\") return def insert_before_item(self, value, data): if self.start_node is None: print(\"empty list", "= node.ref i=i+1 if node is None: print('Index out of bound') return else:", "valueFound = False new_node = Node(data) while (node.ref is not None): if node.data", "self.start_node is None: print(\"empty list cannot insert before given value\") return if self.start_node.data", "create a new node first new_node = Node(data) # traverse the list if", "node = node.ref node.ref = new_node def insert_after_item(self, value, data): if self.start_node is", "= node node = new_node break if valueFound == False: print (\"value not", "return i=0 node = self.start_node while(i<index and node is not None): node =", "= Node(data) # traverse the list if self.start_node is None: self.start_node = new_node", "\") node = node.ref def insert_at_start(self, data): # create a new node first", "i=0 node = self.start_node while(i<index and node is not None): node = node.ref", "== False: print (\"value not present in the linked list\") return def insert_at_index(self,", "index == 0: new_node = Node(data) new_node.ref = self.start_node 
self.start_node = new_node return", "new_node.ref = self.start_node self.start_node = new_node return node = self.start_node valueFound = False", "else: node = self.start_node valueFound = False new_node = Node(data) while (node is", "new_node break if valueFound == False: print (\"value not present in the linked", "first new_node = Node(data) # traverse the list if self.start_node is None: self.start_node", "= Node(data) while (node.ref is not None): if node.data == value: node =", "if node.data == value: node = node.ref valueFound = True new_node.ref = node", "value: node = node.ref valueFound = True new_node.ref = node node = new_node", "= self.start_node self.start_node = new_node def insert_at_end(self, data): # create a new node", "node = node.ref valueFound = True new_node.ref = node node = new_node break", "insert_before_item(self, value, data): if self.start_node is None: print(\"empty list cannot insert before given", "== value: new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node return node", "= node.ref valueFound = True new_node.ref = node.ref node.ref = new_node break if", "new_node.ref = node node = new_node break if valueFound == False: print (\"value", "data): # create a new node first new_node = Node(data) # traverse the", "first new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node def insert_at_end(self, data):", "print(\"empty list cannot insert before given value\") return else: node = self.start_node valueFound", "is not None): if node.data == value: node = node.ref valueFound = True", "valueFound = True new_node.ref = node node = new_node break if valueFound ==", "if self.start_node is None: print(\"empty list cannot insert before given value\") return else:", "(\"value not present in the linked list\") return def insert_before_item(self, value, data): if", "if self.start_node is None: print(\"empty list cannot insert before given value\") return if", "in the linked list\") return def 
insert_at_index(self, index, data): if index == 0:", "self.start_node while(i<index and node is not None): node = node.ref i=i+1 if node", "new_node.ref = self.start_node self.start_node = new_node return i=0 node = self.start_node while(i<index and", "bound') return else: new_node = Node(data) new_node.ref = node.ref node.ref = new_node new_linked_list", "= None def traverse_list(self): if self.start_node is None: print(\"List is empty\") else: node", "node = self.start_node while(node.ref is not None): node = node.ref node.ref = new_node", "print (\"value not present in the linked list\") return def insert_before_item(self, value, data):", "self.start_node self.start_node = new_node def insert_at_end(self, data): # create a new node first", "self.start_node is None: self.start_node = new_node return node = self.start_node while(node.ref is not", "node.ref = new_node def insert_after_item(self, value, data): if self.start_node is None: print(\"empty list", "False new_node = Node(data) while (node.ref is not None): if node.data == value:", "= True new_node.ref = node.ref node.ref = new_node break if valueFound == False:", "# create a new node first new_node = Node(data) new_node.ref = self.start_node self.start_node", "False new_node = Node(data) while (node is not None): if node.data == value:", "in the linked list\") return def insert_before_item(self, value, data): if self.start_node is None:", "print(\"List is empty\") else: node = self.start_node while(node is not None): print(node.data ,", "class LinkedList: def __init__(self): self.start_node = None def traverse_list(self): if self.start_node is None:", "the list if self.start_node is None: self.start_node = new_node return node = self.start_node", "None: print('Index out of bound') return else: new_node = Node(data) new_node.ref = node.ref", "data): if index == 0: new_node = Node(data) new_node.ref = self.start_node self.start_node =", "is not None): node = node.ref node.ref = new_node def 
insert_after_item(self, value, data):", "new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node def insert_at_end(self, data): #", "value\") return else: node = self.start_node valueFound = False new_node = Node(data) while", "node = self.start_node valueFound = False new_node = Node(data) while (node is not", "node.ref i=i+1 if node is None: print('Index out of bound') return else: new_node", "None: print(\"empty list cannot insert before given value\") return if self.start_node.data == value:", "= self.start_node valueFound = False new_node = Node(data) while (node.ref is not None):", "valueFound == False: print (\"value not present in the linked list\") return def", "new node first new_node = Node(data) # traverse the list if self.start_node is", "self.start_node is None: print(\"empty list cannot insert before given value\") return else: node", "cannot insert before given value\") return if self.start_node.data == value: new_node = Node(data)", "value\") return if self.start_node.data == value: new_node = Node(data) new_node.ref = self.start_node self.start_node", "node = node.ref valueFound = True new_node.ref = node.ref node.ref = new_node break", "= self.start_node while(node is not None): print(node.data , \" \") node = node.ref", "(\"value not present in the linked list\") return def insert_at_index(self, index, data): if", "break if valueFound == False: print (\"value not present in the linked list\")", "node is None: print('Index out of bound') return else: new_node = Node(data) new_node.ref", "while(node.ref is not None): node = node.ref node.ref = new_node def insert_after_item(self, value,", "before given value\") return else: node = self.start_node valueFound = False new_node =", "present in the linked list\") return def insert_at_index(self, index, data): if index ==", "self.start_node while(node is not None): print(node.data , \" \") node = node.ref def", "= Node(data) new_node.ref = node.ref node.ref = new_node 
new_linked_list = LinkedList() new_linked_list.insert_at_end(5) new_linked_list.insert_at_end(10)", "given value\") return else: node = self.start_node valueFound = False new_node = Node(data)", "new_node return i=0 node = self.start_node while(i<index and node is not None): node", "not present in the linked list\") return def insert_before_item(self, value, data): if self.start_node", "if valueFound == False: print (\"value not present in the linked list\") return", "= new_node return node = self.start_node valueFound = False new_node = Node(data) while", "def insert_after_item(self, value, data): if self.start_node is None: print(\"empty list cannot insert before", "index, data): if index == 0: new_node = Node(data) new_node.ref = self.start_node self.start_node", "new_node def insert_at_end(self, data): # create a new node first new_node = Node(data)", "self.ref = None class LinkedList: def __init__(self): self.start_node = None def traverse_list(self): if", "self.start_node = new_node def insert_at_end(self, data): # create a new node first new_node", "new_node.ref = self.start_node self.start_node = new_node def insert_at_end(self, data): # create a new", "= self.start_node while(node.ref is not None): node = node.ref node.ref = new_node def", "not None): if node.data == value: node = node.ref valueFound = True new_node.ref", "True new_node.ref = node node = new_node break if valueFound == False: print", "list cannot insert before given value\") return if self.start_node.data == value: new_node =", "valueFound = True new_node.ref = node.ref node.ref = new_node break if valueFound ==", "node = self.start_node while(i<index and node is not None): node = node.ref i=i+1", "given value\") return if self.start_node.data == value: new_node = Node(data) new_node.ref = self.start_node", "while(i<index and node is not None): node = node.ref i=i+1 if node is", "else: new_node = Node(data) new_node.ref = node.ref node.ref = new_node new_linked_list = LinkedList()", "= 
Node(data) new_node.ref = self.start_node self.start_node = new_node return i=0 node = self.start_node", "= False new_node = Node(data) while (node is not None): if node.data ==", "new_node def insert_after_item(self, value, data): if self.start_node is None: print(\"empty list cannot insert", "not present in the linked list\") return def insert_at_index(self, index, data): if index", "def __init__(self, data): self.data = data self.ref = None class LinkedList: def __init__(self):", "data): # create a new node first new_node = Node(data) new_node.ref = self.start_node", "print('Index out of bound') return else: new_node = Node(data) new_node.ref = node.ref node.ref", "cannot insert before given value\") return else: node = self.start_node valueFound = False", "list\") return def insert_at_index(self, index, data): if index == 0: new_node = Node(data)", "Node(data) while (node.ref is not None): if node.data == value: node = node.ref", "new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node return i=0 node =", "None): print(node.data , \" \") node = node.ref def insert_at_start(self, data): # create", "if node.data == value: node = node.ref valueFound = True new_node.ref = node.ref", "while(node is not None): print(node.data , \" \") node = node.ref def insert_at_start(self,", "is None: print(\"empty list cannot insert before given value\") return else: node =", "None: print(\"empty list cannot insert before given value\") return else: node = self.start_node", "if index == 0: new_node = Node(data) new_node.ref = self.start_node self.start_node = new_node", "linked list\") return def insert_at_index(self, index, data): if index == 0: new_node =", "before given value\") return if self.start_node.data == value: new_node = Node(data) new_node.ref =", "None): node = node.ref node.ref = new_node def insert_after_item(self, value, data): if self.start_node", "(node.ref is not None): if node.data == value: node = node.ref valueFound =", "node first 
new_node = Node(data) # traverse the list if self.start_node is None:", "def __init__(self): self.start_node = None def traverse_list(self): if self.start_node is None: print(\"List is", "= new_node def insert_at_end(self, data): # create a new node first new_node =", "insert_at_index(self, index, data): if index == 0: new_node = Node(data) new_node.ref = self.start_node", "self.start_node = new_node return node = self.start_node valueFound = False new_node = Node(data)", "(node is not None): if node.data == value: node = node.ref valueFound =", "new_node = Node(data) while (node.ref is not None): if node.data == value: node", "= None class LinkedList: def __init__(self): self.start_node = None def traverse_list(self): if self.start_node", "self.start_node while(node.ref is not None): node = node.ref node.ref = new_node def insert_after_item(self,", "node.ref def insert_at_start(self, data): # create a new node first new_node = Node(data)" ]
[ "import fileinput for line in fileinput.input(): try: host, rest = line.strip().split(\")\", 1) host", "python3 import fileinput for line in fileinput.input(): try: host, rest = line.strip().split(\")\", 1)", "= line.strip().split(\")\", 1) host = \".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest or '/'}\") except BrokenPipeError: break except:", "for line in fileinput.input(): try: host, rest = line.strip().split(\")\", 1) host = \".\".join(reversed(host.strip(\",\").split(\",\")))", "rest = line.strip().split(\")\", 1) host = \".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest or '/'}\") except BrokenPipeError: break", "1) host = \".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest or '/'}\") except BrokenPipeError: break except: print(line, end=\"\")", "line in fileinput.input(): try: host, rest = line.strip().split(\")\", 1) host = \".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest", "line.strip().split(\")\", 1) host = \".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest or '/'}\") except BrokenPipeError: break except: print(line,", "#!/usr/bin/env python3 import fileinput for line in fileinput.input(): try: host, rest = line.strip().split(\")\",", "fileinput for line in fileinput.input(): try: host, rest = line.strip().split(\")\", 1) host =", "in fileinput.input(): try: host, rest = line.strip().split(\")\", 1) host = \".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest or", "fileinput.input(): try: host, rest = line.strip().split(\")\", 1) host = \".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest or '/'}\")", "host, rest = line.strip().split(\")\", 1) host = \".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest or '/'}\") except BrokenPipeError:", "try: host, rest = line.strip().split(\")\", 1) host = 
\".\".join(reversed(host.strip(\",\").split(\",\"))) print(f\"https://{host}{rest or '/'}\") except" ]
[ "pred_w = preds.size(2), preds.size(3) assert pred_h == h and pred_w == w loss", "= 0 label = ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch, h, w, num_cls)", "= input_label[valid_flag] num_valid = valid_flag.sum() if num_valid > 0: prob = input_prob[:, valid_flag]", "candidate anchor pixels seg_num_list = [] # the number of low_valid pixels in", "# (num_labeled, h, w, num_cls) _, prob_indices_u = torch.sort(prob_u, 1, True) prob_indices_u =", "= np.partition(pred, k_th) new_threshold = new_array[k_th] if new_threshold > self.thresh: threshold = new_threshold", "-1)) valid_flag = input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid", "reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index self.thresh = float(thresh) self.min_kept = int(min_kept)", "valid_flag = input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid =", "input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds = np.where(valid_flag)[0]", "not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor( [ 0.0, 0.0, 0.0,", "int(self.min_kept_ratio * n * h * w) input_label = target.ravel().astype(np.int32) input_prob = np.rollaxis(predict,", "dim=1) entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1) thresh = np.percentile( entropy[target", "= float(thresh) self.min_kept = int(min_kept) self.factor = factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self,", "in each class seg_proto_list = [] # the center of each class _,", ") return new_target def forward(self, predict, target, weight=None): \"\"\" Args: predict:(n, c, h,", "255 weight = batch_size * h * w / torch.sum(target != 255) loss", "(num_queries, 1 + num_negative, num_feat) seg_logits = 
torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss", "self.ignore_label = ignore_label self.thresh = float(thresh) self.min_kept = int(min_kept) self.factor = factor self.criterion", "nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1) target = nd.zoom(np_target,", "= predict.shape temp_tar = target.clone() temp_tar[target == 255] = 0 label = (", "torch.no_grad(): # drop pixels with high entropy prob = torch.softmax(pred_teacher, dim=1) entropy =", "pixels seg_num_list = [] # the number of low_valid pixels in each class", "label_u, prob_l, prob_u, low_mask, high_mask, cfg, memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ):", "1 + num_negative, num_feat) seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss =", "given, has to be a Tensor of size \"nclasses\" \"\"\" assert not target.requires_grad", "1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0,", "0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif reduce: self.criterion = torch.nn.CrossEntropyLoss(", "target.view(-1) valid_mask = target.ne(self.ignore_index) target = target * valid_mask.long() num_valid = valid_mask.sum() prob", "for labeled data # label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0)", "weight=weight, ignore_index=ignore_index ) elif reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else: self.criterion", "reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index", "1, True) prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w, num_cls)", "mini-batch might only contain 1 or no semantic class if momentum_prototype is None:", "main_w == aux_w and main_h == h and 
main_w == w ) loss1", "valid_mask * kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index) target = target.view(b, h, w) return", "!= 255) loss = weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321]", "> 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list) <= 1 ): # in some", "weight=None): \"\"\" Args: predict:(n, c, h, w) target:(n, h, w) weight (Tensor, optional):", "\"nclasses\" \"\"\" assert not target.requires_grad input_prob = F.softmax(predict, 1) target = self.generate_new_target(input_prob, target)", "num_negatives = cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled = label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel", "(batch, h, w, num_cls) label = rearrange(label, \"b h w c -> b", "h and main_w == w ) if self.use_weight: loss1 = self._criterion(main_pred, target) +", ").cuda() # weight = torch.FloatTensor( # [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333,", "order=0) n, c, h, w = predict.shape min_kept = self.min_kept // ( factor", "is None: return new_keys, torch.tensor(0.0) * rep.sum() else: return momentum_prototype, new_keys, torch.tensor(0.0) *", "size=(num_queries * num_negatives,) ) negative_feat = negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape( num_queries, num_negatives, num_feat", "high entropy prob = torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob * torch.log(prob + 1e-10),", "= input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) )", "== aux_h and main_w == aux_w and main_h == h and main_w ==", "mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] threshold = self.thresh if self.min_kept > 0: _,", ") if momentum_prototype is None: return new_keys, reco_loss / valid_seg else: return prototype,", "num_negatives, num_feat 
) positive_feat = ( seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1) .cuda()", "all queries in the current query class are easy reco_loss = reco_loss +", "rare cases, a small mini-batch might only contain 1 or no semantic class", "low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg < current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) #", "* valid_mask.long() num_valid = valid_mask.sum() prob = F.softmax(pred, dim=1) prob = (prob.transpose(0, 1)).reshape(c,", "= input_label[valid_flag] num_valid = valid_flag.sum() if min_kept >= num_valid: threshold = 1.0 elif", "# candidate anchor pixels seg_num_list = [] # the number of low_valid pixels", "torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool() class_mask = torch.cat( (class_mask_l * (label_l[:, i] ==", "assert pred_h == h and pred_w == w loss = self._criterion(preds, target) return", "= F.softmax(predict, dim=1) with torch.no_grad(): _, num_cls, h, w = predict.shape temp_tar =", "ignore_index=ignore_index ) else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def forward(self, pred, target):", "target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds =", "elif num_valid > 0: prob = prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]", "= prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool() class_mask = torch.cat( (class_mask_l", "einops import rearrange predict = F.softmax(predict, dim=1) with torch.no_grad(): _, num_cls, h, w", "new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return new_target def forward(self, predict, target,", "self.criterion = 
torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index )", "valid_seg else: return prototype, new_keys, reco_loss / valid_seg def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"]", "prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] threshold = self.thresh if self.min_kept >", "each class. If given, has to be a Tensor of size \"nclasses\" \"\"\"", "valid_mask.long() num_valid = valid_mask.sum() prob = F.softmax(pred, dim=1) prob = (prob.transpose(0, 1)).reshape(c, -1)", "== w ) loss1 = self._criterion1(main_pred, target) loss2 = self._criterion2(aux_pred, target) loss =", "range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask for i-th class high_valid_pixel_seg", "-torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool() return rce.sum() / (target !=", "if momentum_prototype is not None: if not (momentum_prototype == 0).all(): ema_decay = min(1", "/ torch.sum(target != 255) loss = weight * F.cross_entropy(predict, target, ignore_index=255) # [10,", "# downsample 1/8 factor = self.factor predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 /", "momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone() all_feat = torch.cat( (positive_feat, negative_feat), dim=1 )", "prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3 ).bool() # generate", "aux_w and main_h == h and main_w == w ) loss1 = self._criterion1(main_pred,", "= positive_feat.clone() all_feat = torch.cat( (positive_feat, negative_feat), dim=1 ) # (num_queries, 1 +", "num_queries = cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled = label_l.shape[0] num_segments", "criterion class Criterion(nn.Module): def __init__(self, aux_weight, 
ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight = aux_weight", "b c h w\") label = torch.clamp(label, min=1e-4, max=1.0) rce = -torch.sum(predict *", "cfg_criterion[\"type\"] == \"ohem\": criterion = CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion =", "use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index self.thresh = float(thresh) self.min_kept =", "negative_feat = negative_feat.reshape( num_queries, num_negatives, num_feat ) positive_feat = ( seg_proto[i] .unsqueeze(0) .unsqueeze(0)", "new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item()))", "w) target:(n, h, w) weight (Tensor, optional): a manual rescaling weight given to", "(prob.transpose(0, 1)).reshape(c, -1) if self.min_kept > num_valid: pass # print('Labels: {}'.format(num_valid)) elif num_valid", "max=1.0) rce = -torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool() return rce.sum()", "== 255] = 0 label = ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch, h,", "= target.data.cpu().numpy() n, c, h, w = np_predict.shape threshold = self.find_threshold(np_predict, np_target) input_label", "given to each class. 
If given, has to be a Tensor of size", "thresh=0.7, min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index self.thresh = float(thresh)", "\"\"\" def __init__( self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index", "cfg[\"net\"].get(\"aux_loss\", False) else 0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\": criterion", "label_l, label_u, prob_l, prob_u, low_mask, high_mask, cfg, memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0,", "(class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0 ) negative_mask = rep_mask_high_entropy *", "i] == 0), class_mask_u), dim=0 ) negative_mask = rep_mask_high_entropy * class_mask keys =", "cfg_criterion = cfg[\"criterion\"] aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else 0 )", "( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return new_target def forward(self, predict, target, weight=None): \"\"\"", "momentum_prototype is not None: if not (momentum_prototype == 0).all(): ema_decay = min(1 -", "= OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def forward(self, preds, target): h, w = target.size(1), target.size(2)", "F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype is None: return new_keys, reco_loss", "size=(num_queries,) ) anchor_feat = ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in some rare cases,", "= cfg[\"criterion\"] aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else 0 ) ignore_index", "new_array = np.partition(pred, k_th) new_threshold = new_array[k_th] if new_threshold > self.thresh: threshold =", "might only contain 1 or no semantic 
class if momentum_prototype is None: return", "number of valid classes prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda() for", "super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label self.thresh = float(thresh) self.min_kept = int(min_kept) self.factor =", "reduction=\"none\", ignore_index=ignore_index ) else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def forward(self, pred,", "!= 255].detach().cpu().numpy().flatten(), percent ) thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool() target[thresh_mask] =", "0.0, 1.0, 0.0, 1.0, 1.0, 1.0, ] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 =", "* high_mask rep = rep.permute(0, 2, 3, 1) rep_teacher = rep_teacher.permute(0, 2, 3,", "1.0, 1.0 / factor, 1.0 / factor), order=1) target = nd.zoom(np_target, (1.0, 1.0", "cfg[\"criterion\"] aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else 0 ) ignore_index =", "0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882, # 1.4286, 0.5,", "int(min_kept) self.factor = factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict, np_target): # downsample", "assert not target.requires_grad input_prob = F.softmax(predict, 1) target = self.generate_new_target(input_prob, target) return self.criterion(predict,", ") ) if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list) <= 1", "np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label =", "valid classes prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda() for i in", "0.5263, 0.5882, # 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion = 
torch.nn.CrossEntropyLoss( reduction=\"mean\",", "loss = loss1 + self._aux_weight * loss2 else: pred_h, pred_w = preds.size(2), preds.size(3)", "= factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict, np_target): # downsample 1/8 factor", "1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, ] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1", "class_mask_u = torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3 ).bool() # generate class mask", "target): np_predict = predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n, c, h, w = np_predict.shape", "= target * kept_mask.long() valid_mask = valid_mask * kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index)", "w = pred.size() target = target.view(-1) valid_mask = target.ne(self.ignore_index) target = target *", "(label_l[:, i] == 0) # prob_i_classes = prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :, :,", "num_labeled] * (label_l[:, i] == 0) # prob_i_classes = prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:,", "* low_mask high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask rep = rep.permute(0, 2,", "# generate class mask for unlabeled data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u", "torch import torch.nn as nn from torch.nn import functional as F from .utils", "3, 1) rep_teacher = rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list = [] seg_feat_low_entropy_list =", "seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss = reco_loss + F.cross_entropy( seg_logits", "= main_pred.size(2), main_pred.size(3) aux_h, aux_w = aux_pred.size(2), aux_pred.size(3) assert ( len(preds) == 2", "[] # the center of each class _, prob_indices_l = torch.sort(prob_l, 1, True)", "self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def 
find_threshold(self, np_predict, np_target): # downsample 1/8 factor = self.factor", "0 label = ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch, h, w, num_cls) label", "# in some rare cases, a small mini-batch might only contain 1 or", "positive_feat.clone() all_feat = torch.cat( (positive_feat, negative_feat), dim=1 ) # (num_queries, 1 + num_negative,", "the current query class are easy reco_loss = reco_loss + 0 * rep.sum()", "h and pred_w == w loss = self._criterion1(preds, target) return loss class OhemCrossEntropy2d(nn.Module):", "rep.shape[1] num_labeled = label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel = torch.cat((label_l, label_u), dim=0) *", "input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] kept_flag = pred <= threshold valid_inds", "import numpy as np import scipy.ndimage as nd import torch import torch.nn as", "a Tensor of size \"nclasses\" \"\"\" assert not target.requires_grad input_prob = F.softmax(predict, 1)", "pred_h, pred_w = preds.size(2), preds.size(3) assert pred_h == h and pred_w == w", "no gradients) with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries *", "if self._aux_weight > 0: # require aux loss main_pred, aux_pred = preds main_h,", "temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype is None: return new_keys, reco_loss / valid_seg else:", "ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight = aux_weight self._ignore_index = ignore_index self.use_weight = use_weight", "* low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg < current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy])", "= torch.FloatTensor( [ 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,", "* (label_l[:, i] 
== 0), class_mask_u), dim=0 ) negative_mask = rep_mask_high_entropy * class_mask", "pred_h == h and pred_w == w loss = self._criterion(preds, target) return loss", "input_label[valid_inds] = label new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return new_target def", "( len(preds) == 2 and main_h == aux_h and main_w == aux_w and", "# label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0) # prob_i_classes =", "1) rep_teacher = rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list = [] seg_feat_low_entropy_list = []", "= torch.cat((label_l, label_u), dim=0) * low_mask high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask", "== h and pred_w == w loss = self._criterion1(preds, target) return loss class", "= -torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool() return rce.sum() / (target", "import torch import torch.nn as nn from torch.nn import functional as F from", "low_mask high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask rep = rep.permute(0, 2, 3,", "if not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor( [ 0.0, 0.0,", "cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else 0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] ==", "= self.min_kept // ( factor * factor ) # int(self.min_kept_ratio * n *", "queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ): # current_class_threshold: delta_p (0.3) # current_class_negative_threshold: delta_n (1)", "require aux loss main_pred, aux_pred = preds main_h, main_w = main_pred.size(2), main_pred.size(3) aux_h,", "negative key sampling from memory bank (with no gradients) with torch.no_grad(): negative_feat =", "self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def forward(self, pred, target): b, c, h,", "= self.factor predict = 
nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor),", "anchor pixels seg_num_list = [] # the number of low_valid pixels in each", "class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool() class_mask = torch.cat( (class_mask_l * (label_l[:,", "self.generate_new_target(input_prob, target) return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy Tensor Version", "aux_weight self._ignore_index = ignore_index self.use_weight = use_weight if not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)", ") reco_loss = reco_loss + F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype", "kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index) target = target.view(b, h, w) return self.criterion(pred, target)", "= torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda() for i in range(valid_seg): if (", "__init__(self, aux_weight, ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight = aux_weight self._ignore_index = ignore_index self.use_weight", "ema_decay = min(1 - 1 / i_iter, 0.999) positive_feat = ( 1 -", "self.min_kept > num_valid: pass # print('Labels: {}'.format(num_valid)) elif num_valid > 0: prob =", "query class are easy reco_loss = reco_loss + 0 * rep.sum() continue #", "= cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\": criterion = CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] )", "ignore_index, thresh, min_kept, use_weight ) self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def forward(self, preds,", "1.0865, 1.1529, 1.0507, ] ).cuda() # weight = torch.FloatTensor( # [0.4762, 0.5, 0.4762,", "torch.FloatTensor( [ 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0,", "import torch.nn as nn from torch.nn import functional as F from .utils import", "0.5, 0.4762, 
1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882, # 1.4286,", "current_class_negative_threshold: delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"],", "* momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone() all_feat = torch.cat( (positive_feat, negative_feat), dim=1", "np_target) input_label = np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag = input_label !=", "if self.min_kept > num_valid: pass # print('Labels: {}'.format(num_valid)) elif num_valid > 0: prob", "aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight ) self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh,", "torch.nn as nn from torch.nn import functional as F from .utils import dequeue_and_enqueue", "self.min_kept = int(min_kept) self.factor = factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict, np_target):", "[] for i in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask", "queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list) <=", "has to be a Tensor of size \"nclasses\" \"\"\" assert not target.requires_grad input_prob", "= ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return new_target def forward(self, predict, target, weight=None):", "import rearrange predict = F.softmax(predict, dim=1) with torch.no_grad(): _, num_cls, h, w =", "[] new_keys = [] for i in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i] #", "2, 3, 1 ) # (num_unlabeled, h, w, num_cls) prob = torch.cat((prob_l, prob_u),", "target * valid_mask.long() num_valid = 
valid_mask.sum() prob = F.softmax(pred, dim=1) prob = (prob.transpose(0,", "0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, ] ).cuda()", "F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch, h, w, num_cls) label = rearrange(label, \"b h", ":, :low_rank].eq(i), dim=3).bool() class_mask = torch.cat( (class_mask_l * (label_l[:, i] == 0), class_mask_u),", "threshold = mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target = target * kept_mask.long() valid_mask =", "0: prob = prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] threshold = self.thresh", "num_feat) seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss = reco_loss + F.cross_entropy(", "label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask high_valid_pixel =", "np_predict.shape threshold = self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c, -1))", "reduction=\"mean\", ignore_index=ignore_index ) def forward(self, pred, target): b, c, h, w = pred.size()", "return new_keys, reco_loss / valid_seg else: return prototype, new_keys, reco_loss / valid_seg def", "/ i_iter, 0.999) positive_feat = ( 1 - ema_decay ) * positive_feat +", "np_predict, np_target): # downsample 1/8 factor = self.factor predict = nd.zoom(np_predict, (1.0, 1.0,", "(0.3) # current_class_negative_threshold: delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank", "1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, ] ).cuda() self._criterion", "= rearrange(label, \"b h w c -> b c h w\") label =", "= input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum()", "prob = 
prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] threshold = self.thresh if", "3, 1 ) # (num_unlabeled, h, w, num_cls) prob = torch.cat((prob_l, prob_u), dim=0)", "range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0] > 0 ): # select", "( prob_seg > current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg < current_class_negative_threshold", "1.0, ] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def", "loss2 = self._criterion(aux_pred, target) loss = loss1 + self._aux_weight * loss2 else: pred_h,", "class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label", "num_valid = valid_flag.sum() if num_valid > 0: prob = input_prob[:, valid_flag] pred =", "= label new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return new_target def forward(self,", "generate_new_target(self, predict, target): np_predict = predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n, c, h, w", "thresh, min_kept, use_weight ) self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def forward(self, preds, target):", "= index[min(len(index), self.min_kept) - 1] if mask_prob[threshold_index] > self.thresh: threshold = mask_prob[threshold_index] kept_mask", "self.use_weight: loss1 = self._criterion(main_pred, target) + self._criterion1( main_pred, target ) else: loss1 =", "new_threshold = new_array[k_th] if new_threshold > self.thresh: threshold = new_threshold return threshold def", "w = predict.shape temp_tar = target.clone() temp_tar[target == 255] = 0 label =", "label = input_label[valid_flag] num_valid = valid_flag.sum() if min_kept >= 
num_valid: threshold = 1.0", "] ).cuda() # weight = torch.FloatTensor( # [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762,", "= float(thresh) self.min_kept = int(min_kept) if use_weight: weight = torch.FloatTensor( [ 0.8373, 0.918,", "0 and memobank[valid_classes[i]][0].shape[0] > 0 ): # select anchor pixel seg_low_entropy_idx = torch.randint(", "0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507, ] ).cuda() # weight = torch.FloatTensor( #", "w, num_cls) label = rearrange(label, \"b h w c -> b c h", "main_w == aux_w and main_h == h and main_w == w ) if", "input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return", "= ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else 0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"] if", "1.0 elif num_valid > 0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label),", "def forward(self, predict, target, weight=None): \"\"\" Args: predict:(n, c, h, w) target:(n, h,", "threshold = self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag", "1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507, ]", "be a Tensor of size \"nclasses\" \"\"\" assert not target.requires_grad input_prob = F.softmax(predict,", "= self.thresh if min_kept > 0: k_th = min(len(pred), min_kept) - 1 new_array", "dim=2 ) reco_loss = reco_loss + F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda() ) if", "prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool() class_mask = torch.cat( (class_mask_l *", "None: return new_keys, reco_loss / valid_seg else: return prototype, new_keys, reco_loss / valid_seg", "prob_seg = prob[:, i, :, :] 
rep_mask_low_entropy = ( prob_seg > current_class_threshold )", "= np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent ) thresh_mask = entropy.ge(thresh).bool() * (target !=", "# print('Labels: {}'.format(num_valid)) elif num_valid > 0: prob = prob.masked_fill_(~valid_mask, 1) mask_prob =", "= entropy.ge(thresh).bool() * (target != 255).bool() target[thresh_mask] = 255 weight = batch_size *", "self._criterion(main_pred, target) loss2 = self._criterion(aux_pred, target) loss = loss1 + self._aux_weight * loss2", "prototype, new_keys, reco_loss / valid_seg def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight = (", "def compute_contra_memobank_loss( rep, label_l, label_u, prob_l, prob_u, low_mask, high_mask, cfg, memobank, queue_prtlis, queue_size,", "as nd import torch import torch.nn as nn from torch.nn import functional as", "labeled data # label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0) #", "10.0, 2.5, 0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif reduce: self.criterion", "num_cls, h, w) valid_classes = [] new_keys = [] for i in range(num_segments):", "dtype=torch.long)] threshold = self.thresh if self.min_kept > 0: _, index = mask_prob.sort() threshold_index", "target) loss2 = self._criterion(aux_pred, target) loss = loss1 + self._aux_weight * loss2 else:", "# (num_unlabeled, h, w, num_cls) prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls,", "= np.rollaxis(predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label", "= torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def forward(self, pred, target): b, c, h, w", "= ( prob_seg > current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg <", "\"\"\" Args: predict:(n, c, h, w) target:(n, h, w) weight (Tensor, optional): a", "True) 
prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w, num_cls) _,", "target * kept_mask.long() valid_mask = valid_mask * kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index) target", "batch_size, num_class, h, w = predict.shape with torch.no_grad(): # drop pixels with high", "threshold def generate_new_target(self, predict, target): np_predict = predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n, c,", "return new_target def forward(self, predict, target, weight=None): \"\"\" Args: predict:(n, c, h, w)", "use_weight if not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor( [ 0.0,", "= self._criterion1(preds, target) return loss class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8):", "import dequeue_and_enqueue def compute_rce_loss(predict, target): from einops import rearrange predict = F.softmax(predict, dim=1)", "/ valid_seg def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\",", "generate class mask for labeled data # label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:,", "thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool() target[thresh_mask] = 255 weight = batch_size", "rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) ) # generate class mask for unlabeled data #", ") elif reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else: self.criterion = torch.nn.CrossEntropyLoss(", "in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask for i-th class", "dequeue_and_enqueue def compute_rce_loss(predict, target): from einops import rearrange predict = F.softmax(predict, dim=1) with", "torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() 
high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries * num_negatives,) ) negative_feat", "downsample 1/8 factor = self.factor predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor,", "self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy Tensor Version \"\"\" def __init__(", ":, low_rank:high_rank].eq(i), dim=3 ).bool() # generate class mask for labeled data # label_l_mask", "= weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321] return loss def", "input_prob = F.softmax(predict, 1) target = self.generate_new_target(input_prob, target) return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module):", "for unlabeled data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum( prob_indices_u[:, :,", "# select anchor pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat = (", "cases, all queries in the current query class are easy reco_loss = reco_loss", "seg_proto_list = [] # the center of each class _, prob_indices_l = torch.sort(prob_l,", "compute_rce_loss(predict, target): from einops import rearrange predict = F.softmax(predict, dim=1) with torch.no_grad(): _,", "weight given to each class. 
If given, has to be a Tensor of", ") * positive_feat + ema_decay * momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone() all_feat", "or no semantic class if momentum_prototype is None: return new_keys, torch.tensor(0.0) * rep.sum()", "np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent ) thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool()", "classes prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda() for i in range(valid_seg):", "target:(n, h, w) weight (Tensor, optional): a manual rescaling weight given to each", "data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i),", "predict, target, weight=None): \"\"\" Args: predict:(n, c, h, w) target:(n, h, w) weight", "* loss2 else: pred_h, pred_w = preds.size(2), preds.size(3) assert pred_h == h and", "factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict, np_target): # downsample 1/8 factor =", "( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else 0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"]", "= cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries = cfg[\"num_queries\"]", "main_pred, aux_pred = preds main_h, main_w = main_pred.size(2), main_pred.size(3) aux_h, aux_w = aux_pred.size(2),", "factor * factor ) # int(self.min_kept_ratio * n * h * w) input_label", "predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n, c, h, w = np_predict.shape threshold = self.find_threshold(np_predict,", "input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] threshold = self.thresh if min_kept >", "* factor ) # int(self.min_kept_ratio * n * h * w) input_label =", "< 
current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center of the", ") self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def forward(self, preds, target): h, w =", "] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def forward(self,", "2, 3, 1) seg_feat_all_list = [] seg_feat_low_entropy_list = [] # candidate anchor pixels", "rep_mask_high_entropy * class_mask keys = rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], )", "input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return new_target", "return loss class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label", "= prob_indices_u.permute( 0, 2, 3, 1 ) # (num_unlabeled, h, w, num_cls) prob", "= ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch, h, w, num_cls) label = rearrange(label,", "rare cases, all queries in the current query class are easy reco_loss =", "] prototype[valid_classes[i]] = positive_feat.clone() all_feat = torch.cat( (positive_feat, negative_feat), dim=1 ) # (num_queries,", "0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list) <= 1 ): # in some rare", "rce.sum() / (target != 255).sum() def compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size, num_class, h,", "> 0 and memobank[valid_classes[i]][0].shape[0] > 0 ): # select anchor pixel seg_low_entropy_idx =", "n, c, h, w = 
predict.shape min_kept = self.min_kept // ( factor *", "0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0,", "use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor( [ 0.0, 0.0, 0.0, 1.0,", "-> b c h w\") label = torch.clamp(label, min=1e-4, max=1.0) rce = -torch.sum(predict", "0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0,", "w) valid_classes = [] new_keys = [] for i in range(num_segments): low_valid_pixel_seg =", "rep_mask_low_entropy = ( prob_seg > current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg", "1 ): # in some rare cases, a small mini-batch might only contain", "high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg = prob[:, i, :, :] rep_mask_low_entropy = (", "prob_u, low_mask, high_mask, cfg, memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ): # current_class_threshold:", "= reco_loss + 0 * rep.sum() continue # apply negative key sampling from", "self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index self.thresh", "torch.log(prob + 1e-10), dim=1) thresh = np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent ) thresh_mask", "= new_threshold return threshold def generate_new_target(self, predict, target): np_predict = predict.data.cpu().numpy() np_target =", "# (num_queries, 1, num_feat) if momentum_prototype is not None: if not (momentum_prototype ==", "reco_loss = reco_loss + F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype is", "np.arange(len(label), dtype=np.int32)] threshold = self.thresh if min_kept > 0: k_th = min(len(pred), min_kept)", ") positive_feat = ( seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1) .cuda() ) #", "class seg_proto_list = [] # the center of each class _, 
prob_indices_l =", "self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if min_kept >=", "input_label = np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label", "valid_mask.sum() prob = F.softmax(pred, dim=1) prob = (prob.transpose(0, 1)).reshape(c, -1) if self.min_kept >", "seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256] valid_seg = len(seg_num_list) # number of", "target = target * valid_mask.long() num_valid = valid_mask.sum() prob = F.softmax(pred, dim=1) prob", "_, index = mask_prob.sort() threshold_index = index[min(len(index), self.min_kept) - 1] if mask_prob[threshold_index] >", "(positive_feat, negative_feat), dim=1 ) # (num_queries, 1 + num_negative, num_feat) seg_logits = torch.cosine_similarity(", "= rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list = [] seg_feat_low_entropy_list = [] # candidate", "F.softmax(predict, dim=1) with torch.no_grad(): _, num_cls, h, w = predict.shape temp_tar = target.clone()", "if ( len(seg_num_list) <= 1 ): # in some rare cases, a small", ") anchor_feat = ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in some rare cases, all", ".utils import dequeue_and_enqueue def compute_rce_loss(predict, target): from einops import rearrange predict = F.softmax(predict,", "> 0: # require aux loss main_pred, aux_pred = preds main_h, main_w =", "are easy reco_loss = reco_loss + 0 * rep.sum() continue # apply negative", "negative_feat), dim=1 ) # (num_queries, 1 + num_negative, num_feat) seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1),", "num_cls) prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w) valid_classes =", "* n * h * w) input_label = target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 1).reshape((c,", "h, w = predict.shape with torch.no_grad(): # drop pixels with high entropy prob", "( 
len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0] > 0 ): # select anchor pixel", "class_mask keys = rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if", "label = rearrange(label, \"b h w c -> b c h w\") label", "h, w = predict.shape min_kept = self.min_kept // ( factor * factor )", "321] return loss def compute_contra_memobank_loss( rep, label_l, label_u, prob_l, prob_u, low_mask, high_mask, cfg,", "prob_u), dim=0) # (batch_size, num_cls, h, w) valid_classes = [] new_keys = []", "pred.size() target = target.view(-1) valid_mask = target.ne(self.ignore_index) target = target * valid_mask.long() num_valid", "= target.view(-1) valid_mask = target.ne(self.ignore_index) target = target * valid_mask.long() num_valid = valid_mask.sum()", "keys = rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum()", "ignore_index self.use_weight = use_weight if not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights =", "predict.shape min_kept = self.min_kept // ( factor * factor ) # int(self.min_kept_ratio *", "else: return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum() else: reco_loss = torch.tensor(0.0).cuda() seg_proto =", "dim=1) thresh = np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent ) thresh_mask = entropy.ge(thresh).bool() *", "np.rollaxis(predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label =", "[valid_seg, 256] valid_seg = len(seg_num_list) # number of valid classes prototype = torch.zeros(", "target) loss2 = self._criterion2(aux_pred, target) loss = loss1 + self._aux_weight * loss2 else:", "return loss def compute_contra_memobank_loss( rep, label_l, 
label_u, prob_l, prob_u, low_mask, high_mask, cfg, memobank,", "0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] kept_flag = pred", "each class _, prob_indices_l = torch.sort(prob_l, 1, True) prob_indices_l = prob_indices_l.permute(0, 2, 3,", "new_target def forward(self, predict, target, weight=None): \"\"\" Args: predict:(n, c, h, w) target:(n,", "num_class, h, w = predict.shape with torch.no_grad(): # drop pixels with high entropy", "torch.sum(target != 255) loss = weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321,", "prob = torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1) thresh", "min_kept) - 1 new_array = np.partition(pred, k_th) new_threshold = new_array[k_th] if new_threshold >", "criterion = Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) return criterion class Criterion(nn.Module): def __init__(self,", "1.0507, ] ).cuda() # weight = torch.FloatTensor( # [0.4762, 0.5, 0.4762, 1.4286, 1.1111,", "positive_feat + ema_decay * momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone() all_feat = torch.cat(", "2.5, 0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif reduce: self.criterion =", "thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label self.thresh = float(thresh) self.min_kept =", "Cross Entropy Tensor Version \"\"\" def __init__( self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False", "# prob_i_classes = prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool() class_mask =", "_, num_cls, h, w = predict.shape temp_tar = target.clone() temp_tar[target == 255] =", "predict, target): np_predict = predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n, c, 
h, w =", "binary mask for i-th class high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg = prob[:, i,", "(Tensor, optional): a manual rescaling weight given to each class. If given, has", "criterion = CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion = Criterion( aux_weight, ignore_index=ignore_index,", "num_cls, h, w = predict.shape temp_tar = target.clone() temp_tar[target == 255] = 0", ") if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list) <= 1 ):", "\"b h w c -> b c h w\") label = torch.clamp(label, min=1e-4,", "min_kept >= num_valid: threshold = 1.0 elif num_valid > 0: prob = input_prob[:,", "negative_feat = negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape( num_queries, num_negatives, num_feat ) positive_feat = (", "num_labeled = label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask", "reco_loss = torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256] valid_seg = len(seg_num_list)", "= prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] threshold = self.thresh if self.min_kept", "factor = self.factor predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 /", "rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list = [] seg_feat_low_entropy_list = [] # candidate anchor", "aux_h and main_w == aux_w and main_h == h and main_w == w", "( 1 - ema_decay ) * positive_feat + ema_decay * momentum_prototype[ valid_classes[i] ]", "1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0,", "(1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp", ".cuda() ) # (num_queries, 1, num_feat) if momentum_prototype is not 
None: if not", "torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict, np_target): # downsample 1/8 factor = self.factor predict =", "= ignore_index self.use_weight = use_weight if not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights", "select binary mask for i-th class high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg = prob[:,", "else: weights = torch.FloatTensor( [ 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0,", "== \"ohem\": criterion = CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion = Criterion(", "order=1) target = nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0) n,", "CriterionOhem(nn.Module): def __init__( self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight", "return criterion class Criterion(nn.Module): def __init__(self, aux_weight, ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight =", "!= 255).bool() return rce.sum() / (target != 255).sum() def compute_unsupervised_loss(predict, target, percent, pred_teacher):", "= self.generate_new_target(input_prob, target) return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy Tensor", "small mini-batch might only contain 1 or no semantic class if momentum_prototype is", "+ 1e-10), dim=1) thresh = np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent ) thresh_mask =", "target ) else: loss1 = self._criterion(main_pred, target) loss2 = self._criterion(aux_pred, target) loss =", "h, w) weight (Tensor, optional): a manual rescaling weight given to each class.", "= prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3 ).bool() #", "low_valid_pixel_seg.sum() > 0: 
seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list) <= 1 ): # in", "1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882, # 1.4286, 0.5, 3.3333,5.0, 10.0,", "factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label self.thresh = float(thresh) self.min_kept = int(min_kept) self.factor", "current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center of the class", "loss = self._criterion(preds, target) return loss class CriterionOhem(nn.Module): def __init__( self, aux_weight, thresh=0.7,", "i, :, :] rep_mask_low_entropy = ( prob_seg > current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy", "(1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1) target = nd.zoom(np_target, (1.0,", "1) mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] threshold = self.thresh if self.min_kept > 0:", "= self._criterion(preds, target) return loss class CriterionOhem(nn.Module): def __init__( self, aux_weight, thresh=0.7, min_kept=100000,", "= torch.cat( (class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0 ) negative_mask =", "cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled = label_l.shape[0]", ":]] class_mask_u = torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3 ).bool() # generate class", "thresh = np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent ) thresh_mask = entropy.ge(thresh).bool() * (target", "seg_feat_low_entropy_list = [] # candidate anchor pixels seg_num_list = [] # the number", "= torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w) valid_classes = [] new_keys", "i_iter, 0.999) positive_feat = ( 1 - ema_decay ) * positive_feat + ema_decay", "self._criterion1( 
main_pred, target ) else: loss1 = self._criterion(main_pred, target) loss2 = self._criterion(aux_pred, target)", "= preds main_h, main_w = main_pred.size(2), main_pred.size(3) aux_h, aux_w = aux_pred.size(2), aux_pred.size(3) assert", "w ) loss1 = self._criterion1(main_pred, target) loss2 = self._criterion2(aux_pred, target) loss = loss1", "def forward(self, pred, target): b, c, h, w = pred.size() target = target.view(-1)", "h, w = np_predict.shape threshold = self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32) input_prob =", "255) loss = weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321] return", "1.0, 1.0, 1.0, ] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights", "input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if", "= torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def", "and main_h == h and main_w == w ) loss1 = self._criterion1(main_pred, target)", "w) input_label = target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 1).reshape((c, -1)) valid_flag = input_label !=", "to each class. 
If given, has to be a Tensor of size \"nclasses\"", "w = predict.shape min_kept = self.min_kept // ( factor * factor ) #", "prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3 ).bool() # generate class mask for labeled data", "0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507, ] ).cuda() # weight =", "rep_teacher = rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list = [] seg_feat_low_entropy_list = [] #", "torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss = reco_loss + F.cross_entropy( seg_logits / temp,", "\"ohem\": criterion = CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion = Criterion( aux_weight,", "momentum_prototype is None: return new_keys, reco_loss / valid_seg else: return prototype, new_keys, reco_loss", "0: # require aux loss main_pred, aux_pred = preds main_h, main_w = main_pred.size(2),", "as F from .utils import dequeue_and_enqueue def compute_rce_loss(predict, target): from einops import rearrange", "self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight ) self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept)", "# number of valid classes prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda()", "c, h, w = np_predict.shape threshold = self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32) input_prob", "= torch.clamp(label, min=1e-4, max=1.0) rce = -torch.sum(predict * torch.log(label), dim=1) * (target !=", "low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask for i-th class high_valid_pixel_seg =", "def __init__( self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index =", "functional as F from .utils import dequeue_and_enqueue def compute_rce_loss(predict, target): from einops import", "OhemCrossEntropy2dTensor(ignore_index, 
thresh, min_kept) def forward(self, preds, target): h, w = target.size(1), target.size(2) if", "= F.softmax(predict, 1) target = self.generate_new_target(input_prob, target) return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\"", "cases, a small mini-batch might only contain 1 or no semantic class if", "dim=3).bool() class_mask = torch.cat( (class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0 )", "main_h == h and main_w == w ) if self.use_weight: loss1 = self._criterion(main_pred,", "= target * valid_mask.long() num_valid = valid_mask.sum() prob = F.softmax(pred, dim=1) prob =", "with torch.no_grad(): # drop pixels with high entropy prob = torch.softmax(pred_teacher, dim=1) entropy", "(1.0, 1.0 / factor, 1.0 / factor), order=0) n, c, h, w =", "predict.shape temp_tar = target.clone() temp_tar[target == 255] = 0 label = ( F.one_hot(temp_tar.clone().detach(),", "np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if min_kept >= num_valid: threshold =", ") else: # in some rare cases, all queries in the current query", "* h * w / torch.sum(target != 255) loss = weight * F.cross_entropy(predict,", "torch.cat(seg_proto_list) # shape: [valid_seg, 256] valid_seg = len(seg_num_list) # number of valid classes", ") else: loss1 = self._criterion(main_pred, target) loss2 = self._criterion(aux_pred, target) loss = loss1", "* torch.log(label), dim=1) * (target != 255).bool() return rce.sum() / (target != 255).sum()", "reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else:", "new_keys = [] for i in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i] # select", "if new_threshold > self.thresh: threshold = new_threshold return threshold def generate_new_target(self, predict, target):", "/ factor, 1.0 / factor), order=1) target = nd.zoom(np_target, (1.0, 1.0 / factor,", "label = ( 
F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch, h, w, num_cls) label =", "0.5, 0.5, 0.8333, 0.5263, 0.5882, # 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion", "self._criterion(preds, target) return loss class CriterionOhem(nn.Module): def __init__( self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255,", "dim=1) prob = (prob.transpose(0, 1)).reshape(c, -1) if self.min_kept > num_valid: pass # print('Labels:", "Ohem Cross Entropy Tensor Version \"\"\" def __init__( self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False,", "num_queries, num_negatives, num_feat ) positive_feat = ( seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1)", "num_cls) _, prob_indices_u = torch.sort(prob_u, 1, True) prob_indices_u = prob_indices_u.permute( 0, 2, 3,", "torch.sort(prob_l, 1, True) prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w,", "torch.cat((label_l, label_u), dim=0) * high_mask rep = rep.permute(0, 2, 3, 1) rep_teacher =", "anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss = reco_loss + F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda()", "= np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds", "optional): a manual rescaling weight given to each class. 
If given, has to", "self._criterion(main_pred, target) + self._criterion1( main_pred, target ) else: loss1 = self._criterion(main_pred, target) loss2", "# the number of low_valid pixels in each class seg_proto_list = [] #", "2, 3, 1) rep_teacher = rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list = [] seg_feat_low_entropy_list", "mask_prob.le(threshold) target = target * kept_mask.long() valid_mask = valid_mask * kept_mask target =", "low_valid_pixel[:, i] # select binary mask for i-th class high_valid_pixel_seg = high_valid_pixel[:, i]", "seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat = ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: #", "c, h, w = pred.size() target = target.view(-1) valid_mask = target.ne(self.ignore_index) target =", "data # label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0) # prob_i_classes", "self.thresh if self.min_kept > 0: _, index = mask_prob.sort() threshold_index = index[min(len(index), self.min_kept)", "high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask rep = rep.permute(0, 2, 3, 1)", "= int(min_kept) self.factor = factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict, np_target): #", "the center of each class _, prob_indices_l = torch.sort(prob_l, 1, True) prob_indices_l =", "0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507, ] ).cuda() # weight", "torch.sort(prob_u, 1, True) prob_indices_u = prob_indices_u.permute( 0, 2, 3, 1 ) # (num_unlabeled,", "pass # print('Labels: {}'.format(num_valid)) elif num_valid > 0: prob = prob.masked_fill_(~valid_mask, 1) mask_prob", "class if momentum_prototype is None: return new_keys, torch.tensor(0.0) * rep.sum() else: return momentum_prototype,", "= aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight ) self._criterion2 = 
OhemCrossEntropy2dTensor(ignore_index,", "entropy prob = torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1)", "> num_valid: pass # print('Labels: {}'.format(num_valid)) elif num_valid > 0: prob = prob.masked_fill_(~valid_mask,", "rep.sum() continue # apply negative key sampling from memory bank (with no gradients)", "{}'.format(num_valid)) elif num_valid > 0: prob = prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target, torch.arange(len(target),", "ema_decay * momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone() all_feat = torch.cat( (positive_feat, negative_feat),", "not target.requires_grad input_prob = F.softmax(predict, 1) target = self.generate_new_target(input_prob, target) return self.criterion(predict, target)", "and main_w == w ) if self.use_weight: loss1 = self._criterion(main_pred, target) + self._criterion1(", "torch.tensor(0.0) * rep.sum() else: reco_loss = torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg,", "self.min_kept = int(min_kept) if use_weight: weight = torch.FloatTensor( [ 0.8373, 0.918, 0.866, 1.0345,", "if num_valid > 0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)]", "class_mask = torch.cat( (class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0 ) negative_mask", "min_kept = self.min_kept // ( factor * factor ) # int(self.min_kept_ratio * n", "len(preds) == 2 and main_h == aux_h and main_w == aux_w and main_h", "def forward(self, preds, target): h, w = target.size(1), target.size(2) if self._aux_weight > 0:", "no semantic class if momentum_prototype is None: return new_keys, torch.tensor(0.0) * rep.sum() else:", "i] == 0) # prob_i_classes = prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i),", "with torch.no_grad(): _, num_cls, h, w = predict.shape temp_tar = target.clone() temp_tar[target ==", "num_segments = label_l.shape[1] 
low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask high_valid_pixel = torch.cat((label_l,", "# (batch, h, w, num_cls) label = rearrange(label, \"b h w c ->", "1) seg_feat_all_list = [] seg_feat_low_entropy_list = [] # candidate anchor pixels seg_num_list =", "threshold = self.thresh if self.min_kept > 0: _, index = mask_prob.sort() threshold_index =", "label_l.shape[1] low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask high_valid_pixel = torch.cat((label_l, label_u), dim=0)", "+ self._criterion1( main_pred, target ) else: loss1 = self._criterion(main_pred, target) loss2 = self._criterion(aux_pred,", "self.thresh = float(thresh) self.min_kept = int(min_kept) self.factor = factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def", ") thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool() target[thresh_mask] = 255 weight =", "factor), order=0) n, c, h, w = predict.shape min_kept = self.min_kept // (", "/ temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype is None: return new_keys, reco_loss / valid_seg", "prob = F.softmax(pred, dim=1) prob = (prob.transpose(0, 1)).reshape(c, -1) if self.min_kept > num_valid:", "min_kept > 0: k_th = min(len(pred), min_kept) - 1 new_array = np.partition(pred, k_th)", "0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116,", "target) return loss class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__()", "aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else 0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"]", "h * w) input_label = target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 1).reshape((c, -1)) valid_flag =", "**cfg_criterion[\"kwargs\"] ) else: criterion = Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) 
return criterion class", "class are easy reco_loss = reco_loss + 0 * rep.sum() continue # apply", "class CriterionOhem(nn.Module): def __init__( self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__()", "np_target = target.data.cpu().numpy() n, c, h, w = np_predict.shape threshold = self.find_threshold(np_predict, np_target)", "== w loss = self._criterion(preds, target) return loss class CriterionOhem(nn.Module): def __init__( self,", "[0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882, #", "self.thresh if min_kept > 0: k_th = min(len(pred), min_kept) - 1 new_array =", ".repeat(num_queries, 1, 1) .cuda() ) # (num_queries, 1, num_feat) if momentum_prototype is not", ".unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1) .cuda() ) # (num_queries, 1, num_feat) if momentum_prototype", "prob = (prob.transpose(0, 1)).reshape(c, -1) if self.min_kept > num_valid: pass # print('Labels: {}'.format(num_valid))", "elif reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\",", "255).bool() target[thresh_mask] = 255 weight = batch_size * h * w / torch.sum(target", "target) return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy Tensor Version \"\"\"", "loss def compute_contra_memobank_loss( rep, label_l, label_u, prob_l, prob_u, low_mask, high_mask, cfg, memobank, queue_prtlis,", "prob[label, np.arange(len(label), dtype=np.int32)] threshold = self.thresh if min_kept > 0: k_th = min(len(pred),", "momentum_prototype is None: return new_keys, torch.tensor(0.0) * rep.sum() else: return momentum_prototype, new_keys, torch.tensor(0.0)", "loss = self._criterion1(preds, target) return loss class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000,", "center of each class _, 
prob_indices_l = torch.sort(prob_l, 1, True) prob_indices_l = prob_indices_l.permute(0,", "h, w) valid_classes = [] new_keys = [] for i in range(num_segments): low_valid_pixel_seg", "+ 0 * rep.sum() continue # apply negative key sampling from memory bank", "dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i)", "= self._criterion1(main_pred, target) loss2 = self._criterion2(aux_pred, target) loss = loss1 + self._aux_weight *", "w loss = self._criterion1(preds, target) return loss class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7,", "i-th class high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg = prob[:, i, :, :] rep_mask_low_entropy", "255).bool() return rce.sum() / (target != 255).sum() def compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size,", "= cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled =", "target.requires_grad input_prob = F.softmax(predict, 1) target = self.generate_new_target(input_prob, target) return self.criterion(predict, target) class", "valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if min_kept >= num_valid:", "entropy[target != 255].detach().cpu().numpy().flatten(), percent ) thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool() target[thresh_mask]", "dim=1) with torch.no_grad(): _, num_cls, h, w = predict.shape temp_tar = target.clone() temp_tar[target", "memory bank (with no gradients) with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint(", "== 0), class_mask_u), dim=0 ) negative_mask = rep_mask_high_entropy * class_mask keys = rep_teacher[negative_mask].detach()", "aux loss main_pred, aux_pred = preds 
main_h, main_w = main_pred.size(2), main_pred.size(3) aux_h, aux_w", "0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507,", "w\") label = torch.clamp(label, min=1e-4, max=1.0) rce = -torch.sum(predict * torch.log(label), dim=1) *", "): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index self.thresh = float(thresh) self.min_kept = int(min_kept) if", "prob_indices_u.permute( 0, 2, 3, 1 ) # (num_unlabeled, h, w, num_cls) prob =", ") # (num_queries, 1 + num_negative, num_feat) seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2", "1/8 factor = self.factor predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0", "prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] kept_flag = pred <=", "all_feat, dim=2 ) reco_loss = reco_loss + F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda() )", "rep_teacher, momentum_prototype=None, i_iter=0, ): # current_class_threshold: delta_p (0.3) # current_class_negative_threshold: delta_n (1) current_class_threshold", "# [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882,", "c, h, w) target:(n, h, w) weight (Tensor, optional): a manual rescaling weight", "def compute_rce_loss(predict, target): from einops import rearrange predict = F.softmax(predict, dim=1) with torch.no_grad():", "new_threshold > self.thresh: threshold = new_threshold return threshold def generate_new_target(self, predict, target): np_predict", "anchor pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat = ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() )", "not (momentum_prototype == 0).all(): ema_decay = min(1 - 1 / i_iter, 0.999) positive_feat", "torch.randint( len(negative_feat), size=(num_queries * num_negatives,) ) negative_feat = negative_feat[high_entropy_idx] negative_feat = 
negative_feat.reshape( num_queries,", "cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries", "= valid_flag.sum() if num_valid > 0: prob = input_prob[:, valid_flag] pred = prob[label,", "= valid_inds[kept_flag] label = input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target = ( torch.from_numpy(input_label.reshape(target.size()))", "# 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index", "+ num_negative, num_feat) seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss = reco_loss", "label_u), dim=0) * low_mask high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask rep =", "h, w, num_cls) prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w)", "num_valid = valid_flag.sum() if min_kept >= num_valid: threshold = 1.0 elif num_valid >", "= torch.FloatTensor( [ 0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023,", "factor ) # int(self.min_kept_ratio * n * h * w) input_label = target.ravel().astype(np.int32)", ") ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\": criterion = CriterionOhem( aux_weight, ignore_index=ignore_index,", "): # current_class_threshold: delta_p (0.3) # current_class_negative_threshold: delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold", "self._aux_weight > 0: # require aux loss main_pred, aux_pred = preds main_h, main_w", "main_h, main_w = main_pred.size(2), main_pred.size(3) aux_h, aux_w = aux_pred.size(2), aux_pred.size(3) assert ( len(preds)", "torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1) thresh = 
np.percentile(", "seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list) <= 1 ): # in some rare cases,", "ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label self.thresh = float(thresh) self.min_kept", "num_cls).float().cuda() ) # (batch, h, w, num_cls) label = rearrange(label, \"b h w", "1e-10), dim=1) thresh = np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent ) thresh_mask = entropy.ge(thresh).bool()", "h, w = pred.size() target = target.view(-1) valid_mask = target.ne(self.ignore_index) target = target", "np_target): # downsample 1/8 factor = self.factor predict = nd.zoom(np_predict, (1.0, 1.0, 1.0", "c, h, w = predict.shape min_kept = self.min_kept // ( factor * factor", "rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum() > 0:", "np_predict = predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n, c, h, w = np_predict.shape threshold", "3, 1) seg_feat_all_list = [] seg_feat_low_entropy_list = [] # candidate anchor pixels seg_num_list", "len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0] > 0 ): # select anchor pixel seg_low_entropy_idx", "(num_queries, 1, num_feat) if momentum_prototype is not None: if not (momentum_prototype == 0).all():", "return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum() else: reco_loss = torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list)", "def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else", "= ignore_index self.thresh = float(thresh) self.min_kept = int(min_kept) if use_weight: weight = torch.FloatTensor(", "(target != 255).sum() def 
compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size, num_class, h, w =", "pred, target): b, c, h, w = pred.size() target = target.view(-1) valid_mask =", "= prob[label, np.arange(len(label), dtype=np.int32)] kept_flag = pred <= threshold valid_inds = valid_inds[kept_flag] label", "( factor * factor ) # int(self.min_kept_ratio * n * h * w)", "= np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label", ") if self.use_weight: loss1 = self._criterion(main_pred, target) + self._criterion1( main_pred, target ) else:", "= mask_prob.le(threshold) target = target * kept_mask.long() valid_mask = valid_mask * kept_mask target", ") # (num_queries, 1, num_feat) if momentum_prototype is not None: if not (momentum_prototype", "\"\"\" Ohem Cross Entropy Tensor Version \"\"\" def __init__( self, ignore_index=255, thresh=0.7, min_kept=256,", "/ (target != 255).sum() def compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size, num_class, h, w", "for i in range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0] > 0", "# current_class_negative_threshold: delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank =", "0: k_th = min(len(pred), min_kept) - 1 new_array = np.partition(pred, k_th) new_threshold =", "aux_w = aux_pred.size(2), aux_pred.size(3) assert ( len(preds) == 2 and main_h == aux_h", "compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size, num_class, h, w = predict.shape with torch.no_grad(): #", "int(min_kept) if use_weight: weight = torch.FloatTensor( [ 0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969,", "i] prob_seg = prob[:, i, :, :] rep_mask_low_entropy = ( prob_seg > current_class_threshold", "valid_mask = target.ne(self.ignore_index) target = target * valid_mask.long() 
num_valid = valid_mask.sum() prob =", "valid_inds = valid_inds[kept_flag] label = input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target = (", "main_pred, target ) else: loss1 = self._criterion(main_pred, target) loss2 = self._criterion(aux_pred, target) loss", "target): h, w = target.size(1), target.size(2) if self._aux_weight > 0: # require aux", "loss class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label =", "= self._criterion(aux_pred, target) loss = loss1 + self._aux_weight * loss2 else: pred_h, pred_w", "prob[target, torch.arange(len(target), dtype=torch.long)] threshold = self.thresh if self.min_kept > 0: _, index =", "torch.cat( (class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0 ) negative_mask = rep_mask_high_entropy", "= input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] threshold = self.thresh if min_kept", "preds.size(3) assert pred_h == h and pred_w == w loss = self._criterion1(preds, target)", "_, prob_indices_l = torch.sort(prob_l, 1, True) prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) #", "main_h == aux_h and main_w == aux_w and main_h == h and main_w", "= cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled = label_l.shape[0] num_segments =", "label_u), dim=0) * high_mask rep = rep.permute(0, 2, 3, 1) rep_teacher = rep_teacher.permute(0,", "queries in the current query class are easy reco_loss = reco_loss + 0", "low_valid pixels in each class seg_proto_list = [] # the center of each", ".unsqueeze(0) .repeat(num_queries, 1, 1) .cuda() ) # (num_queries, 1, num_feat) if momentum_prototype is", "loss1 + self._aux_weight * loss2 else: pred_h, pred_w = preds.size(2), preds.size(3) assert pred_h", "= self.thresh if self.min_kept > 0: _, index = mask_prob.sort() threshold_index = 
index[min(len(index),", "[ 0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843,", "else: return prototype, new_keys, reco_loss / valid_seg def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight", ") # (batch, h, w, num_cls) label = rearrange(label, \"b h w c", "w loss = self._criterion(preds, target) return loss class CriterionOhem(nn.Module): def __init__( self, aux_weight,", "and memobank[valid_classes[i]][0].shape[0] > 0 ): # select anchor pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]),", "1.0, 0.0, 1.0, 1.0, 1.0, ] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = nn.CrossEntropyLoss(", "aux_weight, ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight = aux_weight self._ignore_index = ignore_index self.use_weight =", "in some rare cases, a small mini-batch might only contain 1 or no", "# int(self.min_kept_ratio * n * h * w) input_label = target.ravel().astype(np.int32) input_prob =", "torch.tensor(0.0) * rep.sum() else: return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum() else: reco_loss =", "pred = prob[label, np.arange(len(label), dtype=np.int32)] kept_flag = pred <= threshold valid_inds = valid_inds[kept_flag]", "min=1e-4, max=1.0) rce = -torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool() return", "256] valid_seg = len(seg_num_list) # number of valid classes prototype = torch.zeros( (prob_indices_l.shape[-1],", "num_negative, num_feat) seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss = reco_loss +", "low_rank:high_rank].eq(i), dim=3 ).bool() # generate class mask for labeled data # label_l_mask =", "new_threshold return threshold def generate_new_target(self, predict, target): np_predict = predict.data.cpu().numpy() np_target = target.data.cpu().numpy()", "= int(min_kept) if use_weight: weight = torch.FloatTensor( [ 
0.8373, 0.918, 0.866, 1.0345, 1.0166,", "/ valid_seg else: return prototype, new_keys, reco_loss / valid_seg def get_criterion(cfg): cfg_criterion =", "some rare cases, all queries in the current query class are easy reco_loss", "cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat = rep.shape[1]", ") * low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg < current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()])", "self._aux_weight = aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight ) self._criterion2 =", "pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat = ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else:", "predict:(n, c, h, w) target:(n, h, w) weight (Tensor, optional): a manual rescaling", "super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index self.thresh = float(thresh) self.min_kept = int(min_kept) if use_weight:", "keepdim=True ) ) # generate class mask for unlabeled data # prob_i_classes =", "self._ignore_index = ignore_index self.use_weight = use_weight if not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else:", "[] seg_feat_low_entropy_list = [] # candidate anchor pixels seg_num_list = [] # the", "use_weight ) self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def forward(self, preds, target): h, w", "(target != 255).bool() return rce.sum() / (target != 255).sum() def compute_unsupervised_loss(predict, target, percent,", "= label_l.shape[1] low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask high_valid_pixel = torch.cat((label_l, label_u),", "ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion = Criterion( aux_weight, ignore_index=ignore_index, 
**cfg_criterion[\"kwargs\"] ) return criterion", "pred_h == h and pred_w == w loss = self._criterion1(preds, target) return loss", "self._criterion(aux_pred, target) loss = loss1 + self._aux_weight * loss2 else: pred_h, pred_w =", "dtype=np.int32)] threshold = self.thresh if min_kept > 0: k_th = min(len(pred), min_kept) -", "* class_mask keys = rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) )", "if use_weight: weight = torch.FloatTensor( [ 0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754,", "= pred.size() target = target.view(-1) valid_mask = target.ne(self.ignore_index) target = target * valid_mask.long()", "> 0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] kept_flag =", "use_weight=False): super(Criterion, self).__init__() self._aux_weight = aux_weight self._ignore_index = ignore_index self.use_weight = use_weight if", "== aux_w and main_h == h and main_w == w ) loss1 =", "= [] for i in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i] # select binary", "return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy Tensor Version \"\"\" def", "2 and main_h == aux_h and main_w == aux_w and main_h == h", "If given, has to be a Tensor of size \"nclasses\" \"\"\" assert not", "= cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat", "self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if num_valid >", "== h and main_w == w ) if self.use_weight: loss1 = self._criterion(main_pred, target)", ").cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def forward(self, preds,", ").cuda() for i in 
range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0] >", "= 255 weight = batch_size * h * w / torch.sum(target != 255)", "some rare cases, a small mini-batch might only contain 1 or no semantic", "== h and main_w == w ) loss1 = self._criterion1(main_pred, target) loss2 =", "cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\": criterion = CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else:", "= self._criterion2(aux_pred, target) loss = loss1 + self._aux_weight * loss2 else: pred_h, pred_w", "= memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries * num_negatives,) ) negative_feat = negative_feat[high_entropy_idx]", "prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w) valid_classes = []", ">= num_valid: threshold = 1.0 elif num_valid > 0: prob = input_prob[:, valid_flag]", "= rep.permute(0, 2, 3, 1) rep_teacher = rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list =", "np.partition(pred, k_th) new_threshold = new_array[k_th] if new_threshold > self.thresh: threshold = new_threshold return", "= cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled = label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel =", "* h * w) input_label = target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 1).reshape((c, -1)) valid_flag", "= torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index", "main_pred.size(3) aux_h, aux_w = aux_pred.size(2), aux_pred.size(3) assert ( len(preds) == 2 and main_h", "// ( factor * factor ) # int(self.min_kept_ratio * n * h *", ":] rep_mask_low_entropy = ( prob_seg > current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy = (", "= valid_mask.sum() prob = F.softmax(pred, dim=1) prob = 
(prob.transpose(0, 1)).reshape(c, -1) if self.min_kept", "dim=3 ).bool() # generate class mask for labeled data # label_l_mask = rep_mask_high_entropy[:", "pred = prob[label, np.arange(len(label), dtype=np.int32)] threshold = self.thresh if min_kept > 0: k_th", "torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3 ).bool() # generate class mask for labeled", "h, w, num_cls) _, prob_indices_u = torch.sort(prob_u, 1, True) prob_indices_u = prob_indices_u.permute( 0,", "( len(seg_num_list) <= 1 ): # in some rare cases, a small mini-batch", "= torch.FloatTensor( # [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333,", "= target.clone() temp_tar[target == 255] = 0 label = ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() )", "(momentum_prototype == 0).all(): ema_decay = min(1 - 1 / i_iter, 0.999) positive_feat =", "= Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) return criterion class Criterion(nn.Module): def __init__(self, aux_weight,", "== h and pred_w == w loss = self._criterion(preds, target) return loss class", "threshold valid_inds = valid_inds[kept_flag] label = input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target =", "0) # prob_i_classes = prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool() class_mask", "temp_tar[target == 255] = 0 label = ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch,", "F.softmax(predict, 1) target = self.generate_new_target(input_prob, target) return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem", "if momentum_prototype is None: return new_keys, reco_loss / valid_seg else: return prototype, new_keys,", "batch_size * h * w / torch.sum(target != 255) loss = weight *", "dim=0 ) negative_mask = rep_mask_high_entropy * class_mask keys = rep_teacher[negative_mask].detach() 
new_keys.append( dequeue_and_enqueue( keys=keys,", "prob[:, i, :, :] rep_mask_low_entropy = ( prob_seg > current_class_threshold ) * low_valid_pixel_seg.bool()", "1) # (num_labeled, h, w, num_cls) _, prob_indices_u = torch.sort(prob_u, 1, True) prob_indices_u", "valid_mask = valid_mask * kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index) target = target.view(b, h,", "reco_loss = reco_loss + 0 * rep.sum() continue # apply negative key sampling", "high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"]", "h w c -> b c h w\") label = torch.clamp(label, min=1e-4, max=1.0)", "only contain 1 or no semantic class if momentum_prototype is None: return new_keys,", "def find_threshold(self, np_predict, np_target): # downsample 1/8 factor = self.factor predict = nd.zoom(np_predict,", "np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds =", "Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) return criterion class Criterion(nn.Module): def __init__(self, aux_weight, ignore_index=255,", "**cfg_criterion[\"kwargs\"] ) return criterion class Criterion(nn.Module): def __init__(self, aux_weight, ignore_index=255, use_weight=False): super(Criterion, self).__init__()", "# generate class mask for labeled data # label_l_mask = rep_mask_high_entropy[: num_labeled] *", "of each class _, prob_indices_l = torch.sort(prob_l, 1, True) prob_indices_l = prob_indices_l.permute(0, 2,", "= predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n, c, h, w = np_predict.shape threshold =", "): # select anchor pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat =", "and main_h == h and main_w == w ) if self.use_weight: loss1 =", "nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = 
nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def forward(self, preds, target): h, w", "1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,", "new_keys, torch.tensor(0.0) * rep.sum() else: reco_loss = torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) # shape:", "negative_feat.reshape( num_queries, num_negatives, num_feat ) positive_feat = ( seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1,", "delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"]", "F.softmax(pred, dim=1) prob = (prob.transpose(0, 1)).reshape(c, -1) if self.min_kept > num_valid: pass #", "min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index self.thresh = float(thresh) self.min_kept", "[] # the number of low_valid pixels in each class seg_proto_list = []", "0 * rep.sum() continue # apply negative key sampling from memory bank (with", "prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] threshold = self.thresh if", "i_iter=0, ): # current_class_threshold: delta_p (0.3) # current_class_negative_threshold: delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"]", "+ ema_decay * momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone() all_feat = torch.cat( (positive_feat,", "0.5882, # 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight,", "i in range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0] > 0 ):", "rearrange predict = F.softmax(predict, dim=1) with torch.no_grad(): _, num_cls, h, w = predict.shape", "-torch.sum(prob * torch.log(prob + 1e-10), dim=1) thresh = 
np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent", "= CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion = Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"]", "[] # candidate anchor pixels seg_num_list = [] # the number of low_valid", "generate class mask for unlabeled data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u =", "as np import scipy.ndimage as nd import torch import torch.nn as nn from", "loss = weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321] return loss", "= aux_weight self._ignore_index = ignore_index self.use_weight = use_weight if not use_weight: self._criterion =", "valid_seg def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False)", "percent, pred_teacher): batch_size, num_class, h, w = predict.shape with torch.no_grad(): # drop pixels", "0, 2, 3, 1 ) # (num_unlabeled, h, w, num_cls) prob = torch.cat((prob_l,", "print('Labels: {}'.format(num_valid)) elif num_valid > 0: prob = prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target,", "from torch.nn import functional as F from .utils import dequeue_and_enqueue def compute_rce_loss(predict, target):", "- 1 new_array = np.partition(pred, k_th) new_threshold = new_array[k_th] if new_threshold > self.thresh:", "valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] threshold = self.thresh if min_kept > 0:", "threshold = new_threshold return threshold def generate_new_target(self, predict, target): np_predict = predict.data.cpu().numpy() np_target", "= prob[target, torch.arange(len(target), dtype=torch.long)] threshold = self.thresh if self.min_kept > 0: _, index", "h w\") label = torch.clamp(label, min=1e-4, max=1.0) rce = -torch.sum(predict * torch.log(label), dim=1)", "of the class seg_proto_list.append( 
torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) ) # generate class", "(num_labeled, h, w, num_cls) _, prob_indices_u = torch.sort(prob_u, 1, True) prob_indices_u = prob_indices_u.permute(", "entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1) thresh = np.percentile( entropy[target !=", "= [] # candidate anchor pixels seg_num_list = [] # the number of", "= pred <= threshold valid_inds = valid_inds[kept_flag] label = input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] =", "0.5, 0.8333, 0.5263, 0.5882, # 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion =", "target) return loss class CriterionOhem(nn.Module): def __init__( self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False,", ") negative_feat = negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape( num_queries, num_negatives, num_feat ) positive_feat =", "self.thresh: threshold = new_threshold return threshold def generate_new_target(self, predict, target): np_predict = predict.data.cpu().numpy()", "nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def forward(self, preds, target): h, w = target.size(1), target.size(2)", "target.size(2) if self._aux_weight > 0: # require aux loss main_pred, aux_pred = preds", "seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center of the class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0,", "self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag = input_label", "label = torch.clamp(label, min=1e-4, max=1.0) rce = -torch.sum(predict * torch.log(label), dim=1) * (target", "1 ) # (num_unlabeled, h, w, num_cls) prob = torch.cat((prob_l, prob_u), dim=0) #", "# (batch_size, num_cls, h, w) 
valid_classes = [] new_keys = [] for i", "rearrange(label, \"b h w c -> b c h w\") label = torch.clamp(label,", "3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif reduce:", "weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321] return loss def compute_contra_memobank_loss(", "0).all(): ema_decay = min(1 - 1 / i_iter, 0.999) positive_feat = ( 1", "<= threshold valid_inds = valid_inds[kept_flag] label = input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target", "* w / torch.sum(target != 255) loss = weight * F.cross_entropy(predict, target, ignore_index=255)", "class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy Tensor Version \"\"\" def __init__( self, ignore_index=255,", "import functional as F from .utils import dequeue_and_enqueue def compute_rce_loss(predict, target): from einops", "= torch.randint( len(negative_feat), size=(num_queries * num_negatives,) ) negative_feat = negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape(", "self.factor predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1)", "1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865,", "torch.cat( (positive_feat, negative_feat), dim=1 ) # (num_queries, 1 + num_negative, num_feat) seg_logits =", "OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label self.thresh", "__init__( self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight = aux_weight", "= torch.sort(prob_l, 1, True) prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h,", "/ factor), order=1) target = nd.zoom(np_target, (1.0, 1.0 / 
factor, 1.0 / factor),", "> self.thresh: threshold = new_threshold return threshold def generate_new_target(self, predict, target): np_predict =", "torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) ) # generate class mask for unlabeled data", ":, :, :low_rank].eq(i), dim=3).bool() class_mask = torch.cat( (class_mask_l * (label_l[:, i] == 0),", "seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center of the class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True", "target): from einops import rearrange predict = F.softmax(predict, dim=1) with torch.no_grad(): _, num_cls,", "cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled = label_l.shape[0] num_segments = label_l.shape[1]", "aux_pred.size(3) assert ( len(preds) == 2 and main_h == aux_h and main_w ==", "continue # apply negative key sampling from memory bank (with no gradients) with", "for i in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask for", "valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if num_valid > 0:", "> 0: _, index = mask_prob.sort() threshold_index = index[min(len(index), self.min_kept) - 1] if", "(num_unlabeled, h, w, num_cls) prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h,", "factor), order=1) target = nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0)", "0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, ]", "else: criterion = Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) return criterion class Criterion(nn.Module): def", "= [] seg_feat_low_entropy_list = [] # candidate anchor pixels seg_num_list = [] #", "memobank[valid_classes[i]][0].shape[0] > 0 ): # select anchor pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,)", "size 
\"nclasses\" \"\"\" assert not target.requires_grad input_prob = F.softmax(predict, 1) target = self.generate_new_target(input_prob,", "current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp =", "): super(CriterionOhem, self).__init__() self._aux_weight = aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight", "factor, 1.0 / factor), order=0) n, c, h, w = predict.shape min_kept =", "rep.sum() else: return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum() else: reco_loss = torch.tensor(0.0).cuda() seg_proto", "momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum() else: reco_loss = torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) #", ") def forward(self, preds, target): h, w = target.size(1), target.size(2) if self._aux_weight >", "target, weight=None): \"\"\" Args: predict:(n, c, h, w) target:(n, h, w) weight (Tensor,", "drop pixels with high entropy prob = torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob *", "* rep.sum() else: reco_loss = torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256]", "# in some rare cases, all queries in the current query class are", "weights = torch.FloatTensor( [ 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0,", "1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882, # 1.4286, 0.5, 3.3333,5.0,", "cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives", "= [] new_keys = [] for i in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i]", "the number of low_valid pixels in each class seg_proto_list = [] # the", "= torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat = ( 
seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in", "input_label = target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label", "num_valid > 0: prob = prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] threshold", "main_pred.size(2), main_pred.size(3) aux_h, aux_w = aux_pred.size(2), aux_pred.size(3) assert ( len(preds) == 2 and", "OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight ) self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def forward(self,", "easy reco_loss = reco_loss + 0 * rep.sum() continue # apply negative key", "1, num_feat) ).cuda() for i in range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) > 0 and", "c h w\") label = torch.clamp(label, min=1e-4, max=1.0) rce = -torch.sum(predict * torch.log(label),", "prototype[valid_classes[i]] = positive_feat.clone() all_feat = torch.cat( (positive_feat, negative_feat), dim=1 ) # (num_queries, 1", "ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight = aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh,", "1) .cuda() ) # (num_queries, 1, num_feat) if momentum_prototype is not None: if", "ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) return criterion class Criterion(nn.Module): def __init__(self, aux_weight, ignore_index=255, use_weight=False): super(Criterion,", "= ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in some rare cases, all queries in", "torch.log(label), dim=1) * (target != 255).bool() return rce.sum() / (target != 255).sum() def", "if self.min_kept > 0: _, index = mask_prob.sort() threshold_index = index[min(len(index), self.min_kept) -", "else: loss1 = self._criterion(main_pred, target) loss2 = self._criterion(aux_pred, target) loss = loss1 +", "1 or no semantic class if momentum_prototype 
is None: return new_keys, torch.tensor(0.0) *", "self.min_kept > 0: _, index = mask_prob.sort() threshold_index = index[min(len(index), self.min_kept) - 1]", "weight=weights ) def forward(self, preds, target): h, w = target.size(1), target.size(2) if self._aux_weight", "= torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool() class_mask = torch.cat( (class_mask_l * (label_l[:, i]", "CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion = Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] )", "Criterion(nn.Module): def __init__(self, aux_weight, ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight = aux_weight self._ignore_index =", "import scipy.ndimage as nd import torch import torch.nn as nn from torch.nn import", "i in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask for i-th", "sampling from memory bank (with no gradients) with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx", "torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256] valid_seg = len(seg_num_list) # number", "!= 255).bool() target[thresh_mask] = 255 weight = batch_size * h * w /", "ignore_index=ignore_index, weight=weights ) def forward(self, preds, target): h, w = target.size(1), target.size(2) if", "seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) ) # generate class mask for unlabeled", "contain 1 or no semantic class if momentum_prototype is None: return new_keys, torch.tensor(0.0)", "forward(self, pred, target): b, c, h, w = pred.size() target = target.view(-1) valid_mask", "1.0 / factor), order=0) n, c, h, w = predict.shape min_kept = self.min_kept", "target): b, c, h, w = pred.size() target = target.view(-1) valid_mask = target.ne(self.ignore_index)", "3, 1) # (num_labeled, h, w, num_cls) _, 
prob_indices_u = torch.sort(prob_u, 1, True)", "nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0) n, c, h, w", "= min(len(pred), min_kept) - 1 new_array = np.partition(pred, k_th) new_threshold = new_array[k_th] if", "if momentum_prototype is None: return new_keys, torch.tensor(0.0) * rep.sum() else: return momentum_prototype, new_keys,", "# require aux loss main_pred, aux_pred = preds main_h, main_w = main_pred.size(2), main_pred.size(3)", "self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def forward(self, preds, target): h, w = target.size(1),", "target = nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0) n, c,", "cfg, memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ): # current_class_threshold: delta_p (0.3) #", "and main_h == aux_h and main_w == aux_w and main_h == h and", "target = target.view(-1) valid_mask = target.ne(self.ignore_index) target = target * valid_mask.long() num_valid =", "if min_kept >= num_valid: threshold = 1.0 elif num_valid > 0: prob =", "= [] # the number of low_valid pixels in each class seg_proto_list =", "= self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) valid_flag =", "return rce.sum() / (target != 255).sum() def compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size, num_class,", "dim=0, keepdim=True ) ) # generate class mask for unlabeled data # prob_i_classes", "num_queries, 1, num_feat) ).cuda() for i in range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) > 0", "return prototype, new_keys, reco_loss / valid_seg def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight =", "1 / i_iter, 0.999) positive_feat = ( 1 - ema_decay ) * positive_feat", "target[thresh_mask] = 255 weight = batch_size * h * w / torch.sum(target !=", "kept_flag = pred <= threshold valid_inds = valid_inds[kept_flag] label = 
input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds]", "= (prob.transpose(0, 1)).reshape(c, -1) if self.min_kept > num_valid: pass # print('Labels: {}'.format(num_valid)) elif", "> 0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] threshold =", "prob_indices_u = torch.sort(prob_u, 1, True) prob_indices_u = prob_indices_u.permute( 0, 2, 3, 1 )", "# shape: [valid_seg, 256] valid_seg = len(seg_num_list) # number of valid classes prototype", "ignore_label self.thresh = float(thresh) self.min_kept = int(min_kept) self.factor = factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label)", "255].detach().cpu().numpy().flatten(), percent ) thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool() target[thresh_mask] = 255", "float(thresh) self.min_kept = int(min_kept) self.factor = factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict,", "target.size(1), target.size(2) if self._aux_weight > 0: # require aux loss main_pred, aux_pred =", "from memory bank (with no gradients) with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx =", "pred_w == w loss = self._criterion1(preds, target) return loss class OhemCrossEntropy2d(nn.Module): def __init__(self,", "( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch, h, w, num_cls) label = rearrange(label, \"b", "dim=0) # (batch_size, num_cls, h, w) valid_classes = [] new_keys = [] for", "valid_classes.append(i) if ( len(seg_num_list) <= 1 ): # in some rare cases, a", "weight = batch_size * h * w / torch.sum(target != 255) loss =", "w c -> b c h w\") label = torch.clamp(label, min=1e-4, max=1.0) rce", "= reco_loss + F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype is None:", "k_th = min(len(pred), min_kept) - 1 new_array = np.partition(pred, k_th) 
new_threshold = new_array[k_th]", "def __init__( self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight =", ") # (num_unlabeled, h, w, num_cls) prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size,", "kept_mask.long() valid_mask = valid_mask * kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index) target = target.view(b,", ") else: criterion = Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) return criterion class Criterion(nn.Module):", "- ema_decay ) * positive_feat + ema_decay * momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] =", "weight (Tensor, optional): a manual rescaling weight given to each class. If given,", "torch.arange(len(target), dtype=torch.long)] threshold = self.thresh if self.min_kept > 0: _, index = mask_prob.sort()", "num_feat ) positive_feat = ( seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1) .cuda() )", "ignore_index=ignore_index ) def forward(self, pred, target): b, c, h, w = pred.size() target", "aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) return criterion class Criterion(nn.Module): def __init__(self, aux_weight, ignore_index=255, use_weight=False):", "threshold = 1.0 elif num_valid > 0: prob = input_prob[:, valid_flag] pred =", "torch.cat((label_l, label_u), dim=0) * low_mask high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask rep", "aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight = aux_weight self._criterion1 =", "= rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0) # prob_i_classes = prob_indices_l[label_l_mask] class_mask_l", "1 - ema_decay ) * positive_feat + ema_decay * momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]]", "self._aux_weight * loss2 else: pred_h, pred_w = preds.size(2), preds.size(3) assert pred_h 
== h", "torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w) valid_classes = [] new_keys =", "find_threshold(self, np_predict, np_target): # downsample 1/8 factor = self.factor predict = nd.zoom(np_predict, (1.0,", "= self._criterion(main_pred, target) loss2 = self._criterion(aux_pred, target) loss = loss1 + self._aux_weight *", "= mask_prob.sort() threshold_index = index[min(len(index), self.min_kept) - 1] if mask_prob[threshold_index] > self.thresh: threshold", "min(len(pred), min_kept) - 1 new_array = np.partition(pred, k_th) new_threshold = new_array[k_th] if new_threshold", "class high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg = prob[:, i, :, :] rep_mask_low_entropy =", "if min_kept > 0: k_th = min(len(pred), min_kept) - 1 new_array = np.partition(pred,", "mask for i-th class high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg = prob[:, i, :,", "self.use_weight = use_weight if not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor(", "thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight = aux_weight self._criterion1 = OhemCrossEntropy2dTensor(", "0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, ] ).cuda() self._criterion =", "h and pred_w == w loss = self._criterion(preds, target) return loss class CriterionOhem(nn.Module):", "prob_seg < current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center of", "torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat = ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in some", "loss1 = self._criterion(main_pred, target) loss2 = self._criterion(aux_pred, target) loss = loss1 + self._aux_weight", "seg_num_list = [] # the number of low_valid pixels in each class 
seg_proto_list", "with high entropy prob = torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob * torch.log(prob +", "high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries * num_negatives,) ) negative_feat = negative_feat[high_entropy_idx] negative_feat =", "class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) ) # generate class mask for", "label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0) # prob_i_classes = prob_indices_l[label_l_mask]", "= mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target = target * kept_mask.long() valid_mask = valid_mask", "ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index self.thresh =", "mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target = target * kept_mask.long() valid_mask = valid_mask *", "!= self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if min_kept", "self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor( [ 0.0, 0.0, 0.0, 1.0, 1.0,", "rep_mask_high_entropy = ( prob_seg < current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive", "main_w == w ) if self.use_weight: loss1 = self._criterion(main_pred, target) + self._criterion1( main_pred,", "- 1] if mask_prob[threshold_index] > self.thresh: threshold = mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target", "== w loss = self._criterion1(preds, target) return loss class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255,", "!= self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if num_valid", 
"super(CriterionOhem, self).__init__() self._aux_weight = aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight )", "prob_indices_l = torch.sort(prob_l, 1, True) prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled,", "current_class_threshold: delta_p (0.3) # current_class_negative_threshold: delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"]", "* kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index) target = target.view(b, h, w) return self.criterion(pred,", "= prob[:, i, :, :] rep_mask_low_entropy = ( prob_seg > current_class_threshold ) *", "* rep.sum() else: return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum() else: reco_loss = torch.tensor(0.0).cuda()", "1.0 / factor, 1.0 / factor), order=0) n, c, h, w = predict.shape", "1] if mask_prob[threshold_index] > self.thresh: threshold = mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target =", "0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] threshold = self.thresh", "current query class are easy reco_loss = reco_loss + 0 * rep.sum() continue", "1) target = self.generate_new_target(input_prob, target) return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross", "to be a Tensor of size \"nclasses\" \"\"\" assert not target.requires_grad input_prob =", "* (label_l[:, i] == 0) # prob_i_classes = prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :,", "use_weight: weight = torch.FloatTensor( [ 0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489,", "0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0,", "h * w / torch.sum(target != 255) loss = weight * F.cross_entropy(predict, target,", "input_prob = np.rollaxis(predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds 
= np.where(valid_flag)[0]", "reco_loss / valid_seg else: return prototype, new_keys, reco_loss / valid_seg def get_criterion(cfg): cfg_criterion", "= aux_pred.size(2), aux_pred.size(3) assert ( len(preds) == 2 and main_h == aux_h and", "+ F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype is None: return new_keys,", "seg_logits / temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype is None: return new_keys, reco_loss /", "if cfg[\"net\"].get(\"aux_loss\", False) else 0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\":", "target = self.generate_new_target(input_prob, target) return self.criterion(predict, target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy", "self.thresh = float(thresh) self.min_kept = int(min_kept) if use_weight: weight = torch.FloatTensor( [ 0.8373,", "h and main_w == w ) loss1 = self._criterion1(main_pred, target) loss2 = self._criterion2(aux_pred,", "0 ): # select anchor pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat", "sample: center of the class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) ) #", "valid_flag.sum() if min_kept >= num_valid: threshold = 1.0 elif num_valid > 0: prob", "aux_pred = preds main_h, main_w = main_pred.size(2), main_pred.size(3) aux_h, aux_w = aux_pred.size(2), aux_pred.size(3)", "prob_l, prob_u, low_mask, high_mask, cfg, memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ): #", "> current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg < current_class_negative_threshold ) *", "# (num_queries, 1 + num_negative, num_feat) seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 )", "a small mini-batch might only contain 1 or no semantic class if momentum_prototype", 
"high_valid_pixel[:, i] prob_seg = prob[:, i, :, :] rep_mask_low_entropy = ( prob_seg >", "num_feat) ).cuda() for i in range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0]", "= np_predict.shape threshold = self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict, 1).reshape((c,", "weight = torch.FloatTensor( # [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5,", "high_mask, cfg, memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ): # current_class_threshold: delta_p (0.3)", "rep.permute(0, 2, 3, 1) rep_teacher = rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list = []", "positive sample: center of the class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) )", "0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, ] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)", "threshold = self.thresh if min_kept > 0: k_th = min(len(pred), min_kept) - 1", "predict.shape with torch.no_grad(): # drop pixels with high entropy prob = torch.softmax(pred_teacher, dim=1)", "memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries * num_negatives,) ) negative_feat = negative_feat[high_entropy_idx] negative_feat", "with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries * num_negatives,) )", "class _, prob_indices_l = torch.sort(prob_l, 1, True) prob_indices_l = prob_indices_l.permute(0, 2, 3, 1)", "= len(seg_num_list) # number of valid classes prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries, 1,", "else: pred_h, pred_w = preds.size(2), preds.size(3) assert pred_h == h and pred_w ==", "1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss( 
reduction=\"mean\", weight=weight, ignore_index=ignore_index )", "target) loss = loss1 + self._aux_weight * loss2 else: pred_h, pred_w = preds.size(2),", "= label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask high_valid_pixel", "assert pred_h == h and pred_w == w loss = self._criterion1(preds, target) return", "low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives =", ":, :, low_rank:high_rank].eq(i), dim=3 ).bool() # generate class mask for labeled data #", "target) class OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy Tensor Version \"\"\" def __init__( self,", "Entropy Tensor Version \"\"\" def __init__( self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False ):", "= target.size(1), target.size(2) if self._aux_weight > 0: # require aux loss main_pred, aux_pred", "b, c, h, w = pred.size() target = target.view(-1) valid_mask = target.ne(self.ignore_index) target", "get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if cfg[\"net\"].get(\"aux_loss\", False) else 0", "queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list)", "torch.no_grad(): _, num_cls, h, w = predict.shape temp_tar = target.clone() temp_tar[target == 255]", "min_kept, use_weight ) self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def forward(self, preds, target): h,", "= -torch.sum(prob * torch.log(prob + 1e-10), dim=1) thresh = np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(),", "valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone() all_feat = torch.cat( (positive_feat, negative_feat), dim=1 ) #", "= torch.sort(prob_u, 1, True) prob_indices_u = 
prob_indices_u.permute( 0, 2, 3, 1 ) #", "new_array[k_th] if new_threshold > self.thresh: threshold = new_threshold return threshold def generate_new_target(self, predict,", "low_mask, high_mask, cfg, memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ): # current_class_threshold: delta_p", "self._criterion2(aux_pred, target) loss = loss1 + self._aux_weight * loss2 else: pred_h, pred_w =", "and pred_w == w loss = self._criterion(preds, target) return loss class CriterionOhem(nn.Module): def", "- 1 / i_iter, 0.999) positive_feat = ( 1 - ema_decay ) *", "False) else 0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\": criterion =", "of valid classes prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda() for i", "F from .utils import dequeue_and_enqueue def compute_rce_loss(predict, target): from einops import rearrange predict", "preds main_h, main_w = main_pred.size(2), main_pred.size(3) aux_h, aux_w = aux_pred.size(2), aux_pred.size(3) assert (", "True) prob_indices_u = prob_indices_u.permute( 0, 2, 3, 1 ) # (num_unlabeled, h, w,", "* (target != 255).bool() return rce.sum() / (target != 255).sum() def compute_unsupervised_loss(predict, target,", "and main_w == aux_w and main_h == h and main_w == w )", "main_w == w ) loss1 = self._criterion1(main_pred, target) loss2 = self._criterion2(aux_pred, target) loss", "* (target != 255).bool() target[thresh_mask] = 255 weight = batch_size * h *", "target, percent, pred_teacher): batch_size, num_class, h, w = predict.shape with torch.no_grad(): # drop", "if cfg_criterion[\"type\"] == \"ohem\": criterion = CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion", "predict = F.softmax(predict, dim=1) with torch.no_grad(): _, num_cls, h, w = predict.shape temp_tar", "new_keys, reco_loss / valid_seg def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight = ( 
cfg[\"net\"][\"aux_loss\"][\"loss_weight\"]", "0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037,", "valid_inds[kept_flag] label = input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long()", "w, num_cls) _, prob_indices_u = torch.sort(prob_u, 1, True) prob_indices_u = prob_indices_u.permute( 0, 2,", "= cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"]", "else: # in some rare cases, all queries in the current query class", "h, w) target:(n, h, w) weight (Tensor, optional): a manual rescaling weight given", "\"\"\" assert not target.requires_grad input_prob = F.softmax(predict, 1) target = self.generate_new_target(input_prob, target) return", "== 2 and main_h == aux_h and main_w == aux_w and main_h ==", "def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label self.thresh =", "1, 1) .cuda() ) # (num_queries, 1, num_feat) if momentum_prototype is not None:", "aux_pred.size(2), aux_pred.size(3) assert ( len(preds) == 2 and main_h == aux_h and main_w", "bank (with no gradients) with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint( len(negative_feat),", "of size \"nclasses\" \"\"\" assert not target.requires_grad input_prob = F.softmax(predict, 1) target =", "# weight = torch.FloatTensor( # [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5,", ".cuda(target.get_device()) ) return new_target def forward(self, predict, target, weight=None): \"\"\" Args: predict:(n, c,", "reco_loss + F.cross_entropy( seg_logits / temp, torch.zeros(num_queries).long().cuda() ) if momentum_prototype is None: return", 
"0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882, # 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5,", "target.ne(self.ignore_index) target = target * valid_mask.long() num_valid = valid_mask.sum() prob = F.softmax(pred, dim=1)", "= torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1) thresh =", "min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label self.thresh = float(thresh) self.min_kept = int(min_kept)", "1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds = np.where(valid_flag)[0] label = input_label[valid_flag]", "_, prob_indices_u = torch.sort(prob_u, 1, True) prob_indices_u = prob_indices_u.permute( 0, 2, 3, 1", "loss1 = self._criterion(main_pred, target) + self._criterion1( main_pred, target ) else: loss1 = self._criterion(main_pred,", "num_feat = rep.shape[1] num_labeled = label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel = torch.cat((label_l, label_u),", "dim=1 ) # (num_queries, 1 + num_negative, num_feat) seg_logits = torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat,", "dim=1) * (target != 255).bool() return rce.sum() / (target != 255).sum() def compute_unsupervised_loss(predict,", "memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ): # current_class_threshold: delta_p (0.3) # current_class_negative_threshold:", "= target.ne(self.ignore_index) target = target * valid_mask.long() num_valid = valid_mask.sum() prob = F.softmax(pred,", "self.min_kept) - 1] if mask_prob[threshold_index] > self.thresh: threshold = mask_prob[threshold_index] kept_mask = mask_prob.le(threshold)", "self.thresh: threshold = mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target = target * kept_mask.long() valid_mask", "keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) 
valid_classes.append(i) if", "c -> b c h w\") label = torch.clamp(label, min=1e-4, max=1.0) rce =", "__init__( self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__() self.ignore_index = ignore_index", "= preds.size(2), preds.size(3) assert pred_h == h and pred_w == w loss =", "loss1 = self._criterion1(main_pred, target) loss2 = self._criterion2(aux_pred, target) loss = loss1 + self._aux_weight", "= nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0) n, c, h,", "1 new_array = np.partition(pred, k_th) new_threshold = new_array[k_th] if new_threshold > self.thresh: threshold", "0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507, ] ).cuda()", "w ) if self.use_weight: loss1 = self._criterion(main_pred, target) + self._criterion1( main_pred, target )", "(target != 255).bool() target[thresh_mask] = 255 weight = batch_size * h * w", "prob_i_classes = prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool() class_mask = torch.cat(", "loss2 = self._criterion2(aux_pred, target) loss = loss1 + self._aux_weight * loss2 else: pred_h,", "aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) else: criterion = Criterion( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"] ) return", ") # int(self.min_kept_ratio * n * h * w) input_label = target.ravel().astype(np.int32) input_prob", ") return criterion class Criterion(nn.Module): def __init__(self, aux_weight, ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight", "w / torch.sum(target != 255) loss = weight * F.cross_entropy(predict, target, ignore_index=255) #", "* high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center of the class seg_proto_list.append( torch.mean(", "a manual 
rescaling weight given to each class. If given, has to be", "1.0865, 1.0955, 1.0865, 1.1529, 1.0507, ] ).cuda() # weight = torch.FloatTensor( # [0.4762,", "negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries * num_negatives,) ) negative_feat =", "not None: if not (momentum_prototype == 0).all(): ema_decay = min(1 - 1 /", "= input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] kept_flag = pred <= threshold", "2, 3, 1) # (num_labeled, h, w, num_cls) _, prob_indices_u = torch.sort(prob_u, 1,", "temp = cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled", "== 0).all(): ema_decay = min(1 - 1 / i_iter, 0.999) positive_feat = (", "= target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 1).reshape((c, -1)) valid_flag = input_label != self.ignore_label valid_inds", "high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center of the class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(),", ".long() .cuda(target.get_device()) ) return new_target def forward(self, predict, target, weight=None): \"\"\" Args: predict:(n,", "manual rescaling weight given to each class. 
If given, has to be a", "def compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size, num_class, h, w = predict.shape with torch.no_grad():", "Tensor Version \"\"\" def __init__( self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor,", "as nn from torch.nn import functional as F from .utils import dequeue_and_enqueue def", "torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def forward(self, pred, target): b, c, h, w =", "* torch.log(prob + 1e-10), dim=1) thresh = np.percentile( entropy[target != 255].detach().cpu().numpy().flatten(), percent )", "1, True) prob_indices_u = prob_indices_u.permute( 0, 2, 3, 1 ) # (num_unlabeled, h,", "1.0 / factor, 1.0 / factor), order=1) target = nd.zoom(np_target, (1.0, 1.0 /", ") * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center of the class seg_proto_list.append(", "= negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape( num_queries, num_negatives, num_feat ) positive_feat = ( seg_proto[i]", "preds, target): h, w = target.size(1), target.size(2) if self._aux_weight > 0: # require", "queue_prtlis, queue_size, rep_teacher, momentum_prototype=None, i_iter=0, ): # current_class_threshold: delta_p (0.3) # current_class_negative_threshold: delta_n", "return new_keys, torch.tensor(0.0) * rep.sum() else: return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum() else:", "prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w, num_cls) _, prob_indices_u", "target.data.cpu().numpy() n, c, h, w = np_predict.shape threshold = self.find_threshold(np_predict, np_target) input_label =", "scipy.ndimage as nd import torch import torch.nn as nn from torch.nn import functional", "= ignore_label self.thresh = float(thresh) self.min_kept = int(min_kept) self.factor = factor 
self.criterion =", "0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882, # 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda()", "for i-th class high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg = prob[:, i, :, :]", "numpy as np import scipy.ndimage as nd import torch import torch.nn as nn", "label new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return new_target def forward(self, predict,", "self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def forward(self, preds, target):", "is None: return new_keys, reco_loss / valid_seg else: return prototype, new_keys, reco_loss /", "w = target.size(1), target.size(2) if self._aux_weight > 0: # require aux loss main_pred,", "torch.FloatTensor( # [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263,", "input_label[valid_flag] num_valid = valid_flag.sum() if num_valid > 0: prob = input_prob[:, valid_flag] pred", "ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\": criterion = CriterionOhem( aux_weight, ignore_index=ignore_index, **cfg_criterion[\"kwargs\"]", "= predict.shape with torch.no_grad(): # drop pixels with high entropy prob = torch.softmax(pred_teacher,", "torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def forward(self,", "kept_mask = mask_prob.le(threshold) target = target * kept_mask.long() valid_mask = valid_mask * kept_mask", "= ( 1 - ema_decay ) * positive_feat + ema_decay * momentum_prototype[ valid_classes[i]", "( seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1) .cuda() ) # (num_queries, 1, num_feat)", "dim=0) * low_mask high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask rep = rep.permute(0,", "!= 255).sum() def 
compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size, num_class, h, w = predict.shape", "= prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w, num_cls) _, prob_indices_u =", "* w) input_label = target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 1).reshape((c, -1)) valid_flag = input_label", "the class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) ) # generate class mask", "preds.size(3) assert pred_h == h and pred_w == w loss = self._criterion(preds, target)", "# prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3", "self).__init__() self._aux_weight = aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight ) self._criterion2", "self._aux_weight = aux_weight self._ignore_index = ignore_index self.use_weight = use_weight if not use_weight: self._criterion", "1.1529, 1.0507, ] ).cuda() # weight = torch.FloatTensor( # [0.4762, 0.5, 0.4762, 1.4286,", "loss main_pred, aux_pred = preds main_h, main_w = main_pred.size(2), main_pred.size(3) aux_h, aux_w =", "mask_prob[threshold_index] > self.thresh: threshold = mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target = target *", "positive_feat = ( seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1) .cuda() ) # (num_queries,", "target.clone() temp_tar[target == 255] = 0 label = ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) #", "np import scipy.ndimage as nd import torch import torch.nn as nn from torch.nn", "pixels with high entropy prob = torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob * torch.log(prob", "= torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3 ).bool() # generate class mask for", "num_valid = valid_mask.sum() prob = F.softmax(pred, dim=1) prob = (prob.transpose(0, 1)).reshape(c, -1) 
if", "pred <= threshold valid_inds = valid_inds[kept_flag] label = input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label", "* kept_mask.long() valid_mask = valid_mask * kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index) target =", "321, 321] return loss def compute_contra_memobank_loss( rep, label_l, label_u, prob_l, prob_u, low_mask, high_mask,", "and main_w == w ) loss1 = self._criterion1(main_pred, target) loss2 = self._criterion2(aux_pred, target)", "if mask_prob[threshold_index] > self.thresh: threshold = mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target = target", "> self.thresh: threshold = mask_prob[threshold_index] kept_mask = mask_prob.le(threshold) target = target * kept_mask.long()", "main_w = main_pred.size(2), main_pred.size(3) aux_h, aux_w = aux_pred.size(2), aux_pred.size(3) assert ( len(preds) ==", "# [10, 321, 321] return loss def compute_contra_memobank_loss( rep, label_l, label_u, prob_l, prob_u,", "select anchor pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat = ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda()", "semantic class if momentum_prototype is None: return new_keys, torch.tensor(0.0) * rep.sum() else: return", "dtype=np.int32)] kept_flag = pred <= threshold valid_inds = valid_inds[kept_flag] label = input_label[valid_inds].copy() input_label.fill(self.ignore_label)", "(prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda() for i in range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) >", "target = target * kept_mask.long() valid_mask = valid_mask * kept_mask target = target.masked_fill_(~valid_mask,", "h, w, num_cls) label = rearrange(label, \"b h w c -> b c", "0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif", "h, w = predict.shape temp_tar = target.clone() 
temp_tar[target == 255] = 0 label", "is not None: if not (momentum_prototype == 0).all(): ema_decay = min(1 - 1", "0), class_mask_u), dim=0 ) negative_mask = rep_mask_high_entropy * class_mask keys = rep_teacher[negative_mask].detach() new_keys.append(", "compute_contra_memobank_loss( rep, label_l, label_u, prob_l, prob_u, low_mask, high_mask, cfg, memobank, queue_prtlis, queue_size, rep_teacher,", "class mask for unlabeled data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum(", "min(1 - 1 / i_iter, 0.999) positive_feat = ( 1 - ema_decay )", "ignore_index self.thresh = float(thresh) self.min_kept = int(min_kept) if use_weight: weight = torch.FloatTensor( [", "loss class CriterionOhem(nn.Module): def __init__( self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem,", "index[min(len(index), self.min_kept) - 1] if mask_prob[threshold_index] > self.thresh: threshold = mask_prob[threshold_index] kept_mask =", "= F.softmax(pred, dim=1) prob = (prob.transpose(0, 1)).reshape(c, -1) if self.min_kept > num_valid: pass", "ignore_index=255) # [10, 321, 321] return loss def compute_contra_memobank_loss( rep, label_l, label_u, prob_l,", "# the center of each class _, prob_indices_l = torch.sort(prob_l, 1, True) prob_indices_l", "1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507, ] ).cuda() #", "__init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d, self).__init__() self.ignore_label = ignore_label self.thresh = float(thresh)", "self._criterion1 = nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def forward(self, preds, target): h, w =", "# select binary mask for i-th class high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg =", ") # generate class mask for unlabeled data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]]", "if self.use_weight: loss1 = 
self._criterion(main_pred, target) + self._criterion1( main_pred, target ) else: loss1", "Args: predict:(n, c, h, w) target:(n, h, w) weight (Tensor, optional): a manual", "rep, label_l, label_u, prob_l, prob_u, low_mask, high_mask, cfg, memobank, queue_prtlis, queue_size, rep_teacher, momentum_prototype=None,", "1.0955, 1.0865, 1.1529, 1.0507, ] ).cuda() # weight = torch.FloatTensor( # [0.4762, 0.5,", "else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def forward(self, pred, target): b, c,", "/ factor), order=0) n, c, h, w = predict.shape min_kept = self.min_kept //", "mask_prob.sort() threshold_index = index[min(len(index), self.min_kept) - 1] if mask_prob[threshold_index] > self.thresh: threshold =", "apply negative key sampling from memory bank (with no gradients) with torch.no_grad(): negative_feat", "= torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256] valid_seg = len(seg_num_list) #", "len(seg_feat_low_entropy_list[i]), size=(num_queries,) ) anchor_feat = ( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in some rare", "ema_decay ) * positive_feat + ema_decay * momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone()", "seg_feat_all_list = [] seg_feat_low_entropy_list = [] # candidate anchor pixels seg_num_list = []", "in some rare cases, all queries in the current query class are easy", "= new_array[k_th] if new_threshold > self.thresh: threshold = new_threshold return threshold def generate_new_target(self,", "> 0: prob = prob.masked_fill_(~valid_mask, 1) mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] threshold =", "temp_tar = target.clone() temp_tar[target == 255] = 0 label = ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda()", "= nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1) target =", "self._criterion1(main_pred, target) loss2 = 
self._criterion2(aux_pred, target) loss = loss1 + self._aux_weight * loss2", "reco_loss + 0 * rep.sum() continue # apply negative key sampling from memory", "from .utils import dequeue_and_enqueue def compute_rce_loss(predict, target): from einops import rearrange predict =", "nn from torch.nn import functional as F from .utils import dequeue_and_enqueue def compute_rce_loss(predict,", "0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\": criterion = CriterionOhem( aux_weight,", "prob_indices_u = prob_indices_u.permute( 0, 2, 3, 1 ) # (num_unlabeled, h, w, num_cls)", "min_kept) def forward(self, preds, target): h, w = target.size(1), target.size(2) if self._aux_weight >", "= high_valid_pixel[:, i] prob_seg = prob[:, i, :, :] rep_mask_low_entropy = ( prob_seg", "number of low_valid pixels in each class seg_proto_list = [] # the center", "predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1) target", ").bool() # generate class mask for labeled data # label_l_mask = rep_mask_high_entropy[: num_labeled]", "num_valid > 0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] threshold", "self._criterion1(preds, target) return loss class OhemCrossEntropy2d(nn.Module): def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): super(OhemCrossEntropy2d,", "prob_seg > current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg < current_class_negative_threshold )", "= use_weight if not use_weight: self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor( [", "min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight = aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index,", "rep.sum() else: reco_loss = torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256] valid_seg", ") ) # generate 
class mask for unlabeled data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled", "== w ) if self.use_weight: loss1 = self._criterion(main_pred, target) + self._criterion1( main_pred, target", "None: return new_keys, torch.tensor(0.0) * rep.sum() else: return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum()", "self.ignore_index = ignore_index self.thresh = float(thresh) self.min_kept = int(min_kept) if use_weight: weight =", "0: _, index = mask_prob.sort() threshold_index = index[min(len(index), self.min_kept) - 1] if mask_prob[threshold_index]", "1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0,", "in the current query class are easy reco_loss = reco_loss + 0 *", "shape: [valid_seg, 256] valid_seg = len(seg_num_list) # number of valid classes prototype =", ") negative_mask = rep_mask_high_entropy * class_mask keys = rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i],", "rep = rep.permute(0, 2, 3, 1) rep_teacher = rep_teacher.permute(0, 2, 3, 1) seg_feat_all_list", "rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0) # prob_i_classes = prob_indices_l[label_l_mask] class_mask_l =", "= valid_flag.sum() if min_kept >= num_valid: threshold = 1.0 elif num_valid > 0:", "seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in some rare cases, all queries in the current", "( prob_seg < current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample: center", "if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if ( len(seg_num_list) <= 1 ): #", "torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index )", "elif 
num_valid > 0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)]", "cfg[\"num_negatives\"] num_feat = rep.shape[1] num_labeled = label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel = torch.cat((label_l,", "self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", weight=weight, ignore_index=ignore_index ) elif reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\",", "self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight = aux_weight self._criterion1", "key sampling from memory bank (with no gradients) with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda()", "return loss class CriterionOhem(nn.Module): def __init__( self, aux_weight, thresh=0.7, min_kept=100000, ignore_index=255, use_weight=False, ):", "self.factor = factor self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict, np_target): # downsample 1/8", "= np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if num_valid > 0: prob", "len(seg_num_list) <= 1 ): # in some rare cases, a small mini-batch might", "thresh, min_kept) def forward(self, preds, target): h, w = target.size(1), target.size(2) if self._aux_weight", "index = mask_prob.sort() threshold_index = index[min(len(index), self.min_kept) - 1] if mask_prob[threshold_index] > self.thresh:", "255).sum() def compute_unsupervised_loss(predict, target, percent, pred_teacher): batch_size, num_class, h, w = predict.shape with", "high_mask rep = rep.permute(0, 2, 3, 1) rep_teacher = rep_teacher.permute(0, 2, 3, 1)", "[10, 321, 321] return loss def compute_contra_memobank_loss( rep, label_l, label_u, prob_l, prob_u, low_mask,", "(batch_size, num_cls, h, w) valid_classes = [] new_keys = [] for i in", "torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda() for i in 
range(valid_seg): if ( len(seg_feat_low_entropy_list[i])", "1, num_feat) if momentum_prototype is not None: if not (momentum_prototype == 0).all(): ema_decay", "seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1) .cuda() ) # (num_queries, 1, num_feat) if", "num_valid: threshold = 1.0 elif num_valid > 0: prob = input_prob[:, valid_flag] pred", "= predict.shape min_kept = self.min_kept // ( factor * factor ) # int(self.min_kept_ratio", "<= 1 ): # in some rare cases, a small mini-batch might only", "input_label[valid_flag] num_valid = valid_flag.sum() if min_kept >= num_valid: threshold = 1.0 elif num_valid", "aux_h, aux_w = aux_pred.size(2), aux_pred.size(3) assert ( len(preds) == 2 and main_h ==", "= torch.cosine_similarity( anchor_feat.unsqueeze(1), all_feat, dim=2 ) reco_loss = reco_loss + F.cross_entropy( seg_logits /", "num_feat) if momentum_prototype is not None: if not (momentum_prototype == 0).all(): ema_decay =", "> 0: k_th = min(len(pred), min_kept) - 1 new_array = np.partition(pred, k_th) new_threshold", "super(Criterion, self).__init__() self._aux_weight = aux_weight self._ignore_index = ignore_index self.use_weight = use_weight if not", "1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507, ] ).cuda() # weight = torch.FloatTensor(", "torch.zeros(num_queries).long().cuda() ) if momentum_prototype is None: return new_keys, reco_loss / valid_seg else: return", "n, c, h, w = np_predict.shape threshold = self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32)", "+ self._aux_weight * loss2 else: pred_h, pred_w = preds.size(2), preds.size(3) assert pred_h ==", "target, ignore_index=255) # [10, 321, 321] return loss def compute_contra_memobank_loss( rep, label_l, label_u,", "entropy.ge(thresh).bool() * (target != 255).bool() target[thresh_mask] = 255 weight = batch_size * h", "0.0, 1.0, 1.0, 1.0, ] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = 
nn.CrossEntropyLoss( ignore_index=ignore_index,", ":, :] rep_mask_low_entropy = ( prob_seg > current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy =", "valid_seg = len(seg_num_list) # number of valid classes prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries,", "def generate_new_target(self, predict, target): np_predict = predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n, c, h,", "== 0) # prob_i_classes = prob_indices_l[label_l_mask] class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool()", "= torch.cat( (positive_feat, negative_feat), dim=1 ) # (num_queries, 1 + num_negative, num_feat) seg_logits", "float(thresh) self.min_kept = int(min_kept) if use_weight: weight = torch.FloatTensor( [ 0.8373, 0.918, 0.866,", "# positive sample: center of the class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True )", "= ( seg_proto[i] .unsqueeze(0) .unsqueeze(0) .repeat(num_queries, 1, 1) .cuda() ) # (num_queries, 1,", "return threshold def generate_new_target(self, predict, target): np_predict = predict.data.cpu().numpy() np_target = target.data.cpu().numpy() n,", "= torch.cat(seg_proto_list) # shape: [valid_seg, 256] valid_seg = len(seg_num_list) # number of valid", "1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,", "in range(valid_seg): if ( len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0] > 0 ): #", "w) weight (Tensor, optional): a manual rescaling weight given to each class. 
If", "def __init__(self, aux_weight, ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight = aux_weight self._ignore_index = ignore_index", "center of the class seg_proto_list.append( torch.mean( rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True ) ) # generate", "else 0 ) ignore_index = cfg[\"dataset\"][\"ignore_label\"] if cfg_criterion[\"type\"] == \"ohem\": criterion = CriterionOhem(", "/ factor, 1.0 / factor), order=0) n, c, h, w = predict.shape min_kept", "valid_classes = [] new_keys = [] for i in range(num_segments): low_valid_pixel_seg = low_valid_pixel[:,", "num_cls) label = rearrange(label, \"b h w c -> b c h w\")", "low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask high_valid_pixel = torch.cat((label_l, label_u), dim=0) *", "1)).reshape(c, -1) if self.min_kept > num_valid: pass # print('Labels: {}'.format(num_valid)) elif num_valid >", "if not (momentum_prototype == 0).all(): ema_decay = min(1 - 1 / i_iter, 0.999)", "= self._criterion(main_pred, target) + self._criterion1( main_pred, target ) else: loss1 = self._criterion(main_pred, target)", "preds.size(2), preds.size(3) assert pred_h == h and pred_w == w loss = self._criterion1(preds,", "forward(self, preds, target): h, w = target.size(1), target.size(2) if self._aux_weight > 0: #", "len(seg_num_list) # number of valid classes prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat)", "new_keys, reco_loss / valid_seg else: return prototype, new_keys, reco_loss / valid_seg def get_criterion(cfg):", "= low_valid_pixel[:, i] # select binary mask for i-th class high_valid_pixel_seg = high_valid_pixel[:,", "queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum() > 0: seg_num_list.append(int(low_valid_pixel_seg.sum().item())) valid_classes.append(i) if (", "= rep.shape[1] num_labeled = label_l.shape[0] num_segments = label_l.shape[1] low_valid_pixel = torch.cat((label_l, 
label_u), dim=0)", "target) + self._criterion1( main_pred, target ) else: loss1 = self._criterion(main_pred, target) loss2 =", ":low_rank].eq(i), dim=3).bool() class_mask = torch.cat( (class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0", "torch.clamp(label, min=1e-4, max=1.0) rce = -torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool()", ") else: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"mean\", ignore_index=ignore_index ) def forward(self, pred, target): b,", "# drop pixels with high entropy prob = torch.softmax(pred_teacher, dim=1) entropy = -torch.sum(prob", "* num_negatives,) ) negative_feat = negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape( num_queries, num_negatives, num_feat )", "current_class_threshold ) * low_valid_pixel_seg.bool() rep_mask_high_entropy = ( prob_seg < current_class_negative_threshold ) * high_valid_pixel_seg.bool()", "prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w, num_cls) _, prob_indices_u = torch.sort(prob_u,", "* F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321] return loss def compute_contra_memobank_loss( rep,", "delta_p (0.3) # current_class_negative_threshold: delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank,", "valid_flag.sum() if num_valid > 0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label),", "= batch_size * h * w / torch.sum(target != 255) loss = weight", "assert ( len(preds) == 2 and main_h == aux_h and main_w == aux_w", "loss2 else: pred_h, pred_w = preds.size(2), preds.size(3) assert pred_h == h and pred_w", "= valid_mask * kept_mask target = target.masked_fill_(~valid_mask, self.ignore_index) target = target.view(b, h, w)", "0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529,", "from einops import rearrange predict = F.softmax(predict, dim=1) with 
torch.no_grad(): _, num_cls, h,", "i] # select binary mask for i-th class high_valid_pixel_seg = high_valid_pixel[:, i] prob_seg", "nd import torch import torch.nn as nn from torch.nn import functional as F", "self).__init__() self.ignore_label = ignore_label self.thresh = float(thresh) self.min_kept = int(min_kept) self.factor = factor", "of low_valid pixels in each class seg_proto_list = [] # the center of", "main_h == h and main_w == w ) loss1 = self._criterion1(main_pred, target) loss2", "# apply negative key sampling from memory bank (with no gradients) with torch.no_grad():", "pred_w == w loss = self._criterion(preds, target) return loss class CriterionOhem(nn.Module): def __init__(", "1.0 / factor), order=1) target = nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 /", "len(negative_feat), size=(num_queries * num_negatives,) ) negative_feat = negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape( num_queries, num_negatives,", "== aux_w and main_h == h and main_w == w ) if self.use_weight:", "1.0, 1.0, ] ).cuda() self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights )", "each class seg_proto_list = [] # the center of each class _, prob_indices_l", "prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum( prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3 ).bool()", "= nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def forward(self, preds, target): h, w = target.size(1),", "(with no gradients) with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries", "= [] # the center of each class _, prob_indices_l = torch.sort(prob_l, 1,", "pred_teacher): batch_size, num_class, h, w = predict.shape with torch.no_grad(): # drop pixels with", "[ 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,", 
"positive_feat = ( 1 - ema_decay ) * positive_feat + ema_decay * momentum_prototype[", "negative_mask = rep_mask_high_entropy * class_mask keys = rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i],", "label = input_label[valid_flag] num_valid = valid_flag.sum() if num_valid > 0: prob = input_prob[:,", "F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321] return loss def compute_contra_memobank_loss( rep, label_l,", "preds.size(2), preds.size(3) assert pred_h == h and pred_w == w loss = self._criterion(preds,", "np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if num_valid > 0: prob =", "use_weight=False, ): super(CriterionOhem, self).__init__() self._aux_weight = aux_weight self._criterion1 = OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept,", "= 1.0 elif num_valid > 0: prob = input_prob[:, valid_flag] pred = prob[label,", "= ( prob_seg < current_class_negative_threshold ) * high_valid_pixel_seg.bool() seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()]) seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy]) # positive sample:", "mask for labeled data # label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] ==", "num_valid > 0: prob = input_prob[:, valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] kept_flag", "label = input_label[valid_inds].copy() input_label.fill(self.ignore_label) input_label[valid_inds] = label new_target = ( torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device())", "num_negatives,) ) negative_feat = negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape( num_queries, num_negatives, num_feat ) positive_feat", "class Criterion(nn.Module): def __init__(self, aux_weight, ignore_index=255, use_weight=False): super(Criterion, self).__init__() self._aux_weight = aux_weight self._ignore_index", ") loss1 = 
self._criterion1(main_pred, target) loss2 = self._criterion2(aux_pred, target) loss = loss1 +", "new_keys, torch.tensor(0.0) * rep.sum() else: return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum() else: reco_loss", "dim=0) * high_mask rep = rep.permute(0, 2, 3, 1) rep_teacher = rep_teacher.permute(0, 2,", "np.arange(len(label), dtype=np.int32)] kept_flag = pred <= threshold valid_inds = valid_inds[kept_flag] label = input_label[valid_inds].copy()", "rescaling weight given to each class. If given, has to be a Tensor", "( seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in some rare cases, all queries in the", "= rep_mask_high_entropy * class_mask keys = rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i],", "w = predict.shape with torch.no_grad(): # drop pixels with high entropy prob =", "rce = -torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool() return rce.sum() /", "None: if not (momentum_prototype == 0).all(): ema_decay = min(1 - 1 / i_iter,", "= loss1 + self._aux_weight * loss2 else: pred_h, pred_w = preds.size(2), preds.size(3) assert", "w = np_predict.shape threshold = self.find_threshold(np_predict, np_target) input_label = np_target.ravel().astype(np.int32) input_prob = np.rollaxis(np_predict,", "num_valid: pass # print('Labels: {}'.format(num_valid)) elif num_valid > 0: prob = prob.masked_fill_(~valid_mask, 1)", "* positive_feat + ema_decay * momentum_prototype[ valid_classes[i] ] prototype[valid_classes[i]] = positive_feat.clone() all_feat =", "= nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor( [ 0.0, 0.0, 0.0, 1.0, 1.0, 1.0,", "-1) if self.min_kept > num_valid: pass # print('Labels: {}'.format(num_valid)) elif num_valid > 0:", "self.min_kept // ( factor * factor ) # int(self.min_kept_ratio * n * h", "anchor_feat = ( 
seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda() ) else: # in some rare cases, all queries", "0.8333, 0.5263, 0.5882, # 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda() self.criterion = torch.nn.CrossEntropyLoss(", "else: reco_loss = torch.tensor(0.0).cuda() seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256] valid_seg =", "valid_flag] pred = prob[label, np.arange(len(label), dtype=np.int32)] kept_flag = pred <= threshold valid_inds =", "> 0 ): # select anchor pixel seg_low_entropy_idx = torch.randint( len(seg_feat_low_entropy_list[i]), size=(num_queries,) )", "= OhemCrossEntropy2dTensor( ignore_index, thresh, min_kept, use_weight ) self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept) def", "h, w = target.size(1), target.size(2) if self._aux_weight > 0: # require aux loss", "reco_loss / valid_seg def get_criterion(cfg): cfg_criterion = cfg[\"criterion\"] aux_weight = ( cfg[\"net\"][\"aux_loss\"][\"loss_weight\"] if", "nn.CrossEntropyLoss(ignore_index=ignore_index) else: weights = torch.FloatTensor( [ 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0,", "# current_class_threshold: delta_p (0.3) # current_class_negative_threshold: delta_n (1) current_class_threshold = cfg[\"current_class_threshold\"] current_class_negative_threshold =", ") def forward(self, pred, target): b, c, h, w = pred.size() target =", "unlabeled data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum( prob_indices_u[:, :, :,", "torch.FloatTensor( [ 0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539,", "class_mask_u), dim=0 ) negative_mask = rep_mask_high_entropy * class_mask keys = rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue(", "pixels in each class seg_proto_list = [] # the center of each class", "= min(1 - 1 / i_iter, 0.999) positive_feat = ( 1 - ema_decay", "n * h * w) input_label = target.ravel().astype(np.int32) input_prob = np.rollaxis(predict, 
1).reshape((c, -1))", "forward(self, predict, target, weight=None): \"\"\" Args: predict:(n, c, h, w) target:(n, h, w)", "percent ) thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool() target[thresh_mask] = 255 weight", "(label_l[:, i] == 0), class_mask_u), dim=0 ) negative_mask = rep_mask_high_entropy * class_mask keys", "self).__init__() self.ignore_index = ignore_index self.thresh = float(thresh) self.min_kept = int(min_kept) if use_weight: weight", "torch.from_numpy(input_label.reshape(target.size())) .long() .cuda(target.get_device()) ) return new_target def forward(self, predict, target, weight=None): \"\"\" Args:", "0.999) positive_feat = ( 1 - ema_decay ) * positive_feat + ema_decay *", "Version \"\"\" def __init__( self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False ): super(OhemCrossEntropy2dTensor, self).__init__()", "all_feat = torch.cat( (positive_feat, negative_feat), dim=1 ) # (num_queries, 1 + num_negative, num_feat)", "momentum_prototype=None, i_iter=0, ): # current_class_threshold: delta_p (0.3) # current_class_negative_threshold: delta_n (1) current_class_threshold =", "mask for unlabeled data # prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]] class_mask_u = torch.sum( prob_indices_u[:,", "= rep_teacher[negative_mask].detach() new_keys.append( dequeue_and_enqueue( keys=keys, queue=memobank[i], queue_ptr=queue_prtlis[i], queue_size=queue_size[i], ) ) if low_valid_pixel_seg.sum() >", "class mask for labeled data # label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i]", "torch.nn import functional as F from .utils import dequeue_and_enqueue def compute_rce_loss(predict, target): from", "= nn.CrossEntropyLoss(ignore_index=ignore_index) self._criterion1 = nn.CrossEntropyLoss( ignore_index=ignore_index, weight=weights ) def forward(self, preds, target): h,", "= negative_feat.reshape( num_queries, num_negatives, num_feat ) positive_feat = ( seg_proto[i] .unsqueeze(0) 
.unsqueeze(0) .repeat(num_queries,", "k_th) new_threshold = new_array[k_th] if new_threshold > self.thresh: threshold = new_threshold return threshold", "weight = torch.FloatTensor( [ 0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786,", "= torch.cat((label_l, label_u), dim=0) * high_mask rep = rep.permute(0, 2, 3, 1) rep_teacher", "w, num_cls) prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w) valid_classes", "aux_w and main_h == h and main_w == w ) if self.use_weight: loss1", "prototype = torch.zeros( (prob_indices_l.shape[-1], num_queries, 1, num_feat) ).cuda() for i in range(valid_seg): if", "ignore_index=ignore_index ) elif reduce: self.criterion = torch.nn.CrossEntropyLoss( reduction=\"none\", ignore_index=ignore_index ) else: self.criterion =", "if ( len(seg_feat_low_entropy_list[i]) > 0 and memobank[valid_classes[i]][0].shape[0] > 0 ): # select anchor", "): # in some rare cases, a small mini-batch might only contain 1", "self).__init__() self._aux_weight = aux_weight self._ignore_index = ignore_index self.use_weight = use_weight if not use_weight:", "negative_feat[high_entropy_idx] negative_feat = negative_feat.reshape( num_queries, num_negatives, num_feat ) positive_feat = ( seg_proto[i] .unsqueeze(0)", "prob[label, np.arange(len(label), dtype=np.int32)] kept_flag = pred <= threshold valid_inds = valid_inds[kept_flag] label =", "= prob[label, np.arange(len(label), dtype=np.int32)] threshold = self.thresh if min_kept > 0: k_th =", "cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries = cfg[\"num_queries\"] num_negatives = cfg[\"num_negatives\"] num_feat =", "255] = 0 label = ( F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda() ) # (batch, h, w,", "= torch.nn.CrossEntropyLoss(ignore_index=ignore_label) def find_threshold(self, np_predict, np_target): # downsample 1/8 factor = self.factor predict", "0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 
0.9037, 1.0865,", "current_class_negative_threshold = cfg[\"current_class_negative_threshold\"] low_rank, high_rank = cfg[\"low_rank\"], cfg[\"high_rank\"] temp = cfg[\"temperature\"] num_queries =", "factor, 1.0 / factor), order=1) target = nd.zoom(np_target, (1.0, 1.0 / factor, 1.0", "threshold_index = index[min(len(index), self.min_kept) - 1] if mask_prob[threshold_index] > self.thresh: threshold = mask_prob[threshold_index]", "gradients) with torch.no_grad(): negative_feat = memobank[valid_classes[i]][0].clone().cuda() high_entropy_idx = torch.randint( len(negative_feat), size=(num_queries * num_negatives,)", "and pred_w == w loss = self._criterion1(preds, target) return loss class OhemCrossEntropy2d(nn.Module): def", "= np.where(valid_flag)[0] label = input_label[valid_flag] num_valid = valid_flag.sum() if min_kept >= num_valid: threshold", "Tensor of size \"nclasses\" \"\"\" assert not target.requires_grad input_prob = F.softmax(predict, 1) target", "OhemCrossEntropy2dTensor(nn.Module): \"\"\" Ohem Cross Entropy Tensor Version \"\"\" def __init__( self, ignore_index=255, thresh=0.7,", "class. If given, has to be a Tensor of size \"nclasses\" \"\"\" assert", "* rep.sum() continue # apply negative key sampling from memory bank (with no" ]